diff --git a/.mailmap b/.mailmap index 34acd34bbf9bfc..b78aa092b4bb85 100644 --- a/.mailmap +++ b/.mailmap @@ -19,6 +19,7 @@ Abhinav Kumar Ahmad Masri Adam Oldham Adam Radford +Aditya Garg Adriana Reus Adrian Bunk Ajay Kaher @@ -207,6 +208,7 @@ Claudiu Beznea Colin Ian King Corey Minyard Damian Hobson-Garcia +Dan Carpenter Dan Carpenter Dan Williams Daniel Borkmann @@ -495,6 +497,7 @@ Leon Romanovsky Leon Romanovsky Leon Romanovsky Leo Yan +Liam R. Howlett Liam Mark Linas Vepstas Linus Lüssing @@ -505,6 +508,8 @@ Linus Walleij Linus Walleij Linus Walleij +Li Wang +Li Wang Li Yang Li Yang Lior David @@ -687,6 +692,7 @@ Punit Agrawal Puranjay Mohan Qais Yousef Qais Yousef +Qi Zheng Quentin Monnet Quentin Monnet Quentin Perret diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 8ad0b27813175d..6efd0095ed995b 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -220,7 +220,7 @@ cgroup v2 currently supports the following mount options. memory_hugetlb_accounting Count HugeTLB memory usage towards the cgroup's overall memory usage for the memory controller (for the purpose of - statistics reporting and memory protetion). This is a new + statistics reporting and memory protection). This is a new behavior that could regress existing setups, so it must be explicitly opted in with this mount option. 
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml index 7c4d6170491db6..f5c584cf2146dd 100644 --- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml +++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml @@ -24,6 +24,7 @@ properties: compatible: items: - enum: + - qcom,eliza-ipcc - qcom,glymur-ipcc - qcom,kaanapali-ipcc - qcom,milos-ipcc diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt2701-audio.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt2701-audio.yaml index 45382c4d86aa35..30f331366566e1 100644 --- a/Documentation/devicetree/bindings/sound/mediatek,mt2701-audio.yaml +++ b/Documentation/devicetree/bindings/sound/mediatek,mt2701-audio.yaml @@ -32,6 +32,7 @@ properties: maxItems: 1 clocks: + minItems: 34 items: - description: audio infra sys clock - description: top audio mux 1 @@ -67,8 +68,13 @@ properties: - description: top audio a1 sys pd - description: top audio a2 sys pd - description: audio merge interface pd + - description: HADDS2 PLL 294 MHz (HDMI audio path root) + - description: HDMI audio interface pd + - description: S/PDIF interface pd + - description: audio APLL root pd clock-names: + minItems: 34 items: - const: infra_sys_audio_clk - const: top_audio_mux1_sel @@ -104,6 +110,10 @@ properties: - const: audio_a1sys_pd - const: audio_a2sys_pd - const: audio_mrgif_pd + - const: hadds2pll_294m + - const: audio_hdmi_pd + - const: audio_spdf_pd + - const: audio_apll_pd required: - compatible @@ -113,4 +123,17 @@ required: - clocks - clock-names +allOf: + - if: + properties: + compatible: + contains: + const: mediatek,mt7622-audio + then: + properties: + clocks: + maxItems: 34 + clock-names: + maxItems: 34 + additionalProperties: false diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt2701-hdmi-audio.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt2701-hdmi-audio.yaml new file mode 100644 index 
00000000000000..9d5a8166e51ffc --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mediatek,mt2701-hdmi-audio.yaml @@ -0,0 +1,48 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/mediatek,mt2701-hdmi-audio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek MT2701/MT7623N HDMI audio output + +maintainers: + - Daniel Golle + +description: + Sound card routing the MT2701/MT7623N Audio Front End HDMI + playback path to the on-chip HDMI transmitter. The AFE + provides the DMA memif and internal I2S engine; the HDMI + transmitter acts as the audio codec on the serialised link. + +properties: + compatible: + oneOf: + - const: mediatek,mt2701-hdmi-audio + - items: + - const: mediatek,mt7623n-hdmi-audio + - const: mediatek,mt2701-hdmi-audio + + mediatek,platform: + $ref: /schemas/types.yaml#/definitions/phandle + description: Phandle of the MT2701/MT7623N AFE platform node. + + mediatek,audio-codec: + $ref: /schemas/types.yaml#/definitions/phandle + description: Phandle of the HDMI transmitter acting as audio codec. 
+ +required: + - compatible + - mediatek,platform + - mediatek,audio-codec + +additionalProperties: false + +examples: + - | + sound-hdmi { + compatible = "mediatek,mt7623n-hdmi-audio", + "mediatek,mt2701-hdmi-audio"; + mediatek,platform = <&afe>; + mediatek,audio-codec = <&hdmi0>; + }; diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt6351-sound.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt6351-sound.yaml new file mode 100644 index 00000000000000..b422e238b5127b --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mediatek,mt6351-sound.yaml @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/mediatek,mt6351-sound.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek MT6351 Audio CODEC + +maintainers: + - KaiChieh Chuang + +description: + MT6351 Audio CODEC is a part of the MediaTek MT6351 PMIC. + It communicates with the SoC through the MediaTek PMIC wrapper(pwrap). 
+ +allOf: + - $ref: dai-common.yaml# + +properties: + compatible: + const: mediatek,mt6351-sound + + "#sound-dai-cells": + const: 0 + +required: + - compatible + - "#sound-dai-cells" + +unevaluatedProperties: false + +examples: + - | + sound { + compatible = "mediatek,mt6351-sound"; + #sound-dai-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8196-afe.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8196-afe.yaml new file mode 100644 index 00000000000000..949f8622baf9f2 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mediatek,mt8196-afe.yaml @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/mediatek,mt8196-afe.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Audio Front End PCM controller for MT8196 + +maintainers: + - Darren Ye + +properties: + compatible: + const: mediatek,mt8196-afe + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + memory-region: + maxItems: 1 + + power-domains: + maxItems: 1 + + clocks: + items: + - description: mux for audio intbus + - description: mux for audio engen1 + - description: mux for audio engen2 + - description: mux for audio h + - description: audio apll1 clock + - description: audio apll2 clock + - description: audio apll12 divide for i2sin0 + - description: audio apll12 divide for i2sin1 + - description: audio apll12 divide for fmi2s + - description: audio apll12 divide for tdmout mck + - description: audio apll12 divide for tdmout bck + - description: mux for adsp clock + + clock-names: + items: + - const: top_aud_intbus + - const: top_aud_eng1 + - const: top_aud_eng2 + - const: top_aud_h + - const: apll1 + - const: apll2 + - const: apll12_div_i2sin0 + - const: apll12_div_i2sin1 + - const: apll12_div_fmi2s + - const: apll12_div_tdmout_m + - const: apll12_div_tdmout_b + - const: top_adsp + +required: + - compatible + - reg + - interrupts + - 
memory-region + - power-domains + - clocks + - clock-names + +additionalProperties: false + +examples: + - | + #include + #include + + soc { + #address-cells = <2>; + #size-cells = <2>; + + afe@1a110000 { + compatible = "mediatek,mt8196-afe"; + reg = <0 0x1a110000 0 0x9000>; + interrupts = ; + memory-region = <&afe_dma_mem_reserved>; + power-domains = <&scpsys 14>; //MT8196_POWER_DOMAIN_AUDIO + pinctrl-names = "default"; + pinctrl-0 = <&aud_pins_default>; + clocks = <&vlp_cksys_clk 40>, //CLK_VLP_CK_AUD_INTBUS_SEL + <&vlp_cksys_clk 38>, //CLK_VLP_CK_AUD_ENGEN1_SEL + <&vlp_cksys_clk 39>, //CLK_VLP_CK_AUD_ENGEN2_SEL + <&vlp_cksys_clk 37>, //CLK_VLP_CK_AUDIO_H_SEL + <&vlp_cksys_clk 0>, //CLK_VLP_CK_VLP_APLL1 + <&vlp_cksys_clk 1>, //CLK_VLP_CK_VLP_APLL2 + <&cksys_clk 80>, //CLK_CK_APLL12_CK_DIV_I2SIN0 + <&cksys_clk 81>, //CLK_CK_APLL12_CK_DIV_I2SIN1 + <&cksys_clk 92>, //CLK_CK_APLL12_CK_DIV_FMI2S + <&cksys_clk 93>, //CLK_CK_APLL12_CK_DIV_TDMOUT_M + <&cksys_clk 94>, //CLK_CK_APLL12_CK_DIV_TDMOUT_B + <&cksys_clk 45>; //CLK_CK_ADSP_SEL + clock-names = "top_aud_intbus", + "top_aud_eng1", + "top_aud_eng2", + "top_aud_h", + "apll1", + "apll2", + "apll12_div_i2sin0", + "apll12_div_i2sin1", + "apll12_div_fmi2s", + "apll12_div_tdmout_m", + "apll12_div_tdmout_b", + "top_adsp"; + }; + }; + +... 
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8196-nau8825.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8196-nau8825.yaml new file mode 100644 index 00000000000000..83350faa1e2999 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mediatek,mt8196-nau8825.yaml @@ -0,0 +1,100 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/mediatek,mt8196-nau8825.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek MT8196 ASoC sound card + +maintainers: + - Darren Ye + +allOf: + - $ref: sound-card-common.yaml# + +properties: + compatible: + enum: + - mediatek,mt8196-nau8825-sound + - mediatek,mt8196-rt5682s-sound + - mediatek,mt8196-rt5650-sound + + mediatek,platform: + $ref: /schemas/types.yaml#/definitions/phandle + description: The phandle of MT8188 ASoC platform. + +patternProperties: + "^dai-link-[0-9]+$": + type: object + description: + Container for dai-link level properties and CODEC sub-nodes. + + properties: + link-name: + description: + This property corresponds to the name of the BE dai-link to which + we are going to update parameters in this node. + items: + enum: + - TDM_DPTX_BE + - I2SOUT6_BE + - I2SIN6_BE + - I2SOUT4_BE + - I2SOUT3_BE + + codec: + description: Holds subnode which indicates codec dai. + type: object + additionalProperties: false + properties: + sound-dai: + minItems: 1 + maxItems: 2 + required: + - sound-dai + + dai-format: + description: audio format. + items: + enum: + - i2s + - right_j + - left_j + - dsp_a + - dsp_b + + mediatek,clk-provider: + $ref: /schemas/types.yaml#/definitions/string + description: Indicates dai-link clock master. 
+ enum: + - cpu + - codec + + additionalProperties: false + + required: + - link-name + +required: + - compatible + - mediatek,platform + +unevaluatedProperties: false + +examples: + - | + sound { + compatible = "mediatek,mt8196-nau8825-sound"; + model = "mt8196-nau8825"; + mediatek,platform = <&afe>; + dai-link-0 { + link-name = "I2SOUT6_BE"; + dai-format = "i2s"; + mediatek,clk-provider = "cpu"; + codec { + sound-dai = <&nau8825>; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/sound/mt6351.txt b/Documentation/devicetree/bindings/sound/mt6351.txt deleted file mode 100644 index 7fb2cb99245ed5..00000000000000 --- a/Documentation/devicetree/bindings/sound/mt6351.txt +++ /dev/null @@ -1,16 +0,0 @@ -Mediatek MT6351 Audio Codec - -The communication between MT6351 and SoC is through Mediatek PMIC wrapper. -For more detail, please visit Mediatek PMIC wrapper documentation. - -Must be a child node of PMIC wrapper. - -Required properties: - -- compatible : "mediatek,mt6351-sound". - -Example: - -mt6351_snd { - compatible = "mediatek,mt6351-sound"; -}; diff --git a/Documentation/filesystems/isofs.rst b/Documentation/filesystems/isofs.rst index 08fd469091d4bf..2a30999b024f31 100644 --- a/Documentation/filesystems/isofs.rst +++ b/Documentation/filesystems/isofs.rst @@ -57,7 +57,7 @@ Mount options unique to the isofs filesystem. 
Recommended documents about ISO 9660 standard are located at: - http://www.y-adagio.com/ -- ftp://ftp.ecma.ch/ecma-st/Ecma-119.pdf +- https://ecma-international.org/wp-content/uploads/ECMA-119_2nd_edition_december_1987.pdf Quoting from the PDF "This 2nd Edition of Standard ECMA-119 is technically identical with ISO 9660.", so it is a valid and gratis substitute of the diff --git a/Documentation/netlink/specs/psp.yaml b/Documentation/netlink/specs/psp.yaml index 100c36cda8e5d4..bfcd6e4ecb850e 100644 --- a/Documentation/netlink/specs/psp.yaml +++ b/Documentation/netlink/specs/psp.yaml @@ -188,6 +188,7 @@ operations: name: dev-set doc: Set the configuration of a PSP device. attribute-set: dev + flags: [admin-perm] do: request: attributes: @@ -207,6 +208,7 @@ operations: name: key-rotate doc: Rotate the device key. attribute-set: dev + flags: [admin-perm] do: request: attributes: diff --git a/Documentation/sound/codecs/index.rst b/Documentation/sound/codecs/index.rst index 2cb95d87bbef29..7594d0a38d6bcf 100644 --- a/Documentation/sound/codecs/index.rst +++ b/Documentation/sound/codecs/index.rst @@ -7,3 +7,4 @@ Codec-Specific Information :maxdepth: 2 cs35l56 + tas675x diff --git a/Documentation/sound/codecs/tas675x.rst b/Documentation/sound/codecs/tas675x.rst index 36bcdf18d23866..c08b0e392306e5 100644 --- a/Documentation/sound/codecs/tas675x.rst +++ b/Documentation/sound/codecs/tas675x.rst @@ -656,7 +656,7 @@ These faults place affected channels into the FAULT state. The driver issues fault clear (register 0x01 bit 3) to allow recovery. 
Overtemperature Shutdown (0x87) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ======== ========================================== Bits Description diff --git a/MAINTAINERS b/MAINTAINERS index 4f9b28a780be02..a03907010da46b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7873,7 +7873,7 @@ F: drivers/gpu/drm/sun4i/sun8i* DRM DRIVER FOR APPLE TOUCH BARS M: Aun-Ali Zaidi -M: Aditya Garg +M: Aditya Garg L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -13860,7 +13860,6 @@ M: Pratyush Yadav R: Dave Young L: kexec@lists.infradead.org S: Maintained -W: http://lse.sourceforge.net/kdump/ F: Documentation/admin-guide/kdump/ F: fs/proc/vmcore.c F: include/linux/crash_core.h @@ -15252,7 +15251,7 @@ M: Andrea Cervesato M: Cyril Hrubis M: Jan Stancek M: Petr Vorel -M: Li Wang +M: Li Wang M: Yang Xu M: Xiao Yang L: ltp@lists.linux.it (subscribers-only) @@ -15399,7 +15398,7 @@ F: include/net/netns/mctp.h F: net/mctp/ MAPLE TREE -M: Liam R. Howlett +M: Liam R. Howlett R: Alice Ryhl R: Andrew Ballance L: maple-tree@lists.infradead.org @@ -16759,7 +16758,7 @@ MEMORY MANAGEMENT - CORE M: Andrew Morton M: David Hildenbrand R: Lorenzo Stoakes -R: Liam R. Howlett +R: Liam R. Howlett R: Vlastimil Babka R: Mike Rapoport R: Suren Baghdasaryan @@ -16805,7 +16804,7 @@ F: mm/sparse.c F: mm/util.c F: mm/vmpressure.c F: mm/vmstat.c -N: include/linux/page[-_]* +N: include\/linux\/page[-_][a-zA-Z]* MEMORY MANAGEMENT - EXECMEM M: Andrew Morton @@ -16895,7 +16894,7 @@ MEMORY MANAGEMENT - MISC M: Andrew Morton M: David Hildenbrand R: Lorenzo Stoakes -R: Liam R. Howlett +R: Liam R. 
Howlett R: Vlastimil Babka R: Mike Rapoport R: Suren Baghdasaryan @@ -16962,6 +16961,7 @@ S: Maintained F: include/linux/compaction.h F: include/linux/gfp.h F: include/linux/page-isolation.h +F: include/linux/pageblock-flags.h F: mm/compaction.c F: mm/debug_page_alloc.c F: mm/debug_page_ref.c @@ -16983,7 +16983,7 @@ M: Andrew Morton M: Johannes Weiner R: David Hildenbrand R: Michal Hocko -R: Qi Zheng +R: Qi Zheng R: Shakeel Butt R: Lorenzo Stoakes L: linux-mm@kvack.org @@ -16996,7 +16996,7 @@ M: Andrew Morton M: David Hildenbrand M: Lorenzo Stoakes R: Rik van Riel -R: Liam R. Howlett +R: Liam R. Howlett R: Vlastimil Babka R: Harry Yoo R: Jann Horn @@ -17043,7 +17043,7 @@ M: David Hildenbrand M: Lorenzo Stoakes R: Zi Yan R: Baolin Wang -R: Liam R. Howlett +R: Liam R. Howlett R: Nico Pache R: Ryan Roberts R: Dev Jain @@ -17081,7 +17081,7 @@ F: tools/testing/selftests/mm/uffd-*.[ch] MEMORY MANAGEMENT - RUST M: Alice Ryhl R: Lorenzo Stoakes -R: Liam R. Howlett +R: Liam R. Howlett L: linux-mm@kvack.org L: rust-for-linux@vger.kernel.org S: Maintained @@ -17095,7 +17095,7 @@ F: rust/kernel/page.rs MEMORY MAPPING M: Andrew Morton -M: Liam R. Howlett +M: Liam R. Howlett M: Lorenzo Stoakes R: Vlastimil Babka R: Jann Horn @@ -17127,7 +17127,7 @@ F: tools/testing/vma/ MEMORY MAPPING - LOCKING M: Andrew Morton M: Suren Baghdasaryan -M: Liam R. Howlett +M: Liam R. Howlett M: Lorenzo Stoakes R: Vlastimil Babka R: Shakeel Butt @@ -17142,7 +17142,7 @@ F: mm/mmap_lock.c MEMORY MAPPING - MADVISE (MEMORY ADVICE) M: Andrew Morton -M: Liam R. Howlett +M: Liam R. Howlett M: Lorenzo Stoakes M: David Hildenbrand R: Vlastimil Babka @@ -18672,19 +18672,59 @@ F: net/xfrm/ F: tools/testing/selftests/net/ipsec.c NETWORKING [IPv4/IPv6] -M: "David S. 
Miller" M: David Ahern +M: Ido Schimmel L: netdev@vger.kernel.org S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git -F: arch/x86/net/* -F: include/linux/ip.h -F: include/linux/ipv6* +F: Documentation/netlink/specs/rt-addr.yaml +F: Documentation/netlink/specs/rt-neigh.yaml +F: Documentation/netlink/specs/rt-route.yaml +F: Documentation/netlink/specs/rt-rule.yaml +F: include/linux/inetdevice.h +F: include/linux/mroute* +F: include/net/addrconf.h +F: include/net/arp.h F: include/net/fib* +F: include/net/if_inet6.h +F: include/net/inetpeer.h F: include/net/ip* +F: include/net/lwtunnel.h +F: include/net/ndisc.h +F: include/net/netns/nexthop.h +F: include/net/nexthop.h F: include/net/route.h -F: net/ipv4/ -F: net/ipv6/ +F: include/uapi/linux/fib_rules.h +F: include/uapi/linux/in_route.h +F: include/uapi/linux/mroute* +F: include/uapi/linux/nexthop.h +F: net/core/fib* +F: net/core/lwtunnel.c +F: net/ipv4/arp.c +F: net/ipv4/devinet.c +F: net/ipv4/fib* +F: net/ipv4/icmp.c +F: net/ipv4/igmp.c +F: net/ipv4/inet_fragment.c +F: net/ipv4/inetpeer.c +F: net/ipv4/ip* +F: net/ipv4/metrics.c +F: net/ipv4/netlink.c +F: net/ipv4/nexthop.c +F: net/ipv4/route.c +F: net/ipv6/addr* +F: net/ipv6/anycast.c +F: net/ipv6/exthdrs.c +F: net/ipv6/exthdrs_core.c +F: net/ipv6/fib* +F: net/ipv6/icmp.c +F: net/ipv6/ip* +F: net/ipv6/mcast* +F: net/ipv6/ndisc.c +F: net/ipv6/output_core.c +F: net/ipv6/reassembly.c +F: net/ipv6/route.c +F: tools/testing/selftests/net/fib* +F: tools/testing/selftests/net/forwarding/ NETWORKING [LABELED] (NetLabel, Labeled IPsec, SECMARK) M: Paul Moore @@ -18819,18 +18859,11 @@ F: Documentation/networking/net_failover.rst F: drivers/net/net_failover.c F: include/net/net_failover.h -NEXTHOP -M: David Ahern -L: netdev@vger.kernel.org -S: Maintained -F: include/net/netns/nexthop.h -F: include/net/nexthop.h -F: include/uapi/linux/nexthop.h -F: net/ipv4/nexthop.c - NFC SUBSYSTEM -L: netdev@vger.kernel.org -S: Orphan +M: David Heidelberg +L: 
oe-linux-nfc@lists.linux.dev +S: Maintained +T: git https://codeberg.org/linux-nfc/linux.git F: Documentation/devicetree/bindings/net/nfc/ F: drivers/nfc/ F: include/net/nfc/ @@ -20774,6 +20807,7 @@ M: Dominik Brodowski S: Odd Fixes T: git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/linux.git F: Documentation/pcmcia/ +F: drivers/net/ethernet/8390/pcnet_cs.c F: drivers/pcmcia/ F: include/pcmcia/ F: tools/pcmcia/ @@ -23369,7 +23403,7 @@ RUST [ALLOC] M: Danilo Krummrich R: Lorenzo Stoakes R: Vlastimil Babka -R: Liam R. Howlett +R: Liam R. Howlett R: Uladzislau Rezki L: rust-for-linux@vger.kernel.org S: Maintained @@ -23521,7 +23555,7 @@ F: drivers/s390/net/ S390 PCI SUBSYSTEM M: Niklas Schnelle -M: Gerald Schaefer +M: Gerd Bayer L: linux-s390@vger.kernel.org S: Supported F: Documentation/arch/s390/pci.rst @@ -24314,7 +24348,7 @@ F: include/media/i2c/rj54n1cb0c.h SHRINKER M: Andrew Morton M: Dave Chinner -R: Qi Zheng +R: Qi Zheng R: Roman Gushchin R: Muchun Song L: linux-mm@kvack.org @@ -24764,6 +24798,7 @@ SOFTWARE RAID (Multiple Disks) SUPPORT M: Song Liu M: Yu Kuai R: Li Nan +R: Xiao Ni L: linux-raid@vger.kernel.org S: Supported Q: https://patchwork.kernel.org/project/linux-raid/list/ diff --git a/Makefile b/Makefile index e27c91ea56fcf8..9f88dcaae38271 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 7 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index ff636197903838..c588eeea1485e2 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -58,8 +58,15 @@ static unsigned long ac97_reset_config[] = { GPIO95_AC97_nRESET, }; -void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio) +void pxa27x_configure_ac97reset(struct gpio_desc *gpiod, bool to_gpio) { + int reset_gpio; + + if (!gpiod) + return; + + reset_gpio = desc_to_gpio(gpiod); + /* * This helper function is used to work around a 
bug in the pxa27x's * ac97 controller during a warm reset. The configuration of the diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index d4d7451c2c129f..a8cb5a5c93b783 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -40,7 +40,7 @@ static __always_inline void __pmr_local_irq_enable(void) barrier(); } -static inline void arch_local_irq_enable(void) +static __always_inline void arch_local_irq_enable(void) { if (system_uses_irq_prio_masking()) { __pmr_local_irq_enable(); @@ -68,7 +68,7 @@ static __always_inline void __pmr_local_irq_disable(void) barrier(); } -static inline void arch_local_irq_disable(void) +static __always_inline void arch_local_irq_disable(void) { if (system_uses_irq_prio_masking()) { __pmr_local_irq_disable(); @@ -90,7 +90,7 @@ static __always_inline unsigned long __pmr_local_save_flags(void) /* * Save the current interrupt enable state. */ -static inline unsigned long arch_local_save_flags(void) +static __always_inline unsigned long arch_local_save_flags(void) { if (system_uses_irq_prio_masking()) { return __pmr_local_save_flags(); @@ -109,7 +109,7 @@ static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags) return flags != GIC_PRIO_IRQON; } -static inline bool arch_irqs_disabled_flags(unsigned long flags) +static __always_inline bool arch_irqs_disabled_flags(unsigned long flags) { if (system_uses_irq_prio_masking()) { return __pmr_irqs_disabled_flags(flags); @@ -128,7 +128,7 @@ static __always_inline bool __pmr_irqs_disabled(void) return __pmr_irqs_disabled_flags(__pmr_local_save_flags()); } -static inline bool arch_irqs_disabled(void) +static __always_inline bool arch_irqs_disabled(void) { if (system_uses_irq_prio_masking()) { return __pmr_irqs_disabled(); @@ -160,7 +160,7 @@ static __always_inline unsigned long __pmr_local_irq_save(void) return flags; } -static inline unsigned long arch_local_irq_save(void) +static __always_inline unsigned long 
arch_local_irq_save(void) { if (system_uses_irq_prio_masking()) { return __pmr_local_irq_save(); @@ -187,7 +187,7 @@ static __always_inline void __pmr_local_irq_restore(unsigned long flags) /* * restore saved IRQ state */ -static inline void arch_local_irq_restore(unsigned long flags) +static __always_inline void arch_local_irq_restore(unsigned long flags) { if (system_uses_irq_prio_masking()) { __pmr_local_irq_restore(flags); diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 74a4f738c5f52e..229ee7976f6934 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -68,7 +68,12 @@ #define KERNEL_SEGMENT_COUNT 5 #if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN -#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1) +/* + * KERNEL_SEGMENT_COUNT counts the permanent kernel VMAs. The early mapping + * has one additional split, [_text, _stext). Reserve one more page for the + * SWAPPER_BLOCK_SIZE-unaligned boundaries. + */ +#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 2) /* * The initial ID map consists of the kernel image, mapped as two separate * segments, and may appear misaligned wrt the swapper block size. This means diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 37414440cee7f1..043495f7fc78b9 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -50,6 +50,9 @@ #include +#define MARKER(m) \ + m, __after_##m = m - 1 + enum __kvm_host_smccc_func { /* Hypercalls that are unavailable once pKVM has finalised. 
*/ /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */ @@ -59,8 +62,10 @@ enum __kvm_host_smccc_func { __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs, __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs, __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config, + + MARKER(__KVM_HOST_SMCCC_FUNC_MIN_PKVM), + __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize, - __KVM_HOST_SMCCC_FUNC_MIN_PKVM = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize, /* Hypercalls that are always available and common to [nh]VHE/pKVM. */ __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, @@ -72,11 +77,20 @@ enum __kvm_host_smccc_func { __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range, __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context, __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff, + __KVM_HOST_SMCCC_FUNC___tracing_load, + __KVM_HOST_SMCCC_FUNC___tracing_unload, + __KVM_HOST_SMCCC_FUNC___tracing_enable, + __KVM_HOST_SMCCC_FUNC___tracing_swap_reader, + __KVM_HOST_SMCCC_FUNC___tracing_update_clock, + __KVM_HOST_SMCCC_FUNC___tracing_reset, + __KVM_HOST_SMCCC_FUNC___tracing_enable_event, + __KVM_HOST_SMCCC_FUNC___tracing_write_event, __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs, __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs, __KVM_HOST_SMCCC_FUNC___vgic_v5_save_apr, __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr, - __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM = __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr, + + MARKER(__KVM_HOST_SMCCC_FUNC_PKVM_ONLY), /* Hypercalls that are available only when pKVM has finalised. 
*/ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, @@ -100,14 +114,8 @@ enum __kvm_host_smccc_func { __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load, __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put, __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid, - __KVM_HOST_SMCCC_FUNC___tracing_load, - __KVM_HOST_SMCCC_FUNC___tracing_unload, - __KVM_HOST_SMCCC_FUNC___tracing_enable, - __KVM_HOST_SMCCC_FUNC___tracing_swap_reader, - __KVM_HOST_SMCCC_FUNC___tracing_update_clock, - __KVM_HOST_SMCCC_FUNC___tracing_reset, - __KVM_HOST_SMCCC_FUNC___tracing_enable_event, - __KVM_HOST_SMCCC_FUNC___tracing_write_event, + + MARKER(__KVM_HOST_SMCCC_FUNC_MAX) }; #define DECLARE_KVM_VHE_SYM(sym) extern char sym[] diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 851f6171751c04..65eead8362e0b4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -450,9 +450,6 @@ struct kvm_vcpu_fault_info { r = __VNCR_START__ + ((VNCR_ ## r) / 8), \ __after_##r = __MAX__(__before_##r - 1, r) -#define MARKER(m) \ - m, __after_##m = m - 1 - enum vcpu_sysreg { __INVALID_SYSREG__, /* 0 is reserved as an invalid value */ MPIDR_EL1, /* MultiProcessor Affinity Register */ @@ -1548,7 +1545,7 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature) #define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f)) #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f)) -#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED) +#define kvm_vcpu_initialized(v) vcpu_get_flag(v, VCPU_INITIALIZED) int kvm_trng_call(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM diff --git a/arch/arm64/kernel/pi/patch-scs.c b/arch/arm64/kernel/pi/patch-scs.c index dac568e4a54f23..3944ad899021cd 100644 --- a/arch/arm64/kernel/pi/patch-scs.c +++ b/arch/arm64/kernel/pi/patch-scs.c @@ -196,9 +196,9 @@ static int scs_handle_fde_frame(const struct eh_frame *frame, loc += *opcode++ * code_alignment_factor; loc += (*opcode++ << 8) * 
code_alignment_factor; loc += (*opcode++ << 16) * code_alignment_factor; - loc += (*opcode++ << 24) * code_alignment_factor; + loc += ((u64)*opcode++ << 24) * code_alignment_factor; size -= 4; - break; + break; case DW_CFA_def_cfa: case DW_CFA_offset_extended: diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 08ffc5a5aea4cc..38e6fa204c17b5 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -67,6 +67,9 @@ struct rt_sigframe_user_layout { unsigned long end_offset; }; +#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) +#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) + /* * Holds any EL0-controlled state that influences unprivileged memory accesses. * This includes both accesses done in userspace and uaccess done in the kernel. @@ -74,13 +77,35 @@ struct rt_sigframe_user_layout { * This state needs to be carefully managed to ensure that it doesn't cause * uaccess to fail when setting up the signal frame, and the signal handler * itself also expects a well-defined state when entered. + * + * The struct should be zero-initialised. Its members should only be accessed + * via the accessors below. __valid_fields tracks which of the fields are valid + * (have been set to some value). 
*/ struct user_access_state { - u64 por_el0; + unsigned int __valid_fields; + u64 __por_el0; }; -#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) -#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) +#define UA_STATE_HAS_POR_EL0 BIT(0) + +static void set_ua_state_por_el0(struct user_access_state *ua_state, + u64 por_el0) +{ + ua_state->__por_el0 = por_el0; + ua_state->__valid_fields |= UA_STATE_HAS_POR_EL0; +} + +static int get_ua_state_por_el0(const struct user_access_state *ua_state, + u64 *por_el0) +{ + if (ua_state->__valid_fields & UA_STATE_HAS_POR_EL0) { + *por_el0 = ua_state->__por_el0; + return 0; + } + + return -ENOENT; +} /* * Save the user access state into ua_state and reset it to disable any @@ -94,7 +119,7 @@ static void save_reset_user_access_state(struct user_access_state *ua_state) for (int pkey = 0; pkey < arch_max_pkey(); pkey++) por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX); - ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0); + set_ua_state_por_el0(ua_state, read_sysreg_s(SYS_POR_EL0)); write_sysreg_s(por_enable_all, SYS_POR_EL0); /* * No ISB required as we can tolerate spurious Overlay faults - @@ -122,8 +147,10 @@ static void set_handler_user_access_state(void) */ static void restore_user_access_state(const struct user_access_state *ua_state) { - if (system_supports_poe()) - write_sysreg_s(ua_state->por_el0, SYS_POR_EL0); + u64 por_el0; + + if (get_ua_state_por_el0(ua_state, &por_el0) == 0) + write_sysreg_s(por_el0, SYS_POR_EL0); } static void init_user_layout(struct rt_sigframe_user_layout *user) @@ -333,11 +360,16 @@ static int restore_fpmr_context(struct user_ctxs *user) static int preserve_poe_context(struct poe_context __user *ctx, const struct user_access_state *ua_state) { - int err = 0; + int err; + u64 por_el0; + + err = get_ua_state_por_el0(ua_state, &por_el0); + if (WARN_ON_ONCE(err)) + return err; __put_user_error(POE_MAGIC, &ctx->head.magic, err); __put_user_error(sizeof(*ctx), 
&ctx->head.size, err); - __put_user_error(ua_state->por_el0, &ctx->por_el0, err); + __put_user_error(por_el0, &ctx->por_el0, err); return err; } @@ -353,7 +385,7 @@ static int restore_poe_context(struct user_ctxs *user, __get_user_error(por_el0, &(user->poe->por_el0), err); if (!err) - ua_state->por_el0 = por_el0; + set_ua_state_por_el0(ua_state, por_el0); return err; } @@ -1095,7 +1127,7 @@ SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; - struct user_access_state ua_state; + struct user_access_state ua_state = {}; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; @@ -1507,7 +1539,7 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, { struct rt_sigframe_user_layout user; struct rt_sigframe __user *frame; - struct user_access_state ua_state; + struct user_access_state ua_state = {}; int err = 0; fpsimd_save_and_flush_current_state(); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 176cbe8baad30c..8bb2c7422cc8b0 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -824,6 +824,10 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE); + irq_lines |= (!irqchip_in_kernel(v->kvm) && + (kvm_timer_should_notify_user(v) || + kvm_pmu_should_notify_user(v))); + return ((irq_lines || kvm_vgic_vcpu_pending_irq(v)) && !kvm_arm_vcpu_stopped(v) && !v->arch.pause); } diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c index f35b8dddd7c1f4..0622162b089e50 100644 --- a/arch/arm64/kvm/config.c +++ b/arch/arm64/kvm/config.c @@ -131,7 +131,6 @@ struct reg_feat_map_desc { } #define FEAT_SPE ID_AA64DFR0_EL1, PMSVer, IMP -#define FEAT_SPE_FnE ID_AA64DFR0_EL1, PMSVer, V1P2 #define FEAT_BRBE ID_AA64DFR0_EL1, BRBE, IMP #define FEAT_TRC_SR ID_AA64DFR0_EL1, TraceVer, IMP #define FEAT_PMUv3 ID_AA64DFR0_EL1, PMUVer, IMP @@ -192,7 +191,7 @@ struct 
reg_feat_map_desc { #define FEAT_SRMASK ID_AA64MMFR4_EL1, SRMASK, IMP #define FEAT_PoPS ID_AA64MMFR4_EL1, PoPS, IMP #define FEAT_PFAR ID_AA64PFR1_EL1, PFAR, IMP -#define FEAT_Debugv8p9 ID_AA64DFR0_EL1, PMUVer, V3P9 +#define FEAT_Debugv8p9 ID_AA64DFR0_EL1, DebugVer, V8P9 #define FEAT_PMUv3_SS ID_AA64DFR0_EL1, PMSS, IMP #define FEAT_SEBEP ID_AA64DFR0_EL1, SEBEP, IMP #define FEAT_EBEP ID_AA64DFR1_EL1, EBEP, IMP @@ -283,7 +282,7 @@ static bool feat_anerr(struct kvm *kvm) static bool feat_sme_smps(struct kvm *kvm) { /* - * Revists this if KVM ever supports SME -- this really should + * Revisit this if KVM ever supports SME -- this really should * look at the guest's view of SMIDR_EL1. Funnily enough, this * is not captured in the JSON file, but only as a note in the * ARM ARM. @@ -295,17 +294,27 @@ static bool feat_sme_smps(struct kvm *kvm) static bool feat_spe_fds(struct kvm *kvm) { /* - * Revists this if KVM ever supports SPE -- this really should + * Revisit this if KVM ever supports SPE -- this really should * look at the guest's view of PMSIDR_EL1. */ return (kvm_has_feat(kvm, FEAT_SPEv1p4) && (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FDS)); } +static bool feat_spe_fne(struct kvm *kvm) +{ + /* + * Revisit this if KVM ever supports SPE -- this really should + * look at the guest's view of PMSIDR_EL1. + */ + return (kvm_has_feat(kvm, FEAT_SPEv1p2) && + (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FnE)); +} + static bool feat_trbe_mpam(struct kvm *kvm) { /* - * Revists this if KVM ever supports both MPAM and TRBE -- + * Revisit this if KVM ever supports both MPAM and TRBE -- * this really should look at the guest's view of TRBIDR_EL1. 
*/ return (kvm_has_feat(kvm, FEAT_TRBE) && @@ -537,7 +546,7 @@ static const struct reg_bits_to_feat_map hdfgrtr_feat_map[] = { HDFGRTR_EL2_PMBPTR_EL1 | HDFGRTR_EL2_PMBLIMITR_EL1, FEAT_SPE), - NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE), + NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, feat_spe_fne), NEEDS_FEAT(HDFGRTR_EL2_nBRBDATA | HDFGRTR_EL2_nBRBCTL | HDFGRTR_EL2_nBRBIDR, @@ -605,7 +614,7 @@ static const struct reg_bits_to_feat_map hdfgwtr_feat_map[] = { HDFGWTR_EL2_PMBPTR_EL1 | HDFGWTR_EL2_PMBLIMITR_EL1, FEAT_SPE), - NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE), + NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, feat_spe_fne), NEEDS_FEAT(HDFGWTR_EL2_nBRBDATA | HDFGWTR_EL2_nBRBCTL, FEAT_BRBE), diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 73f2e0221e7032..06db299c37a89a 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -709,6 +709,14 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__kvm_tlb_flush_vmid_range), HANDLE_FUNC(__kvm_flush_cpu_context), HANDLE_FUNC(__kvm_timer_set_cntvoff), + HANDLE_FUNC(__tracing_load), + HANDLE_FUNC(__tracing_unload), + HANDLE_FUNC(__tracing_enable), + HANDLE_FUNC(__tracing_swap_reader), + HANDLE_FUNC(__tracing_update_clock), + HANDLE_FUNC(__tracing_reset), + HANDLE_FUNC(__tracing_enable_event), + HANDLE_FUNC(__tracing_write_event), HANDLE_FUNC(__vgic_v3_save_aprs), HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs), HANDLE_FUNC(__vgic_v5_save_apr), @@ -735,22 +743,16 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__pkvm_vcpu_load), HANDLE_FUNC(__pkvm_vcpu_put), HANDLE_FUNC(__pkvm_tlb_flush_vmid), - HANDLE_FUNC(__tracing_load), - HANDLE_FUNC(__tracing_unload), - HANDLE_FUNC(__tracing_enable), - HANDLE_FUNC(__tracing_swap_reader), - HANDLE_FUNC(__tracing_update_clock), - HANDLE_FUNC(__tracing_reset), - HANDLE_FUNC(__tracing_enable_event), - HANDLE_FUNC(__tracing_write_event), }; static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) { 
DECLARE_REG(unsigned long, id, host_ctxt, 0); - unsigned long hcall_min = 0, hcall_max = -1; + unsigned long hcall_min = 0, hcall_max = __KVM_HOST_SMCCC_FUNC_MAX; hcall_t hfn; + BUILD_BUG_ON(ARRAY_SIZE(host_hcall) != __KVM_HOST_SMCCC_FUNC_MAX); + /* * If pKVM has been initialised then reject any calls to the * early "privileged" hypercalls. Note that we cannot reject @@ -763,16 +765,14 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) if (static_branch_unlikely(&kvm_protected_mode_initialized)) { hcall_min = __KVM_HOST_SMCCC_FUNC_MIN_PKVM; } else { - hcall_max = __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM; + hcall_max = __KVM_HOST_SMCCC_FUNC_PKVM_ONLY; } id &= ~ARM_SMCCC_CALL_HINTS; id -= KVM_HOST_SMCCC_ID(0); - if (unlikely(id < hcall_min || id > hcall_max || - id >= ARRAY_SIZE(host_hcall))) { + if (unlikely(id < hcall_min || id >= hcall_max)) goto inval; - } hfn = host_hcall[id]; if (unlikely(!hfn)) @@ -805,6 +805,10 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt) } func_id &= ~ARM_SMCCC_CALL_HINTS; + if (upper_32_bits(func_id)) { + cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED; + goto exit_skip_instr; + } handled = kvm_host_psci_handler(host_ctxt, func_id); if (!handled) diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 7ed96d64d61107..e7496eb8562897 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -266,7 +266,8 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle, if (hyp_vm->kvm.created_vcpus <= vcpu_idx) goto unlock; - hyp_vcpu = hyp_vm->vcpus[vcpu_idx]; + /* Pairs with smp_store_release() in register_hyp_vcpu(). */ + hyp_vcpu = smp_load_acquire(&hyp_vm->vcpus[vcpu_idx]); if (!hyp_vcpu) goto unlock; @@ -860,12 +861,30 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, * the page-aligned size of 'struct pkvm_hyp_vcpu'. * Return 0 on success, negative error code on failure. 
*/ +static int register_hyp_vcpu(struct pkvm_hyp_vm *hyp_vm, + struct pkvm_hyp_vcpu *hyp_vcpu) +{ + unsigned int idx = hyp_vcpu->vcpu.vcpu_idx; + + if (idx >= hyp_vm->kvm.created_vcpus) + return -EINVAL; + + if (hyp_vm->vcpus[idx]) + return -EINVAL; + + /* + * Ensure the hyp_vcpu is initialised before publishing it to + * the vCPU-load path via 'hyp_vm->vcpus[]'. + */ + smp_store_release(&hyp_vm->vcpus[idx], hyp_vcpu); + return 0; +} + int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, unsigned long vcpu_hva) { struct pkvm_hyp_vcpu *hyp_vcpu; struct pkvm_hyp_vm *hyp_vm; - unsigned int idx; int ret; hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu)); @@ -884,18 +903,11 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, if (ret) goto unlock; - idx = hyp_vcpu->vcpu.vcpu_idx; - if (idx >= hyp_vm->kvm.created_vcpus) { - ret = -EINVAL; - goto unlock; - } - - if (hyp_vm->vcpus[idx]) { - ret = -EINVAL; - goto unlock; + ret = register_hyp_vcpu(hyp_vm, hyp_vcpu); + if (ret) { + unpin_host_vcpu(host_vcpu); + unpin_host_sve_state(hyp_vcpu); } - - hyp_vm->vcpus[idx] = hyp_vcpu; unlock: hyp_spin_unlock(&vm_table_lock); diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index d8e5b563fd3d9f..d461981616d902 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -312,15 +312,15 @@ void __noreturn __pkvm_init_finalise(void) }; pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops; - ret = fix_host_ownership(); + ret = fix_hyp_pgtable_refcnt(); if (ret) goto out; - ret = fix_hyp_pgtable_refcnt(); + ret = hyp_create_fixmap(); if (ret) goto out; - ret = hyp_create_fixmap(); + ret = fix_host_ownership(); if (ret) goto out; diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c index 406845b3117cfb..0643e333db35dc 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c @@ -91,7 +91,7 @@ static int 
vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu, * migration from old kernels to new kernels with legacy * userspace. */ - reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg); + reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val); switch (reg) { case KVM_VGIC_IMP_REV_2: case KVM_VGIC_IMP_REV_3: diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 89edb84d1ac6d2..5913a20d830191 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -194,7 +194,7 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu, if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK) return -EINVAL; - reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg); + reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val); switch (reg) { case KVM_VGIC_IMP_REV_2: case KVM_VGIC_IMP_REV_3: diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 31430e9bcfdd9d..7650f2adb5cf86 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1414,6 +1414,9 @@ static inline char *debug_get_user_string(const char __user *user_buf, { char *buffer; + if (!user_len) + return ERR_PTR(-EINVAL); + buffer = memdup_user_nul(user_buf, user_len); if (IS_ERR(buffer)) return buffer; @@ -1584,6 +1587,11 @@ static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view, char input_buf[1]; int rc = user_len; + if (!user_len) { + rc = -EINVAL; + goto out; + } + if (user_len > 0x10000) user_len = 0x10000; if (*offset != 0) { diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 191cc53caead39..028aeb9c48d6ff 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -438,7 +438,7 @@ void do_secure_storage_access(struct pt_regs *regs) panic("Unexpected PGM 0x3d with TEID bit 61=0"); } if (is_kernel_fault(regs)) { - folio = phys_to_folio(addr); + folio = virt_to_folio((void *)addr); if (unlikely(!folio_try_get(folio))) return; rc = uv_convert_from_secure(folio_to_phys(folio)); diff --git a/arch/sh/include/asm/setup.h 
b/arch/sh/include/asm/setup.h index 63c9efc0634868..8488f76b48b405 100644 --- a/arch/sh/include/asm/setup.h +++ b/arch/sh/include/asm/setup.h @@ -7,7 +7,7 @@ /* * This is set up by the setup-routine at boot-time */ -extern unsigned char *boot_params_page; +extern unsigned char boot_params_page[]; #define PARAM boot_params_page #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000)) diff --git a/crypto/authencesn.c b/crypto/authencesn.c index af3d584e584fbc..522df41365d8f9 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,6 +390,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, auth = crypto_spawn_ahash_alg(&ctx->auth); auth_base = &auth->base; + if (auth->digestsize > 0 && auth->digestsize < 4) { + err = -EINVAL; + goto err_free_inst; + } + err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst), crypto_attr_alg_name(tb[2]), 0, mask); if (err) diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c index b406d7a98996ce..cac07e997028a3 100644 --- a/drivers/acpi/acpi_tad.c +++ b/drivers/acpi/acpi_tad.c @@ -605,15 +605,12 @@ static umode_t acpi_tad_attr_is_visible(struct kobject *kobj, return 0; } -static const struct attribute_group acpi_tad_attr_group = { +static const struct attribute_group acpi_tad_group = { .attrs = acpi_tad_attrs, .is_visible = acpi_tad_attr_is_visible, }; -static const struct attribute_group *acpi_tad_attr_groups[] = { - &acpi_tad_attr_group, - NULL, -}; +__ATTRIBUTE_GROUPS(acpi_tad); #ifdef CONFIG_RTC_CLASS /* RTC class device interface */ @@ -683,9 +680,8 @@ static int acpi_tad_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) acpi_tad_rt_to_tm(&rt, &tm_now); - value = ktime_divns(ktime_sub(rtc_tm_to_ktime(t->time), - rtc_tm_to_ktime(tm_now)), NSEC_PER_SEC); - if (value <= 0 || value > U32_MAX) + value = rtc_tm_to_time64(&t->time) - rtc_tm_to_time64(&tm_now); + if (value <= 0 || value >= U32_MAX) return -EINVAL; } @@ -748,8 +744,7 @@ static int acpi_tad_rtc_read_alarm(struct 
device *dev, struct rtc_wkalrm *t) if (retval != ACPI_TAD_WAKE_DISABLED) { t->enabled = 1; - t->time = rtc_ktime_to_tm(ktime_add_ns(rtc_tm_to_ktime(tm_now), - (u64)retval * NSEC_PER_SEC)); + rtc_time64_to_tm(rtc_tm_to_time64(&tm_now) + retval, &t->time); } else { t->enabled = 0; t->time = tm_now; @@ -795,9 +790,9 @@ static int acpi_tad_disable_timer(struct device *dev, u32 timer_id) return acpi_tad_wake_set(dev, "_STV", timer_id, ACPI_TAD_WAKE_DISABLED); } -static void acpi_tad_remove(struct platform_device *pdev) +static void acpi_tad_remove(void *data) { - struct device *dev = &pdev->dev; + struct device *dev = data; struct acpi_tad_driver_data *dd = dev_get_drvdata(dev); device_init_wakeup(dev, false); @@ -824,6 +819,7 @@ static int acpi_tad_probe(struct platform_device *pdev) struct acpi_tad_driver_data *dd; acpi_status status; unsigned long long caps; + int ret; /* * Initialization failure messages are mostly about firmware issues, so @@ -863,13 +859,21 @@ static int acpi_tad_probe(struct platform_device *pdev) } /* - * The platform bus type layer tells the ACPI PM domain powers up the - * device, so set the runtime PM status of it to "active". + * The platform bus type probe callback tells the ACPI PM domain to + * power up the device, so set the runtime PM status of it to "active". */ pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_runtime_suspend(dev); + /* + * acpi_tad_remove() needs to run after unregistering the RTC class + * device to avoid racing with the latter's callbacks. 
+ */ + ret = devm_add_action_or_reset(&pdev->dev, acpi_tad_remove, &pdev->dev); + if (ret) + return ret; + if (caps & ACPI_TAD_RT) acpi_tad_register_rtc(dev, caps); @@ -885,10 +889,9 @@ static struct platform_driver acpi_tad_driver = { .driver = { .name = "acpi-tad", .acpi_match_table = acpi_tad_ids, - .dev_groups = acpi_tad_attr_groups, + .dev_groups = acpi_tad_groups, }, .probe = acpi_tad_probe, - .remove = acpi_tad_remove, }; MODULE_DEVICE_TABLE(acpi, acpi_tad_ids); diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index a9248af078f69b..1f3fa2278584b1 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -401,8 +401,18 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region( return NULL; } + +static bool is_memory_injection(u32 type, u32 flags) +{ + if (flags & SETWA_FLAGS_EINJV2) + return !!(type & ACPI_EINJV2_MEMORY); + if (type & ACPI5_VENDOR_BIT) + return !!(vendor_flags & SETWA_FLAGS_MEM); + return !!(type & MEM_ERROR_MASK) || !!(flags & SETWA_FLAGS_MEM); +} + /* Execute instructions in trigger error action table */ -static int __einj_error_trigger(u64 trigger_paddr, u32 type, +static int __einj_error_trigger(u64 trigger_paddr, u32 type, u32 flags, u64 param1, u64 param2) { struct acpi_einj_trigger trigger_tab; @@ -480,7 +490,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type, * This will cause resource conflict with regular memory. So * remove it from trigger table resources. 
*/ - if ((param_extension || acpi5) && (type & MEM_ERROR_MASK) && param2) { + if ((param_extension || acpi5) && is_memory_injection(type, flags)) { struct apei_resources addr_resources; apei_resources_init(&addr_resources); @@ -660,7 +670,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, return rc; trigger_paddr = apei_exec_ctx_get_output(&ctx); if (notrigger == 0) { - rc = __einj_error_trigger(trigger_paddr, type, param1, param2); + rc = __einj_error_trigger(trigger_paddr, type, flags, param1, param2); if (rc) return rc; } @@ -718,35 +728,30 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3, SETWA_FLAGS_PCIE_SBDF | SETWA_FLAGS_EINJV2))) return -EINVAL; + /* + * Injections targeting a CXL 1.0/1.1 port have to be injected + * via the einj_cxl_rch_error_inject() path as that does the proper + * validation of the given RCRB base (MMIO) address. + */ + if (einj_is_cxl_error_type(type) && (flags & SETWA_FLAGS_MEM)) + return -EINVAL; + /* check if type is a valid EINJv2 error type */ if (is_v2) { if (!(type & available_error_type_v2)) return -EINVAL; } - /* - * We need extra sanity checks for memory errors. - * Other types leap directly to injection. - */ /* ensure param1/param2 existed */ if (!(param_extension || acpi5)) goto inject; - /* ensure injection is memory related */ - if (type & ACPI5_VENDOR_BIT) { - if (vendor_flags != SETWA_FLAGS_MEM) - goto inject; - } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM)) { - goto inject; - } - /* - * Injections targeting a CXL 1.0/1.1 port have to be injected - * via the einj_cxl_rch_error_inject() path as that does the proper - * validation of the given RCRB base (MMIO) address. + * We need extra sanity checks for memory errors. + * Other types leap directly to injection. 
*/ - if (einj_is_cxl_error_type(type) && (flags & SETWA_FLAGS_MEM)) - return -EINVAL; + if (!is_memory_injection(type, flags)) + goto inject; /* * Disallow crazy address masks that give BIOS leeway to pick diff --git a/drivers/acpi/arm64/cpuidle.c b/drivers/acpi/arm64/cpuidle.c index 801f9c45014255..c68a5db8ebba85 100644 --- a/drivers/acpi/arm64/cpuidle.c +++ b/drivers/acpi/arm64/cpuidle.c @@ -16,7 +16,7 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) { - int i, count; + int i; struct acpi_lpi_state *lpi; struct acpi_processor *pr = per_cpu(processors, cpu); @@ -30,14 +30,10 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) if (!psci_ops.cpu_suspend) return -EOPNOTSUPP; - count = pr->power.count - 1; - if (count <= 0) - return -ENODEV; - - for (i = 0; i < count; i++) { + for (i = 1; i < pr->power.count; i++) { u32 state; - lpi = &pr->power.lpi_states[i + 1]; + lpi = &pr->power.lpi_states[i]; /* * Only bits[31:0] represent a PSCI power_state while * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 2e91c5a977611d..f370be8715ae24 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -362,7 +362,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd) end: if (cmd == CMD_WRITE) { if (unlikely(ret)) { - for_each_online_cpu(i) { + for_each_possible_cpu(i) { struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i); if (!desc) @@ -524,13 +524,13 @@ int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data) else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY; - for_each_online_cpu(i) { + for_each_possible_cpu(i) { if (i == cpu) continue; match_cpc_ptr = per_cpu(cpc_desc_ptr, i); if (!match_cpc_ptr) - goto err_fault; + continue; match_pdomain = &(match_cpc_ptr->domain_info); if (match_pdomain->domain != pdomain->domain) diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 
0a3c8232d15d7a..458efa4fe9d4b0 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -916,6 +916,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "82K8"), }, }, + { + .callback = video_detect_force_native, + /* HP OMEN Gaming Laptop 16-n0xxx */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-n0xxx"), + }, + }, /* * x86 android tablets which directly control the backlight through diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c index a5b959891cb70f..40baeac594a9f9 100644 --- a/drivers/ata/pata_parport/pata_parport.c +++ b/drivers/ata/pata_parport/pata_parport.c @@ -459,19 +459,11 @@ static void pata_parport_dev_release(struct device *dev) kfree(pi); } -static void pata_parport_bus_release(struct device *dev) -{ - /* nothing to do here but required to avoid warning on device removal */ -} - static const struct bus_type pata_parport_bus_type = { .name = DRV_NAME, }; -static struct device pata_parport_bus = { - .init_name = DRV_NAME, - .release = pata_parport_bus_release, -}; +static struct device *pata_parport_bus; static const struct scsi_host_template pata_parport_sht = { PATA_PARPORT_SHT("pata_parport") @@ -518,7 +510,7 @@ static struct pi_adapter *pi_init_one(struct parport *parport, } /* set up pi->dev before pi_probe_unit() so it can use dev_printk() */ - pi->dev.parent = &pata_parport_bus; + pi->dev.parent = pata_parport_bus; pi->dev.bus = &pata_parport_bus_type; pi->dev.driver = &pr->driver; pi->dev.release = pata_parport_dev_release; @@ -780,8 +772,9 @@ static __init int pata_parport_init(void) return error; } - error = device_register(&pata_parport_bus); - if (error) { + pata_parport_bus = root_device_register(DRV_NAME); + if (IS_ERR(pata_parport_bus)) { + error = PTR_ERR(pata_parport_bus); pr_err("failed to register pata_parport bus, error: %d\n", error); goto out_unregister_bus; } 
@@ -811,7 +804,7 @@ static __init int pata_parport_init(void) out_remove_new: bus_remove_file(&pata_parport_bus_type, &bus_attr_new_device); out_unregister_dev: - device_unregister(&pata_parport_bus); + root_device_unregister(pata_parport_bus); out_unregister_bus: bus_unregister(&pata_parport_bus_type); return error; @@ -822,7 +815,7 @@ static __exit void pata_parport_exit(void) parport_unregister_driver(&pata_parport_driver); bus_remove_file(&pata_parport_bus_type, &bus_attr_new_device); bus_remove_file(&pata_parport_bus_type, &bus_attr_delete_device); - device_unregister(&pata_parport_bus); + root_device_unregister(pata_parport_bus); bus_unregister(&pata_parport_bus_type); } diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index a11b30dda23be5..78c5f05a2ec101 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -1132,6 +1132,7 @@ EXPORT_SYMBOL(release_firmware); /* Async support */ struct firmware_work { struct work_struct work; + struct list_head list; struct module *module; const char *name; struct device *device; @@ -1140,6 +1141,17 @@ struct firmware_work { u32 opt_flags; }; +static LIST_HEAD(firmware_work_list); +static DEFINE_SPINLOCK(firmware_work_lock); + +static void firmware_work_free(struct firmware_work *fw_work) +{ + put_device(fw_work->device); /* taken in request_firmware_nowait() */ + module_put(fw_work->module); + kfree_const(fw_work->name); + kfree(fw_work); +} + static void request_firmware_work_func(struct work_struct *work) { struct firmware_work *fw_work; @@ -1150,11 +1162,15 @@ static void request_firmware_work_func(struct work_struct *work) _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0, fw_work->opt_flags); fw_work->cont(fw, fw_work->context); - put_device(fw_work->device); /* taken in request_firmware_nowait() */ - module_put(fw_work->module); - kfree_const(fw_work->name); - kfree(fw_work); + spin_lock_irq(&firmware_work_lock); + if 
(!list_empty(&fw_work->list)) { + list_del_init(&fw_work->list); + spin_unlock_irq(&firmware_work_lock); + firmware_work_free(fw_work); + return; + } + spin_unlock_irq(&firmware_work_lock); } @@ -1164,6 +1180,7 @@ static int _request_firmware_nowait( void (*cont)(const struct firmware *fw, void *context), bool nowarn) { struct firmware_work *fw_work; + unsigned long flags; fw_work = kzalloc_obj(struct firmware_work, gfp); if (!fw_work) @@ -1196,7 +1213,12 @@ static int _request_firmware_nowait( get_device(fw_work->device); INIT_WORK(&fw_work->work, request_firmware_work_func); + + spin_lock_irqsave(&firmware_work_lock, flags); + list_add_tail(&fw_work->list, &firmware_work_list); schedule_work(&fw_work->work); + spin_unlock_irqrestore(&firmware_work_lock, flags); + return 0; } @@ -1259,6 +1281,44 @@ int firmware_request_nowait_nowarn( } EXPORT_SYMBOL_GPL(firmware_request_nowait_nowarn); +/** + * request_firmware_nowait_cancel() - cancel an async firmware request + * @device: device for which the firmware is being loaded + * @context: context passed to request_firmware_nowait() + * @cont: callback passed to request_firmware_nowait() + * + * Cancel a pending request_firmware_nowait() request for @device, @context + * and @cont. If the associated work has already started, this function waits + * until the callback has returned. If the callback has already completed, this + * function does nothing. + * + * This function may sleep. 
+ */ +void request_firmware_nowait_cancel(struct device *device, void *context, + void (*cont)(const struct firmware *fw, + void *context)) +{ + struct firmware_work *fw_work = NULL; + struct firmware_work *tmp; + + spin_lock_irq(&firmware_work_lock); + list_for_each_entry_reverse(tmp, &firmware_work_list, list) { + if (tmp->device == device && tmp->context == context && + tmp->cont == cont) { + fw_work = tmp; + list_del_init(&fw_work->list); + break; + } + } + spin_unlock_irq(&firmware_work_lock); + + if (!fw_work) + return; + cancel_work_sync(&fw_work->work); + firmware_work_free(fw_work); +} +EXPORT_SYMBOL_GPL(request_firmware_nowait_cancel); + #ifdef CONFIG_FW_CACHE static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index fc049612d6dc1a..62934cf4b10de8 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -631,6 +631,16 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi) WARN_ON(!cdo->generic_packet); + /* + * Propagate the drive's write support to the block layer so BLKROGET + * reflects actual write capability. Drivers that use GET CONFIGURATION + * features (CDC_MRW_W, CDC_RAM) must have called + * cdrom_probe_write_features() before register_cdrom() so the mask is + * complete here. + */ + set_disk_ro(disk, !CDROM_CAN(CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | + CDC_CD_RW)); + cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); mutex_lock(&cdrom_mutex); list_add(&cdi->list, &cdrom_list); @@ -742,6 +752,44 @@ static int cdrom_is_random_writable(struct cdrom_device_info *cdi, int *write) return 0; } +/* + * Probe write-related MMC features via GET CONFIGURATION and update + * cdi->mask accordingly. Drivers that populate cdi->mask from the MODE SENSE + * capabilities page (e.g. 
sr) should call this after those MODE SENSE bits + * have been set but before register_cdrom(), so that the full set of + * write-capability bits is known by the time register_cdrom() decides on the + * initial read-only state of the disk. + */ +void cdrom_probe_write_features(struct cdrom_device_info *cdi) +{ + int mrw, mrw_write, ram_write; + + mrw = 0; + if (!cdrom_is_mrw(cdi, &mrw_write)) + mrw = 1; + + if (CDROM_CAN(CDC_MO_DRIVE)) + ram_write = 1; + else + (void) cdrom_is_random_writable(cdi, &ram_write); + + if (mrw) + cdi->mask &= ~CDC_MRW; + else + cdi->mask |= CDC_MRW; + + if (mrw_write) + cdi->mask &= ~CDC_MRW_W; + else + cdi->mask |= CDC_MRW_W; + + if (ram_write) + cdi->mask &= ~CDC_RAM; + else + cdi->mask |= CDC_RAM; +} +EXPORT_SYMBOL(cdrom_probe_write_features); + static int cdrom_media_erasable(struct cdrom_device_info *cdi) { disc_information di; @@ -894,33 +942,8 @@ static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi) */ static int cdrom_open_write(struct cdrom_device_info *cdi) { - int mrw, mrw_write, ram_write; int ret = 1; - mrw = 0; - if (!cdrom_is_mrw(cdi, &mrw_write)) - mrw = 1; - - if (CDROM_CAN(CDC_MO_DRIVE)) - ram_write = 1; - else - (void) cdrom_is_random_writable(cdi, &ram_write); - - if (mrw) - cdi->mask &= ~CDC_MRW; - else - cdi->mask |= CDC_MRW; - - if (mrw_write) - cdi->mask &= ~CDC_MRW_W; - else - cdi->mask |= CDC_MRW_W; - - if (ram_write) - cdi->mask &= ~CDC_RAM; - else - cdi->mask |= CDC_RAM; - if (CDROM_CAN(CDC_MRW_W)) ret = cdrom_mrw_open_write(cdi); else if (CDROM_CAN(CDC_DVD_RAM)) diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c index af7ce62ec55ca8..0ff1658c2dc1ba 100644 --- a/drivers/dpll/dpll_netlink.c +++ b/drivers/dpll/dpll_netlink.c @@ -900,11 +900,21 @@ int dpll_pin_delete_ntf(struct dpll_pin *pin) return dpll_pin_event_send(DPLL_CMD_PIN_DELETE_NTF, pin); } +/** + * __dpll_pin_change_ntf - notify that the pin has been changed + * @pin: registered pin pointer + * + * Context: caller must hold 
dpll_lock. Suitable for use inside pin + * callbacks which are already invoked under dpll_lock. + * Return: 0 if succeeds, error code otherwise. + */ int __dpll_pin_change_ntf(struct dpll_pin *pin) { + lockdep_assert_held(&dpll_lock); dpll_pin_notify(pin, DPLL_PIN_CHANGED); return dpll_pin_event_send(DPLL_CMD_PIN_CHANGE_NTF, pin); } +EXPORT_SYMBOL_GPL(__dpll_pin_change_ntf); /** * dpll_pin_change_ntf - notify that the pin has been changed diff --git a/drivers/dpll/dpll_netlink.h b/drivers/dpll/dpll_netlink.h index dd28b56d27c56d..a9cfd55f57fc42 100644 --- a/drivers/dpll/dpll_netlink.h +++ b/drivers/dpll/dpll_netlink.h @@ -11,5 +11,3 @@ int dpll_device_delete_ntf(struct dpll_device *dpll); int dpll_pin_create_ntf(struct dpll_pin *pin); int dpll_pin_delete_ntf(struct dpll_pin *pin); - -int __dpll_pin_change_ntf(struct dpll_pin *pin); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 737ef1ef96a559..66ca043658ff8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2839,8 +2839,12 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) * that checks whether the PSP is running. A solution for those issues * in the APU is to trigger a GPU reset, but this should be done during * the unload phase to avoid adding boot latency and screen flicker. + * GFX V11 has GC block as default off IP. Every time AMDGPU driver sends + * a request to PMFW to unload MP1, PMFW will put GC in reset and power down + * the voltage. Hence, skipping reset for APUs with GFX V11 or later. 
*/ - if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) { + if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu && + amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 0, 0)) { r = amdgpu_asic_reset(adev); if (r) dev_err(adev->dev, "asic reset on %s failed\n", __func__); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index fcad7daaa41b54..8d99bfaa498f68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -3090,10 +3090,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 5, 1): case IP_VERSION(11, 5, 2): case IP_VERSION(11, 5, 3): - adev->family = AMDGPU_FAMILY_GC_11_5_0; - break; case IP_VERSION(11, 5, 4): - adev->family = AMDGPU_FAMILY_GC_11_5_4; + adev->family = AMDGPU_FAMILY_GC_11_5_0; break; case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e47921e2a9af27..46aae3fad4bf6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -3158,8 +3158,10 @@ static int __init amdgpu_init(void) amdgpu_register_atpx_handler(); amdgpu_acpi_detect(); - /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ - amdgpu_amdkfd_init(); + /* Ignore KFD init failures when CONFIG_HSA_AMD is not set. 
*/ + r = amdgpu_amdkfd_init(); + if (r && r != -ENOENT) + goto error_fence; if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) { add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 285e217fba040b..3d9497d121ca20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -314,7 +314,10 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, mc->gart_start = max_mc_address - mc->gart_size + 1; break; case AMDGPU_GART_PLACEMENT_LOW: - mc->gart_start = 0; + if (size_bf >= mc->gart_size) + mc->gart_start = 0; + else + mc->gart_start = ALIGN(mc->fb_end, four_gb); break; case AMDGPU_GART_PLACEMENT_BEST_FIT: default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 06efce38f32382..71272f40feef83 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -873,68 +873,59 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ? 
-EFAULT : 0; } case AMDGPU_INFO_READ_MMR_REG: { - int ret = 0; - unsigned int n, alloc_size; - uint32_t *regs; unsigned int se_num = (info->read_mmr_reg.instance >> AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & AMDGPU_INFO_MMR_SE_INDEX_MASK; unsigned int sh_num = (info->read_mmr_reg.instance >> AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & AMDGPU_INFO_MMR_SH_INDEX_MASK; - - if (!down_read_trylock(&adev->reset_domain->sem)) - return -ENOENT; + unsigned int alloc_size; + uint32_t *regs; + int ret; /* set full masks if the userspace set all bits * in the bitfields */ - if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) { + if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; - } else if (se_num >= AMDGPU_GFX_MAX_SE) { - ret = -EINVAL; - goto out; - } + else if (se_num >= AMDGPU_GFX_MAX_SE) + return -EINVAL; - if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) { + if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; - } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) { - ret = -EINVAL; - goto out; - } + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) + return -EINVAL; - if (info->read_mmr_reg.count > 128) { - ret = -EINVAL; - goto out; - } + if (info->read_mmr_reg.count > 128) + return -EINVAL; - regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); - if (!regs) { - ret = -ENOMEM; - goto out; - } + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), + GFP_KERNEL); + if (!regs) + return -ENOMEM; + down_read(&adev->reset_domain->sem); alloc_size = info->read_mmr_reg.count * sizeof(*regs); - amdgpu_gfx_off_ctrl(adev, false); + ret = 0; for (i = 0; i < info->read_mmr_reg.count; i++) { if (amdgpu_asic_read_register(adev, se_num, sh_num, info->read_mmr_reg.dword_offset + i, ®s[i])) { DRM_DEBUG_KMS("unallowed offset %#x\n", info->read_mmr_reg.dword_offset + i); - kfree(regs); - amdgpu_gfx_off_ctrl(adev, true); ret = -EFAULT; - goto out; + break; } } amdgpu_gfx_off_ctrl(adev, true); - n = copy_to_user(out, regs, min(size, alloc_size)); - kfree(regs); - ret = (n 
? -EFAULT : 0); -out: up_read(&adev->reset_domain->sem); + + if (!ret) { + ret = copy_to_user(out, regs, min(size, alloc_size)) + ? -EFAULT : 0; + } + kfree(regs); return ret; } case AMDGPU_INFO_DEV_INFO: { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index cdf4909592d224..0c57fe25989452 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -1950,7 +1950,7 @@ void amdgpu_ras_check_bad_page_status(struct amdgpu_device *adev) if (!control || amdgpu_bad_page_threshold == 0) return; - if (control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) { + if (control->ras_num_bad_pages > ras->bad_page_cnt_threshold) { if (amdgpu_dpm_send_rma_reason(adev)) dev_warn(adev->dev, "Unable to send out-of-band RMA CPER"); else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 0dc68fb9d88e57..3d2e00efc74156 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -75,6 +75,9 @@ static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev, unsigned int type, uint64_t size_in_page) { + if (!size_in_page) + return 0; + return ttm_range_man_init(&adev->mman.bdev, type, false, size_in_page); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index d5abf785ca17e3..de140a8ed1354a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -205,6 +205,19 @@ void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue) msecs_to_jiffies(timeout_ms)); } +void amdgpu_userq_process_fence_irq(struct amdgpu_device *adev, u32 doorbell) +{ + struct xarray *xa = &adev->userq_doorbell_xa; + struct amdgpu_usermode_queue *queue; + unsigned long flags; + + xa_lock_irqsave(xa, flags); + queue = xa_load(xa, doorbell); + if (queue) + 
amdgpu_userq_fence_driver_process(queue->fence_drv); + xa_unlock_irqrestore(xa, flags); +} + static void amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue *queue) { INIT_DELAYED_WORK(&queue->hang_detect_work, amdgpu_userq_hang_detect_work); @@ -643,12 +656,6 @@ amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_que #endif amdgpu_userq_detect_and_reset_queues(uq_mgr); r = amdgpu_userq_unmap_helper(queue); - /*TODO: It requires a reset for userq hw unmap error*/ - if (r) { - drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n"); - queue->state = AMDGPU_USERQ_STATE_HUNG; - } - atomic_dec(&uq_mgr->userq_count[queue->queue_type]); amdgpu_userq_cleanup(queue); mutex_unlock(&uq_mgr->userq_mutex); @@ -1187,7 +1194,7 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) bo = range->bo; ret = amdgpu_ttm_tt_get_user_pages(bo, range); if (ret) - goto unlock_all; + goto free_ranges; } invalidated = true; @@ -1214,6 +1221,7 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) unlock_all: drm_exec_fini(&exec); +free_ranges: xa_for_each(&xa, tmp_key, range) { if (!range) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h index 675fe6395ac8ca..8b8f345b60b6b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h @@ -156,6 +156,7 @@ void amdgpu_userq_reset_work(struct work_struct *work); void amdgpu_userq_pre_reset(struct amdgpu_device *adev); int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost); void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue); +void amdgpu_userq_process_fence_irq(struct amdgpu_device *adev, u32 doorbell); int amdgpu_userq_input_va_validate(struct amdgpu_device *adev, struct amdgpu_usermode_queue *queue, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 115a7b269af302..9ba9de16a27a23 
100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -3023,11 +3023,22 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, is_compute_context = vm->is_compute_context; - if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid, - node_id, addr >> PAGE_SHIFT, ts, write_fault)) { + if (is_compute_context) { + /* Unreserve root since svm_range_restore_pages might try to reserve it. */ + /* TODO: rework svm_range_restore_pages so that this isn't necessary. */ amdgpu_bo_unreserve(root); + + if (!svm_range_restore_pages(adev, pasid, vmid, + node_id, addr >> PAGE_SHIFT, ts, write_fault)) { + amdgpu_bo_unref(&root); + return true; + } amdgpu_bo_unref(&root); - return true; + + /* Re-acquire the VM lock, could be that the VM was freed in between. */ + vm = amdgpu_vm_lock_by_pasid(adev, &root, pasid); + if (!vm) + return false; } addr /= AMDGPU_GPU_PAGE_SIZE; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 8c82e90f871b73..d40ab1e9548060 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -6523,15 +6523,7 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: CP EOP\n"); if (adev->enable_mes && doorbell_offset) { - struct amdgpu_usermode_queue *queue; - struct xarray *xa = &adev->userq_doorbell_xa; - unsigned long flags; - - xa_lock_irqsave(xa, flags); - queue = xa_load(xa, doorbell_offset); - if (queue) - amdgpu_userq_fence_driver_process(queue->fence_drv); - xa_unlock_irqrestore(xa, flags); + amdgpu_userq_process_fence_irq(adev, doorbell_offset); } else { me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 65c33823a688fc..0e0b1e5b88fce8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -4854,15 +4854,7 @@ 
static int gfx_v12_0_eop_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: CP EOP\n"); if (adev->enable_mes && doorbell_offset) { - struct xarray *xa = &adev->userq_doorbell_xa; - struct amdgpu_usermode_queue *queue; - unsigned long flags; - - xa_lock_irqsave(xa, flags); - queue = xa_load(xa, doorbell_offset); - if (queue) - amdgpu_userq_fence_driver_process(queue->fence_drv); - xa_unlock_irqrestore(xa, flags); + amdgpu_userq_process_fence_irq(adev, doorbell_offset); } else { me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c index 68fd3c04134d8e..68db1bc73bc7c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c @@ -3643,16 +3643,7 @@ static int gfx_v12_1_eop_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: CP EOP\n"); if (adev->enable_mes && doorbell_offset) { - struct xarray *xa = &adev->userq_doorbell_xa; - struct amdgpu_usermode_queue *queue; - unsigned long flags; - - xa_lock_irqsave(xa, flags); - queue = xa_load(xa, doorbell_offset); - if (queue) - amdgpu_userq_fence_driver_process(queue->fence_drv); - - xa_unlock_irqrestore(xa, flags); + amdgpu_userq_process_fence_irq(adev, doorbell_offset); } else { me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 73223d97a87f59..ac90d8e9d86a83 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1571,6 +1571,71 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } +/** + * gfx_v6_0_setup_tcc() - setup which TCCs are used + * + * @adev: amdgpu_device pointer + * + * Verify whether the current GPU has any TCCs disabled, + * which can happen when the GPU is harvested and some + * memory channels are disabled, reducing the memory bus width. 
+ * For example, on the Radeon HD 7870 XT (Tahiti LE). + * + * If some TCCs are disabled, we need to make sure that + * the disabled TCCs are not used, and the remaining TCCs + * are used optimally. + * + * TCP_CHAN_STEER_LO/HI control which TCC is used by TCP channels. + * TCP_ADDR_CONFIG.NUM_TCC_BANKS controls how many channels are used. + * + * For optimal performance: + * - Rely on the CHAN_STEER from the golden registers table, + * only skip disabled TCCs but keep the mapping order. + * - Limit NUM_TCC_BANKS to number of active TCCs to avoid thrashing, + * which performs better than using the same TCC twice. + */ +static void gfx_v6_0_setup_tcc(struct amdgpu_device *adev) +{ + u32 i, tcc, tcp_addr_config, num_active_tcc = 0; + u64 chan_steer, patched_chan_steer = 0; + const u32 num_max_tcc = adev->gfx.config.max_texture_channel_caches; + const u32 dis_tcc_mask = + amdgpu_gfx_create_bitmask(num_max_tcc) & + (REG_GET_FIELD(RREG32(mmCGTS_TCC_DISABLE), + CGTS_TCC_DISABLE, TCC_DISABLE) | + REG_GET_FIELD(RREG32(mmCGTS_USER_TCC_DISABLE), + CGTS_USER_TCC_DISABLE, TCC_DISABLE)); + + /* When no TCC is disabled, the golden registers table already has optimal TCC setup */ + if (!dis_tcc_mask) + return; + + /* Each 4-bit nibble contains the index of a TCC used by all TCPs */ + chan_steer = RREG32(mmTCP_CHAN_STEER_LO) | ((u64)RREG32(mmTCP_CHAN_STEER_HI) << 32ull); + + /* Patch the TCP to TCC mapping to skip disabled TCCs */ + for (i = 0; i < num_max_tcc; ++i) { + tcc = (chan_steer >> (u64)(4 * i)) & 0xf; + + if (!((1 << tcc) & dis_tcc_mask)) { + /* Copy enabled TCC indices to the patched register value. 
*/ + patched_chan_steer |= (u64)tcc << (u64)(4 * num_active_tcc); + ++num_active_tcc; + } + } + + WARN_ON(num_active_tcc != num_max_tcc - hweight32(dis_tcc_mask)); + + /* Patch number of TCCs used by TCPs */ + tcp_addr_config = REG_SET_FIELD(RREG32(mmTCP_ADDR_CONFIG), + TCP_ADDR_CONFIG, NUM_TCC_BANKS, + num_active_tcc - 1); + + WREG32(mmTCP_ADDR_CONFIG, tcp_addr_config); + WREG32(mmTCP_CHAN_STEER_HI, upper_32_bits(patched_chan_steer)); + WREG32(mmTCP_CHAN_STEER_LO, lower_32_bits(patched_chan_steer)); +} + static void gfx_v6_0_config_init(struct amdgpu_device *adev) { adev->gfx.config.double_offchip_lds_buf = 0; @@ -1729,6 +1794,7 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev) gfx_v6_0_tiling_mode_table_init(adev); gfx_v6_0_setup_rb(adev); + gfx_v6_0_setup_tcc(adev); gfx_v6_0_setup_spi(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 9fe8d10ab2705a..cffb1e6bab353f 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -802,6 +802,7 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v2_0_dec_ring_get_rptr, .get_wptr = jpeg_v2_0_dec_ring_get_wptr, .set_wptr = jpeg_v2_0_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 20983f126b4907..13a6e24c624a24 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -693,6 +693,7 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v2_5_dec_ring_get_rptr, .get_wptr = jpeg_v2_5_dec_ring_get_wptr, .set_wptr = jpeg_v2_5_dec_ring_set_wptr, @@ -724,6 +725,7 @@ static 
const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v2_5_dec_ring_get_rptr, .get_wptr = jpeg_v2_5_dec_ring_get_wptr, .set_wptr = jpeg_v2_5_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 98f5e0622bc58a..d0445df39d2c04 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -594,6 +594,7 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v3_0_dec_ring_get_rptr, .get_wptr = jpeg_v3_0_dec_ring_get_wptr, .set_wptr = jpeg_v3_0_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 0bd83820dd20ca..6fd4238a8471a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -759,6 +759,7 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v4_0_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 82abe181c73099..0c746580de1130 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -1219,6 +1219,7 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr, 
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 54fd9c800c40af..a43582b9c876c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -804,6 +804,7 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v4_0_5_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_5_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_5_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index 46bf15dce2bd04..72a4b2d0676fa4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -680,6 +680,7 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr, .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr, .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c index edecbfe66c79a1..250316704dfac4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c @@ -884,6 +884,7 @@ static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v5_0_1_dec_ring_get_rptr, .get_wptr = jpeg_v5_0_1_dec_ring_get_wptr, .set_wptr = jpeg_v5_0_1_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_2.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_2.c index 285c459379c4a4..7a4ecea6b39a49 
100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_2.c @@ -703,6 +703,7 @@ static const struct amd_ip_funcs jpeg_v5_0_2_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v5_0_2_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v5_0_2_dec_ring_get_rptr, .get_wptr = jpeg_v5_0_2_dec_ring_get_wptr, .set_wptr = jpeg_v5_0_2_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_3_0.c index 1821dced936fb9..e7546816baba34 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_3_0.c @@ -661,6 +661,7 @@ static const struct amd_ip_funcs jpeg_v5_3_0_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v5_3_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, + .no_user_fence = true, .get_rptr = jpeg_v5_3_0_dec_ring_get_rptr, .get_wptr = jpeg_v5_3_0_dec_ring_get_wptr, .set_wptr = jpeg_v5_3_0_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 0f530bb8a9a360..8ca46e1e474edc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1662,17 +1662,8 @@ static int sdma_v6_0_process_fence_irq(struct amdgpu_device *adev, u32 doorbell_offset = entry->src_data[0]; if (adev->enable_mes && doorbell_offset) { - struct amdgpu_usermode_queue *queue; - struct xarray *xa = &adev->userq_doorbell_xa; - unsigned long flags; - doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; - - xa_lock_irqsave(xa, flags); - queue = xa_load(xa, doorbell_offset); - if (queue) - amdgpu_userq_fence_driver_process(queue->fence_drv); - xa_unlock_irqrestore(xa, flags); + amdgpu_userq_process_fence_irq(adev, doorbell_offset); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index 
9ed817b69a3b74..37191e2918d453 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -1594,17 +1594,8 @@ static int sdma_v7_0_process_fence_irq(struct amdgpu_device *adev, u32 doorbell_offset = entry->src_data[0]; if (adev->enable_mes && doorbell_offset) { - struct xarray *xa = &adev->userq_doorbell_xa; - struct amdgpu_usermode_queue *queue; - unsigned long flags; - doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; - - xa_lock_irqsave(xa, flags); - queue = xa_load(xa, doorbell_offset); - if (queue) - amdgpu_userq_fence_driver_process(queue->fence_drv); - xa_unlock_irqrestore(xa, flags); + amdgpu_userq_process_fence_irq(adev, doorbell_offset); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index fea576a7f397f0..efb3fde919ee3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -242,6 +242,10 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev) uint64_t addr; uint32_t size; + /* When the keyselect is already set, don't perturb it. 
*/ + if (RREG32(mmUVD_FW_START)) + return; + /* program the VCPU memory controller bits 0-27 */ addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; @@ -284,6 +288,12 @@ static int uvd_v3_1_fw_validate(struct amdgpu_device *adev) int i; uint32_t keysel = adev->uvd.keyselect; + if (RREG32(mmUVD_FW_START) & UVD_FW_STATUS__PASS_MASK) { + dev_dbg(adev->dev, "UVD keyselect already set: 0x%x (on CPU: 0x%x)\n", + RREG32(mmUVD_FW_START), adev->uvd.keyselect); + return 0; + } + WREG32(mmUVD_FW_START, keysel); for (i = 0; i < 10; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index e35fae9cdaf66e..0442bfcfd384d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -2113,6 +2113,7 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_DEC, .align_mask = 0xf, + .no_user_fence = true, .secure_submission_supported = true, .get_rptr = vcn_v2_0_dec_ring_get_rptr, .get_wptr = vcn_v2_0_dec_ring_get_wptr, @@ -2145,6 +2146,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .get_rptr = vcn_v2_0_enc_ring_get_rptr, .get_wptr = vcn_v2_0_enc_ring_get_wptr, .set_wptr = vcn_v2_0_enc_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 006a1545119717..8b8184fe6764b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -1778,6 +1778,7 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_DEC, .align_mask = 0xf, + .no_user_fence = true, .secure_submission_supported = true, .get_rptr = 
vcn_v2_5_dec_ring_get_rptr, .get_wptr = vcn_v2_5_dec_ring_get_wptr, @@ -1879,6 +1880,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .get_rptr = vcn_v2_5_enc_ring_get_rptr, .get_wptr = vcn_v2_5_enc_ring_get_wptr, .set_wptr = vcn_v2_5_enc_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 6fb4fcdbba4f14..81bba3ec2a9378 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1856,6 +1856,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_DEC, .align_mask = 0x3f, .nop = VCN_DEC_SW_CMD_NO_OP, + .no_user_fence = true, .secure_submission_supported = true, .get_rptr = vcn_v3_0_dec_ring_get_rptr, .get_wptr = vcn_v3_0_dec_ring_get_wptr, @@ -1972,6 +1973,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { uint32_t offset, size, *create; + uint64_t buf_end; if (msg[0] != RDECODE_MESSAGE_CREATE) continue; @@ -1979,7 +1981,8 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, offset = msg[1]; size = msg[2]; - if (size < 4 || offset + size > end - addr) { + if (size < 4 || check_add_overflow(offset, size, &buf_end) || + buf_end > end - addr) { DRM_ERROR("VCN message buffer exceeds BO bounds!\n"); r = -EINVAL; goto out; @@ -2036,6 +2039,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_DEC, .align_mask = 0xf, + .no_user_fence = true, .secure_submission_supported = true, .get_rptr = vcn_v3_0_dec_ring_get_rptr, .get_wptr = vcn_v3_0_dec_ring_get_wptr, @@ -2138,6 +2142,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = { .type = 
AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .get_rptr = vcn_v3_0_enc_ring_get_rptr, .get_wptr = vcn_v3_0_enc_ring_get_wptr, .set_wptr = vcn_v3_0_enc_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 5dec92691f73ee..ff7269bafae8ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -1889,6 +1889,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { uint32_t offset, size, *create; + uint64_t buf_end; if (msg[0] != RDECODE_MESSAGE_CREATE) continue; @@ -1896,7 +1897,8 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, offset = msg[1]; size = msg[2]; - if (size < 4 || offset + size > end - addr) { + if (size < 4 || check_add_overflow(offset, size, &buf_end) || + buf_end > end - addr) { DRM_ERROR("VCN message buffer exceeds BO bounds!\n"); r = -EINVAL; goto out; @@ -1994,6 +1996,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata), .get_rptr = vcn_v4_0_unified_ring_get_rptr, .get_wptr = vcn_v4_0_unified_ring_get_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index ff3013b97abd10..10e8fc2821f37c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -1775,6 +1775,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .get_rptr = vcn_v4_0_3_unified_ring_get_rptr, .get_wptr = vcn_v4_0_3_unified_ring_get_wptr, .set_wptr = vcn_v4_0_3_unified_ring_set_wptr, diff --git 
a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 1f6a22983c0dd5..1571cc5a148c87 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -1483,6 +1483,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .get_rptr = vcn_v4_0_5_unified_ring_get_rptr, .get_wptr = vcn_v4_0_5_unified_ring_get_wptr, .set_wptr = vcn_v4_0_5_unified_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index 6109124f852e52..d5f49fa33bee4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -1207,6 +1207,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .get_rptr = vcn_v5_0_0_unified_ring_get_rptr, .get_wptr = vcn_v5_0_0_unified_ring_get_wptr, .set_wptr = vcn_v5_0_0_unified_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index c28c6aff17aaa0..54fbf8d73ca679 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -1419,6 +1419,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .get_rptr = vcn_v5_0_1_unified_ring_get_rptr, .get_wptr = vcn_v5_0_1_unified_ring_get_wptr, .set_wptr = vcn_v5_0_1_unified_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_2.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_2.c index c3d3cc0230580d..bbc172db91a11b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_2.c @@ -994,6 +994,7 @@ static const struct amdgpu_ring_funcs 
vcn_v5_0_2_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .no_user_fence = true, .get_rptr = vcn_v5_0_2_unified_ring_get_rptr, .get_wptr = vcn_v5_0_2_unified_ring_get_wptr, .set_wptr = vcn_v5_0_2_unified_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 55ea5145a28acc..f829d65a79b43e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -1695,6 +1696,16 @@ static int kfd_ioctl_smi_events(struct file *filep, return kfd_smi_event_open(pdd->dev, &args->anon_fd); } +static int kfd_ioctl_svm_validate(void *kdata, unsigned int usize) +{ + struct kfd_ioctl_svm_args *args = kdata; + size_t expected = struct_size(args, attrs, args->nattr); + + if (expected == SIZE_MAX || usize < expected) + return -EINVAL; + return 0; +} + #if IS_ENABLED(CONFIG_HSA_AMD_SVM) static int kfd_ioctl_set_xnack_mode(struct file *filep, @@ -3209,7 +3220,11 @@ static int kfd_ioctl_create_process(struct file *filep, struct kfd_process *p, v #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ - .cmd_drv = 0, .name = #ioctl} + .validate = NULL, .cmd_drv = 0, .name = #ioctl} + +#define AMDKFD_IOCTL_DEF_V(ioctl, _func, _validate, _flags) \ + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ + .validate = _validate, .cmd_drv = 0, .name = #ioctl} /** Ioctl table */ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { @@ -3306,7 +3321,8 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS, kfd_ioctl_smi_events, 0), - AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0), + AMDKFD_IOCTL_DEF_V(AMDKFD_IOC_SVM, kfd_ioctl_svm, + kfd_ioctl_svm_validate, 0), AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE, kfd_ioctl_set_xnack_mode, 
0), @@ -3431,6 +3447,12 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) memset(kdata, 0, usize); } + if (ioctl->validate) { + retcode = ioctl->validate(kdata, usize); + if (retcode) + goto err_i1; + } + retcode = func(filep, process, kdata); if (cmd & IOC_OUT) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 6e333bfa17d627..163d665a6074cc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1047,10 +1047,13 @@ extern struct srcu_struct kfd_processes_srcu; typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p, void *data); +typedef int amdkfd_ioctl_validate_t(void *kdata, unsigned int usize); + struct amdkfd_ioctl_desc { unsigned int cmd; int flags; amdkfd_ioctl_t *func; + amdkfd_ioctl_validate_t *validate; unsigned int cmd_drv; const char *name; }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index b120fdb0ef77b5..38085a0a0f58ee 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1366,6 +1366,12 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, pr_debug("CPU[0x%llx 0x%llx] -> GPU[0x%llx 0x%llx]\n", start, last, gpu_start, gpu_end); + + if (!amdgpu_vm_ready(vm)) { + pr_debug("VM not ready, canceling unmap\n"); + return -EINVAL; + } + return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, gpu_start, gpu_end, init_pte_value, 0, 0, NULL, NULL, fence); @@ -1443,6 +1449,11 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms, last_start, last_start + npages - 1, readonly); + if (!amdgpu_vm_ready(vm)) { + pr_debug("VM not ready, canceling map\n"); + return -EINVAL; + } + for (i = offset; i < offset + npages; i++) { uint64_t gpu_start; uint64_t gpu_end; diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e96a12ff2d31e1..5fc5d56085066b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1903,7 +1903,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } - init_data.asic_id.chip_family = adev->family; + /* special handling for early revisions of GC 11.5.4 */ + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 4)) + init_data.asic_id.chip_family = AMDGPU_FAMILY_GC_11_5_4; + else + init_data.asic_id.chip_family = adev->family; init_data.asic_id.pci_revision_id = adev->pdev->revision; init_data.asic_id.hw_internal_rev = adev->external_rev_id; @@ -9404,9 +9408,21 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, if (acrtc_state) { timing = &acrtc_state->stream->timing; - if (amdgpu_ip_version(adev, DCE_HWIP, 0) < - IP_VERSION(3, 5, 0) || - !(adev->flags & AMD_IS_APU)) { + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= + IP_VERSION(3, 2, 0) && + !(adev->flags & AMD_IS_APU)) { + /* + * DGPUs NV3x and newer that support idle optimizations + * experience intermittent flip-done timeouts on cursor + * updates. Restore 5s offdelay behavior for now. + * + * Discussion on the issue: + * https://lore.kernel.org/amd-gfx/20260217191632.1243826-1-sysdadmin@m1k.cloud/ + */ + config.offdelay_ms = 5000; + config.disable_immediate = false; + } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < + IP_VERSION(3, 5, 0)) { /* * Older HW and DGPU have issues with instant off; * use a 2 frame offdelay. 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 3b8ae7798a9372..a3cb05490dc916 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -1032,6 +1032,45 @@ dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector) return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); } +static const struct drm_edid * +dm_helpers_read_vbios_hardcoded_edid(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct dc_bios *bios = link->ctx->dc_bios; + struct embedded_panel_info info; + const struct drm_edid *edid; + enum bp_result r; + + if (!dc_is_embedded_signal(link->connector_signal) || + !bios->funcs->get_embedded_panel_info) + return NULL; + + memset(&info, 0, sizeof(info)); + r = bios->funcs->get_embedded_panel_info(bios, &info); + + if (r != BP_RESULT_OK) { + dm_error("Error when reading embedded panel info: %u\n", r); + return NULL; + } + + if (!info.fake_edid || !info.fake_edid_size) { + dm_error("Embedded panel info doesn't contain an EDID\n"); + return NULL; + } + + edid = drm_edid_alloc(info.fake_edid, info.fake_edid_size); + + if (!drm_edid_valid(edid)) { + dm_error("EDID from embedded panel info is invalid\n"); + drm_edid_free(edid); + return NULL; + } + + aconnector->base.display_info.width_mm = info.panel_width_mm; + aconnector->base.display_info.height_mm = info.panel_height_mm; + + return edid; +} + void populate_hdmi_info_from_connector(struct drm_hdmi_info *hdmi, struct dc_edid_caps *edid_caps) { edid_caps->scdc_present = hdmi->scdc.supported; @@ -1052,6 +1091,9 @@ enum dc_edid_status dm_helpers_read_local_edid( if (link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; + else if (link->ddc_hw_inst == GPIO_DDC_LINE_UNKNOWN && + dc_is_embedded_signal(link->connector_signal)) + ddc = NULL; else ddc = &aconnector->i2c->base; @@ -1065,6 +1107,8 @@ enum 
dc_edid_status dm_helpers_read_local_edid( drm_edid = dm_helpers_read_acpi_edid(aconnector); if (drm_edid) drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name); + else if (!ddc) + drm_edid = dm_helpers_read_vbios_hardcoded_edid(link, aconnector); else drm_edid = drm_edid_read_ddc(connector, ddc); drm_edid_connector_update(connector, drm_edid); diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index dd362071a6c9e7..c307f42fe0b988 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -794,11 +794,13 @@ static enum bp_result bios_parser_external_encoder_control( static enum bp_result bios_parser_dac_load_detection( struct dc_bios *dcb, - enum engine_id engine_id) + enum engine_id engine_id, + struct graphics_object_id ext_enc_id) { struct bios_parser *bp = BP_FROM_DCB(dcb); struct dc_context *ctx = dcb->ctx; struct bp_load_detection_parameters bp_params = {0}; + struct bp_external_encoder_control ext_cntl = {0}; enum bp_result bp_result = BP_RESULT_UNSUPPORTED; uint32_t bios_0_scratch; uint32_t device_id_mask = 0; @@ -824,6 +826,13 @@ static enum bp_result bios_parser_dac_load_detection( bp_params.engine_id = engine_id; bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params); + } else if (ext_enc_id.id) { + if (!bp->cmd_tbl.external_encoder_control) + return BP_RESULT_UNSUPPORTED; + + ext_cntl.action = EXTERNAL_ENCODER_CONTROL_DAC_LOAD_DETECT; + ext_cntl.encoder_id = ext_enc_id; + bp_result = bp->cmd_tbl.external_encoder_control(bp, &ext_cntl); } if (bp_result != BP_RESULT_OK) @@ -1304,6 +1313,60 @@ static enum bp_result bios_parser_get_embedded_panel_info( return BP_RESULT_FAILURE; } +static enum bp_result get_embedded_panel_extra_info( + struct bios_parser *bp, + struct embedded_panel_info *info, + const uint32_t table_offset) +{ + uint8_t *record = bios_get_image(&bp->base, table_offset, 1); + 
ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; + ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; + + while (*record != ATOM_RECORD_END_TYPE) { + switch (*record) { + case LCD_MODE_PATCH_RECORD_MODE_TYPE: + record += sizeof(ATOM_PATCH_RECORD_MODE); + break; + case LCD_RTS_RECORD_TYPE: + record += sizeof(ATOM_LCD_RTS_RECORD); + break; + case LCD_CAP_RECORD_TYPE: + record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); + break; + case LCD_FAKE_EDID_PATCH_RECORD_TYPE: + fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; + if (fake_edid_record->ucFakeEDIDLength) { + if (fake_edid_record->ucFakeEDIDLength == 128) + info->fake_edid_size = + fake_edid_record->ucFakeEDIDLength; + else + info->fake_edid_size = + fake_edid_record->ucFakeEDIDLength * 128; + + info->fake_edid = fake_edid_record->ucFakeEDIDString; + + record += struct_size(fake_edid_record, + ucFakeEDIDString, + info->fake_edid_size); + } else { + /* empty fake edid record must be 3 bytes long */ + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; + } + break; + case LCD_PANEL_RESOLUTION_RECORD_TYPE: + panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; + info->panel_width_mm = panel_res_record->usHSize; + info->panel_height_mm = panel_res_record->usVSize; + record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); + break; + default: + return BP_RESULT_BADBIOSTABLE; + } + } + + return BP_RESULT_OK; +} + static enum bp_result get_embedded_panel_info_v1_2( struct bios_parser *bp, struct embedded_panel_info *info) @@ -1420,6 +1483,10 @@ static enum bp_result get_embedded_panel_info_v1_2( if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc) info->lcd_timing.misc_info.API_ENABLED = true; + if (lvds->usExtInfoTableOffset) + return get_embedded_panel_extra_info(bp, info, + le16_to_cpu(lvds->usExtInfoTableOffset) + DATA_TABLES(LCD_Info)); + return BP_RESULT_OK; } @@ -1545,6 +1612,10 @@ static enum bp_result get_embedded_panel_info_v1_3( (uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL & lvds->ucLCD_Misc) >> 
ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT; + if (lvds->usExtInfoTableOffset) + return get_embedded_panel_extra_info(bp, info, + le16_to_cpu(lvds->usExtInfoTableOffset) + DATA_TABLES(LCD_Info)); + return BP_RESULT_OK; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7f55ba09b19112..37714d4371fba1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1682,7 +1682,7 @@ struct dc_scratch_space { struct dc_link_training_overrides preferred_training_settings; struct dp_audio_test_data audio_test_data; - uint8_t ddc_hw_inst; + enum gpio_ddc_line ddc_hw_inst; uint8_t hpd_src; diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 6f96c5cf39fe8d..526f71616f94b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -102,7 +102,8 @@ struct dc_vbios_funcs { struct bp_external_encoder_control *cntl); enum bp_result (*dac_load_detection)( struct dc_bios *bios, - enum engine_id engine_id); + enum engine_id engine_id, + struct graphics_object_id ext_enc_id); enum bp_result (*transmitter_control)( struct dc_bios *bios, struct bp_transmitter_control *cntl); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 5f40ae9e3120f4..e15fd1454d3b53 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -1102,7 +1102,9 @@ void dce110_link_encoder_hw_init( ASSERT(result == BP_RESULT_OK); } - aux_initialize(enc110); + + if (enc110->aux_regs) + aux_initialize(enc110); /* reinitialize HPD. * hpd_initialize() will pass DIG_FE id to HW context. 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c index 6f2a0d5d963bd2..62fe5c3b18dc71 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c @@ -40,8 +40,8 @@ #define FN(reg_name, field_name) \ mcif_wb30->mcif_wb_shift->field_name, mcif_wb30->mcif_wb_mask->field_name -#define MCIF_ADDR(addr) (((unsigned long long)addr & 0xffffffffff) + 0xFE) >> 8 -#define MCIF_ADDR_HIGH(addr) (unsigned long long)addr >> 40 +#define MCIF_ADDR(addr) ((uint32_t)((((unsigned long long)(addr) & 0xffffffffffULL) + 0xFEULL) >> 8)) +#define MCIF_ADDR_HIGH(addr) ((uint32_t)(((unsigned long long)(addr)) >> 40)) /* wbif programming guide: * 1. set up wbif parameter: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index a2c46350e44e8a..95f8b7c7d657a1 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -646,6 +646,9 @@ enum gpio_result dal_ddc_change_mode( enum gpio_ddc_line dal_ddc_get_line( const struct ddc *ddc) { + if (!ddc) + return GPIO_DDC_LINE_UNKNOWN; + return (enum gpio_ddc_line)dal_gpio_get_enum(ddc->pin_data); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 5273ca09fe121e..f0abbb7c2cb24d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -665,16 +665,45 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) } static void -dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable) +dce110_external_encoder_control(enum bp_external_encoder_control_action action, + struct dc_link *link, + struct dc_crtc_timing *timing) { - struct dc_link *link = pipe_ctx->stream->link; + struct dc *dc = link->ctx->dc; struct dc_bios 
*bios = link->ctx->dc_bios; - struct bp_encoder_control encoder_control = {0}; + const struct dc_link_settings *link_settings = &link->cur_link_settings; + enum bp_result bp_result = BP_RESULT_OK; + struct bp_external_encoder_control ext_cntl = { + .action = action, + .connector_obj_id = link->link_enc->connector, + .encoder_id = link->ext_enc_id, + .lanes_number = link_settings->lane_count, + .link_rate = link_settings->link_rate, + + /* Use signal type of the real link encoder, ie. DP */ + .signal = link->connector_signal, + + /* We don't know the timing yet when executing the SETUP action, + * so use a reasonably high default value. It seems that ENABLE + * can change the actual pixel clock but doesn't work with higher + * pixel clocks than what SETUP was called with. + */ + .pixel_clock = timing ? timing->pix_clk_100hz / 10 : 300000, + .color_depth = timing ? timing->display_color_depth : COLOR_DEPTH_888, + }; + DC_LOGGER_INIT(dc->ctx); - encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE; - encoder_control.engine_id = link->link_enc->analog_engine; - encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10; - bios->funcs->encoder_control(bios, &encoder_control); + bp_result = bios->funcs->external_encoder_control(bios, &ext_cntl); + + if (bp_result != BP_RESULT_OK) + DC_LOG_ERROR("Failed to execute external encoder action: 0x%x\n", action); +} + +static void +dce110_prepare_ddc(struct dc_link *link) +{ + if (link->ext_enc_id.id) + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_DDC_SETUP, link, NULL); } static bool @@ -684,7 +713,8 @@ dce110_dac_load_detect(struct dc_link *link) struct link_encoder *link_enc = link->link_enc; enum bp_result bp_result; - bp_result = bios->funcs->dac_load_detection(bios, link_enc->analog_engine); + bp_result = bios->funcs->dac_load_detection( + bios, link_enc->analog_engine, link->ext_enc_id); return bp_result == BP_RESULT_OK; } @@ -700,7 +730,6 @@ void 
dce110_enable_stream(struct pipe_ctx *pipe_ctx) uint32_t early_control = 0; struct timing_generator *tg = pipe_ctx->stream_res.tg; - link_hwss->setup_stream_attribute(pipe_ctx); link_hwss->setup_stream_encoder(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); @@ -719,8 +748,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) tg->funcs->set_early_control(tg, early_control); - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) - dce110_dac_encoder_control(pipe_ctx, true); + if (link->ext_enc_id.id) + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_ENABLE, link, timing); } static enum bp_result link_transmitter_control( @@ -1219,8 +1248,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) link_enc->transmitter - TRANSMITTER_UNIPHY_A); } - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) - dce110_dac_encoder_control(pipe_ctx, false); + if (link->ext_enc_id.id) + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_DISABLE, link, NULL); } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1603,22 +1632,6 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } -static void -dce110_select_crtc_source(struct pipe_ctx *pipe_ctx) -{ - struct dc_link *link = pipe_ctx->stream->link; - struct dc_bios *bios = link->ctx->dc_bios; - struct bp_crtc_source_select crtc_source_select = {0}; - enum engine_id engine_id = link->link_enc->preferred_engine; - - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) - engine_id = link->link_enc->analog_engine; - crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst; - crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth; - crtc_source_select.engine_id = engine_id; - crtc_source_select.sink_signal = pipe_ctx->stream->signal; - bios->funcs->select_crtc_source(bios, &crtc_source_select); -} enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, @@ -1639,10 +1652,6 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( 
hws->funcs.disable_stream_gating(dc, pipe_ctx); } - if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) { - dce110_select_crtc_source(pipe_ctx); - } - if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output = {0}; @@ -1722,8 +1731,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.tg->funcs->set_static_screen_control( pipe_ctx->stream_res.tg, event_triggers, 2); - if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && - !dc_is_rgb_signal(pipe_ctx->stream->signal)) + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); @@ -3376,6 +3384,15 @@ void dce110_enable_tmds_link_output(struct dc_link *link, link->phy_state.symclk_state = SYMCLK_ON_TX_ON; } +static void dce110_enable_analog_link_output( + struct dc_link *link, + uint32_t pix_clk_100hz) +{ + link->link_enc->funcs->enable_analog_output( + link->link_enc, + pix_clk_100hz); +} + void dce110_enable_dp_link_output( struct dc_link *link, const struct link_resource *link_res, @@ -3423,6 +3440,11 @@ void dce110_enable_dp_link_output( } } + if (link->ext_enc_id.id) { + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_INIT, link, NULL); + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_SETUP, link, NULL); + } + if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { if (dc->clk_mgr->funcs->notify_link_rate_change) dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); @@ -3513,8 +3535,10 @@ static const struct hw_sequencer_funcs dce110_funcs = { .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, + .enable_analog_link_output = dce110_enable_analog_link_output, .disable_link_output = dce110_disable_link_output, .dac_load_detect = dce110_dac_load_detect, + .prepare_ddc = 
dce110_prepare_ddc, }; static const struct hwseq_private_funcs dce110_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 7e7682d7dfc834..ae4c4ad05baa02 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -568,7 +568,9 @@ static bool construct_phy(struct dc_link *link, goto ddc_create_fail; } - if (!link->ddc->ddc_pin) { + /* Embedded display connectors such as LVDS may not have DDC. */ + if (!link->ddc->ddc_pin && + !dc_is_embedded_signal(link->connector_signal)) { DC_ERROR("Failed to get I2C info for connector!\n"); goto ddc_create_fail; } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index 6a25dcfcdf17ea..d2d56a1c4b8b3a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -753,7 +753,8 @@ static struct link_encoder *dce60_link_encoder_create( enc_init_data, &link_enc_feature, &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], + enc_init_data->channel == CHANNEL_ID_UNKNOWN ? + NULL : &link_enc_aux_regs[enc_init_data->channel - 1], enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ? 
NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 33be49b3c1b178..6c00497e9a0129 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -760,7 +760,8 @@ static struct link_encoder *dce80_link_encoder_create( enc_init_data, &link_enc_feature, &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], + enc_init_data->channel == CHANNEL_ID_UNKNOWN ? + NULL : &link_enc_aux_regs[enc_init_data->channel - 1], enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ? NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index 38a77fa9b4afde..a0f03fb67605eb 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -153,6 +153,10 @@ struct embedded_panel_info { uint32_t drr_enabled; uint32_t min_drr_refresh_rate; bool realtek_eDPToLVDS; + uint16_t panel_width_mm; + uint16_t panel_height_mm; + uint16_t fake_edid_size; + const uint8_t *fake_edid; }; struct dc_firmware_info { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 7f386ff0c872d8..9d8b1227388fad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -425,6 +425,7 @@ static int aldebaran_set_default_dpm_table(struct smu_context *smu) dpm_table->dpm_levels[0].enabled = true; dpm_table->dpm_levels[1].value = pptable->GfxclkFmax; dpm_table->dpm_levels[1].enabled = true; + dpm_table->flags |= SMU_DPM_TABLE_FINE_GRAINED; } else { dpm_table->count = 1; 
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index cd0a23f432ff86..0df8c05a7fce7f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1129,6 +1129,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu) /* gfxclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.gfx_table; dpm_table->clk_type = SMU_GFXCLK; + dpm_table->flags = SMU_DPM_TABLE_FINE_GRAINED; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { /* In the case of gfxclk, only fine-grained dpm is honored. * Get min/max values from FW. diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 3d49e58794d29f..90c7127beabffc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1370,7 +1370,7 @@ int smu_cmn_print_dpm_clk_levels(struct smu_context *smu, level_index = 1; } - if (!is_fine_grained) { + if (!is_fine_grained || count == 1) { for (i = 0; i < count; i++) { freq_match = !is_deep_sleep && smu_cmn_freqs_match( diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index c598b99673fc15..e7db4e4ea700fe 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -831,7 +831,7 @@ static void fill_palette_332(struct drm_crtc *crtc, u16 r, u16 g, u16 b, } /** - * drm_crtc_fill_palette_332 - Programs a default palette for R332-like formats + * drm_crtc_fill_palette_332 - Programs a default palette for RGB332-like formats * @crtc: The displaying CRTC * @set_palette: Callback for programming the hardware gamma LUT * diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 9166c353f131cb..88808e972cc156 100644 --- 
a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -172,8 +172,8 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev, } for (i = 0; i < info->num_planes; i++) { - unsigned int width = mode_cmd->width / (i ? info->hsub : 1); - unsigned int height = mode_cmd->height / (i ? info->vsub : 1); + unsigned int width = drm_format_info_plane_width(info, mode_cmd->width, i); + unsigned int height = drm_format_info_plane_height(info, mode_cmd->height, i); unsigned int min_size; objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]); diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index e154cb35f604dc..6193811ef7bebb 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -558,6 +558,6 @@ pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir) &pvr_fw_trace_fops); } - debugfs_create_file("trace_mask", 0600, dir, fw_trace, + debugfs_create_file("trace_mask", 0600, dir, pvr_dev, &pvr_fw_trace_mask_fops); } diff --git a/drivers/gpu/drm/sysfb/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c index d38ba70f4e0d37..247cf13c80a055 100644 --- a/drivers/gpu/drm/sysfb/ofdrm.c +++ b/drivers/gpu/drm/sysfb/ofdrm.c @@ -350,6 +350,7 @@ static void ofdrm_pci_release(void *data) struct pci_dev *pcidev = data; pci_disable_device(pcidev); + pci_dev_put(pcidev); } static int ofdrm_device_init_pci(struct ofdrm_device *odev) @@ -375,6 +376,7 @@ static int ofdrm_device_init_pci(struct ofdrm_device *odev) if (ret) { drm_err(dev, "pci_enable_device(%s) failed: %d\n", dev_name(&pcidev->dev), ret); + pci_dev_put(pcidev); return ret; } ret = devm_add_action_or_reset(&pdev->dev, ofdrm_pci_release, pcidev); diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c index 3bae91d7eefed7..278bb23fe4c8d0 100644 --- a/drivers/gpu/drm/tiny/appletbdrm.c +++ b/drivers/gpu/drm/tiny/appletbdrm.c @@ -353,7 +353,7 @@ static 
int appletbdrm_primary_plane_helper_atomic_check(struct drm_plane *plane, frames_size + sizeof(struct appletbdrm_fb_request_footer), 16); - appletbdrm_state->request = kzalloc(request_size, GFP_KERNEL); + appletbdrm_state->request = kvzalloc(request_size, GFP_KERNEL); if (!appletbdrm_state->request) return -ENOMEM; @@ -543,7 +543,7 @@ static void appletbdrm_primary_plane_destroy_state(struct drm_plane *plane, { struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state); - kfree(appletbdrm_state->request); + kvfree(appletbdrm_state->request); kfree(appletbdrm_state->response); __drm_gem_destroy_shadow_plane_state(&appletbdrm_state->base); diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 08a0e9480d706c..17950fe3a0ec64 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -285,13 +285,12 @@ static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout) return unode->urb; } -#define GET_URB_TIMEOUT HZ struct urb *udl_get_urb(struct udl_device *udl) { struct urb *urb; spin_lock_irq(&udl->urbs.lock); - urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT); + urb = udl_get_urb_locked(udl, HZ * 2); spin_unlock_irq(&udl->urbs.lock); return urb; } diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 231e829bd709a2..1ca073a4ecb250 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -342,8 +343,10 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom return; urb = udl_get_urb(udl); - if (!urb) + if (!urb) { + drm_err_ratelimited(dev, "get urb failed when enabling crtc\n"); goto out; + } buf = (char *)urb->transfer_buffer; buf = udl_vidreg_lock(buf); diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 49de1c22a4696c..03242e8b3d876c 100644 --- a/drivers/gpu/drm/xe/Makefile +++ 
b/drivers/gpu/drm/xe/Makefile @@ -88,6 +88,7 @@ xe-y += xe_bb.o \ xe_irq.o \ xe_late_bind_fw.o \ xe_lrc.o \ + xe_mem_pool.o \ xe_migrate.o \ xe_mmio.o \ xe_mmio_gem.o \ diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 4ebaa0888a433b..9c88ca3ce768d6 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -583,7 +583,7 @@ #define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32) #define LSCFE_SAME_ADDRESS_ATOMICS_COALESCING_DISABLE REG_BIT(35 - 32) -#define ROW_CHICKEN5 XE_REG_MCR(0xe7f0) +#define ROW_CHICKEN5 XE_REG_MCR(0xe7f0, XE_REG_OPTION_MASKED) #define CPSS_AWARE_DIS REG_BIT(3) #define SARB_CHICKEN1 XE_REG_MCR(0xe90c) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index a7c2dc7f224c36..4075edf974216c 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -2322,8 +2322,10 @@ struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo, } /* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */ - if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT)) + if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT)) { + xe_bo_free(bo); return ERR_PTR(-EINVAL); + } if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) && !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) && @@ -2342,8 +2344,10 @@ struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo, alignment = SZ_4K >> PAGE_SHIFT; } - if (type == ttm_bo_type_device && aligned_size != size) + if (type == ttm_bo_type_device && aligned_size != size) { + xe_bo_free(bo); return ERR_PTR(-EINVAL); + } if (!bo) { bo = xe_bo_alloc(); diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index ff8317bfc1ae8e..9d19940b8fc035 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -18,6 +18,7 @@ #include "xe_ggtt_types.h" struct xe_device; +struct xe_mem_pool_node; struct xe_vm; #define 
XE_BO_MAX_PLACEMENTS 3 @@ -88,7 +89,7 @@ struct xe_bo { bool ccs_cleared; /** @bb_ccs: BB instructions of CCS read/write. Valid only for VF */ - struct xe_bb *bb_ccs[XE_SRIOV_VF_CCS_CTX_COUNT]; + struct xe_mem_pool_node *bb_ccs[XE_SRIOV_VF_CCS_CTX_COUNT]; /** * @cpu_caching: CPU caching mode. Currently only used for userspace diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index 7f9602b3363db1..b9828da1589723 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -258,6 +258,13 @@ struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags) return ERR_PTR(ret); } +/* + * Takes ownership of @storage: on success it is transferred to the returned + * drm_gem_object; on failure it is freed before returning the error. + * This matches the contract of xe_bo_init_locked() which frees @storage on + * its error paths, so callers need not (and must not) free @storage after + * this call. + */ static struct drm_gem_object * xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage, struct dma_buf *dma_buf) @@ -271,8 +278,10 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage, int ret = 0; dummy_obj = drm_gpuvm_resv_object_alloc(&xe->drm); - if (!dummy_obj) + if (!dummy_obj) { + xe_bo_free(storage); return ERR_PTR(-ENOMEM); + } dummy_obj->resv = resv; xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) { @@ -281,6 +290,7 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage, if (ret) break; + /* xe_bo_init_locked() frees storage on error */ bo = xe_bo_init_locked(xe, storage, NULL, resv, NULL, dma_buf->size, 0, /* Will require 1way or 2way for vm_bind */ ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, &exec); @@ -368,12 +378,15 @@ struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev, goto out_err; } - /* Errors here will take care of freeing the bo. 
*/ + /* + * xe_dma_buf_init_obj() takes ownership of bo on both success + * and failure, so we must not touch bo after this call. + */ obj = xe_dma_buf_init_obj(dev, bo, dma_buf); - if (IS_ERR(obj)) + if (IS_ERR(obj)) { + dma_buf_detach(dma_buf, attach); return obj; - - + } get_dma_buf(dma_buf); obj->import_attach = attach; return obj; diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c index c34408cfd292c3..dddcdd0bb7a33e 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.c +++ b/drivers/gpu/drm/xe/xe_eu_stall.c @@ -869,14 +869,14 @@ static int xe_eu_stall_stream_close(struct inode *inode, struct file *file) struct xe_eu_stall_data_stream *stream = file->private_data; struct xe_gt *gt = stream->gt; - drm_dev_put(>->tile->xe->drm); - mutex_lock(>->eu_stall->stream_lock); xe_eu_stall_disable_locked(stream); xe_eu_stall_data_buf_destroy(stream); xe_eu_stall_stream_free(stream); mutex_unlock(>->eu_stall->stream_lock); + drm_dev_put(>->tile->xe->drm); + return 0; } diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index b287d0e0e60a44..071b8c41df43ed 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -1405,7 +1405,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, if (q->vm && q->hwe->hw_engine_group) { err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q); if (err) - goto put_exec_queue; + goto kill_exec_queue; } } @@ -1416,12 +1416,15 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, /* user id alloc must always be last in ioctl to prevent UAF */ err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); if (err) - goto kill_exec_queue; + goto del_hw_engine_group; args->exec_queue_id = id; return 0; +del_hw_engine_group: + if (q->vm && q->hwe && q->hwe->hw_engine_group) + xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); kill_exec_queue: xe_exec_queue_kill(q); delete_queue_group: @@ -1760,7 
+1763,7 @@ void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q, void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q, unsigned int type) { - xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || + xe_assert(gt_to_xe(q->gt), type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT); dma_fence_put(q->tlb_inval[type].last_fence); diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index e5c234f3d795ee..0d13e357fb43c5 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -166,7 +166,7 @@ static int query_compatibility_version(struct xe_gsc *gsc) &rd_offset); if (err) { xe_gt_err(gt, "HuC: invalid GSC reply for version query (err=%d)\n", err); - return err; + goto out_bo; } compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index a145234f662b57..10556156eaadb3 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -259,24 +259,12 @@ static void guc_submit_sw_fini(struct drm_device *drm, void *arg) } static void guc_submit_fini(void *arg) -{ - struct xe_guc *guc = arg; - - /* Forcefully kill any remaining exec queues */ - xe_guc_ct_stop(&guc->ct); - guc_submit_reset_prepare(guc); - xe_guc_softreset(guc); - xe_guc_submit_stop(guc); - xe_uc_fw_sanitize(&guc->fw); - xe_guc_submit_pause_abort(guc); -} - -static void guc_submit_wedged_fini(void *arg) { struct xe_guc *guc = arg; struct xe_exec_queue *q; unsigned long index; + /* Drop any wedged queue refs */ mutex_lock(&guc->submission_state.lock); xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { if (exec_queue_wedged(q)) { @@ -286,6 +274,14 @@ static void guc_submit_wedged_fini(void *arg) } } mutex_unlock(&guc->submission_state.lock); + + /* Forcefully kill any remaining exec queues */ + xe_guc_ct_stop(&guc->ct); + guc_submit_reset_prepare(guc); + 
xe_guc_softreset(guc); + xe_guc_submit_stop(guc); + xe_uc_fw_sanitize(&guc->fw); + xe_guc_submit_pause_abort(guc); } static const struct xe_exec_queue_ops guc_exec_queue_ops; @@ -1320,10 +1316,8 @@ static void disable_scheduling_deregister(struct xe_guc *guc, void xe_guc_submit_wedge(struct xe_guc *guc) { struct xe_device *xe = guc_to_xe(guc); - struct xe_gt *gt = guc_to_gt(guc); struct xe_exec_queue *q; unsigned long index; - int err; xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); @@ -1335,15 +1329,6 @@ void xe_guc_submit_wedge(struct xe_guc *guc) return; if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET) { - err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev, - guc_submit_wedged_fini, guc); - if (err) { - xe_gt_err(gt, "Failed to register clean-up on wedged.mode=%s; " - "Although device is wedged.\n", - xe_wedged_mode_to_string(XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET)); - return; - } - mutex_lock(&guc->submission_state.lock); xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) if (xe_exec_queue_get_unless_zero(q)) diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 9d12a0d2f0b599..c725cde4508d20 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -1214,7 +1214,7 @@ static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc, if (xe_gt_WARN_ON(lrc->gt, max_len < 3)) return -ENOSPC; - *cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1); + *cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_LRM_CS_MMIO | MI_LRI_NUM_REGS(1); *cmd++ = CS_DEBUG_MODE2(0).addr; *cmd++ = REG_MASKED_FIELD_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE); diff --git a/drivers/gpu/drm/xe/xe_mem_pool.c b/drivers/gpu/drm/xe/xe_mem_pool.c new file mode 100644 index 00000000000000..d5e24d6aa88dd0 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_mem_pool.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2026 Intel Corporation + */ + +#include + +#include + +#include "instructions/xe_mi_commands.h" +#include 
"xe_bo.h" +#include "xe_device_types.h" +#include "xe_map.h" +#include "xe_mem_pool.h" +#include "xe_mem_pool_types.h" +#include "xe_tile_printk.h" + +/** + * struct xe_mem_pool - DRM MM pool for sub-allocating memory from a BO on an + * XE tile. + * + * The XE memory pool is a DRM MM manager that provides sub-allocation of memory + * from a backing buffer object (BO) on a specific XE tile. It is designed to + * manage memory for GPU workloads, allowing for efficient allocation and + * deallocation of memory regions within the BO. + * + * The memory pool maintains a primary BO that is pinned in the GGTT and mapped + * into the CPU address space for direct access. Optionally, it can also maintain + * a shadow BO that can be used for atomic updates to the primary BO's contents. + * + * The API provided by the memory pool allows clients to allocate and free memory + * regions, retrieve GPU and CPU addresses, and synchronize data between the + * primary and shadow BOs as needed. + */ +struct xe_mem_pool { + /** @base: Range allocator over [0, @size) in bytes */ + struct drm_mm base; + /** @bo: Active pool BO (GGTT-pinned, CPU-mapped). */ + struct xe_bo *bo; + /** @shadow: Shadow BO for atomic command updates. */ + struct xe_bo *shadow; + /** @swap_guard: Timeline guard updating @bo and @shadow */ + struct mutex swap_guard; + /** @cpu_addr: CPU virtual address of the active BO. */ + void *cpu_addr; + /** @is_iomem: Indicates if the BO mapping is I/O memory. 
*/ + bool is_iomem; +}; + +static struct xe_mem_pool *node_to_pool(struct xe_mem_pool_node *node) +{ + return container_of(node->sa_node.mm, struct xe_mem_pool, base); +} + +static struct xe_tile *pool_to_tile(struct xe_mem_pool *pool) +{ + return pool->bo->tile; +} + +static void fini_pool_action(struct drm_device *drm, void *arg) +{ + struct xe_mem_pool *pool = arg; + + if (pool->is_iomem) + kvfree(pool->cpu_addr); + + drm_mm_takedown(&pool->base); +} + +static int pool_shadow_init(struct xe_mem_pool *pool) +{ + struct xe_tile *tile = pool->bo->tile; + struct xe_device *xe = tile_to_xe(tile); + struct xe_bo *shadow; + int ret; + + xe_assert(xe, !pool->shadow); + + ret = drmm_mutex_init(&xe->drm, &pool->swap_guard); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { + fs_reclaim_acquire(GFP_KERNEL); + might_lock(&pool->swap_guard); + fs_reclaim_release(GFP_KERNEL); + } + shadow = xe_managed_bo_create_pin_map(xe, tile, + xe_bo_size(pool->bo), + XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); + if (IS_ERR(shadow)) + return PTR_ERR(shadow); + + pool->shadow = shadow; + + return 0; +} + +/** + * xe_mem_pool_init() - Initialize memory pool. + * @tile: the &xe_tile where allocate. + * @size: number of bytes to allocate. + * @guard: the size of the guard region at the end of the BO that is not + * sub-allocated, in bytes. + * @flags: flags to use to create shadow pool. + * + * Initializes a memory pool for sub-allocating memory from a backing BO on the + * specified XE tile. The backing BO is pinned in the GGTT and mapped into + * the CPU address space for direct access. Optionally, a shadow BO can also be + * initialized for atomic updates to the primary BO's contents. + * + * Returns: a pointer to the &xe_mem_pool, or an error pointer on failure. 
+ */ +struct xe_mem_pool *xe_mem_pool_init(struct xe_tile *tile, u32 size, + u32 guard, int flags) +{ + struct xe_device *xe = tile_to_xe(tile); + struct xe_mem_pool *pool; + struct xe_bo *bo; + u32 managed_size; + int ret; + + xe_tile_assert(tile, size > guard); + managed_size = size - guard; + + pool = drmm_kzalloc(&xe->drm, sizeof(*pool), GFP_KERNEL); + if (!pool) + return ERR_PTR(-ENOMEM); + + bo = xe_managed_bo_create_pin_map(xe, tile, size, + XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); + if (IS_ERR(bo)) { + xe_tile_err(tile, "Failed to prepare %uKiB BO for mem pool (%pe)\n", + size / SZ_1K, bo); + return ERR_CAST(bo); + } + pool->bo = bo; + pool->is_iomem = bo->vmap.is_iomem; + + if (pool->is_iomem) { + pool->cpu_addr = kvzalloc(size, GFP_KERNEL); + if (!pool->cpu_addr) + return ERR_PTR(-ENOMEM); + } else { + pool->cpu_addr = bo->vmap.vaddr; + } + + if (flags & XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY) { + ret = pool_shadow_init(pool); + + if (ret) + goto out_err; + } + + drm_mm_init(&pool->base, 0, managed_size); + ret = drmm_add_action_or_reset(&xe->drm, fini_pool_action, pool); + if (ret) + return ERR_PTR(ret); + + return pool; + +out_err: + if (flags & XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY) + xe_tile_err(tile, + "Failed to initialize shadow BO for mem pool (%d)\n", ret); + if (bo->vmap.is_iomem) + kvfree(pool->cpu_addr); + return ERR_PTR(ret); +} + +/** + * xe_mem_pool_sync() - Copy the entire contents of the main pool to shadow pool. + * @pool: the memory pool containing the primary and shadow BOs. + * + * Copies the entire contents of the primary pool to the shadow pool. This must + * be done after xe_mem_pool_init() with the XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY + * flag to ensure that the shadow pool has the same initial contents as the primary + * pool. 
After this initial synchronization, clients can choose to synchronize the + * shadow pool with the primary pool on a node basis using + * xe_mem_pool_sync_shadow_locked() as needed. + * + * Return: None. + */ +void xe_mem_pool_sync(struct xe_mem_pool *pool) +{ + struct xe_tile *tile = pool_to_tile(pool); + struct xe_device *xe = tile_to_xe(tile); + + xe_tile_assert(tile, pool->shadow); + + xe_map_memcpy_to(xe, &pool->shadow->vmap, 0, + pool->cpu_addr, xe_bo_size(pool->bo)); +} + +/** + * xe_mem_pool_swap_shadow_locked() - Swap the primary BO with the shadow BO. + * @pool: the memory pool containing the primary and shadow BOs. + * + * Swaps the primary buffer object with the shadow buffer object in the mem + * pool. This allows for atomic updates to the contents of the primary BO + * by first writing to the shadow BO and then swapping it with the primary BO. + * Swap_guard must be held to ensure synchronization with any concurrent swap + * operations. + * + * Return: None. + */ +void xe_mem_pool_swap_shadow_locked(struct xe_mem_pool *pool) +{ + struct xe_tile *tile = pool_to_tile(pool); + + xe_tile_assert(tile, pool->shadow); + lockdep_assert_held(&pool->swap_guard); + + swap(pool->bo, pool->shadow); + if (!pool->bo->vmap.is_iomem) + pool->cpu_addr = pool->bo->vmap.vaddr; +} + +/** + * xe_mem_pool_sync_shadow_locked() - Copy node from primary pool to shadow pool. + * @node: the node allocated in the memory pool. + * + * Copies the specified batch buffer from the primary pool to the shadow pool. + * Swap_guard must be held to ensure synchronization with any concurrent swap + * operations. + * + * Return: None. 
+ */ +void xe_mem_pool_sync_shadow_locked(struct xe_mem_pool_node *node) +{ + struct xe_mem_pool *pool = node_to_pool(node); + struct xe_tile *tile = pool_to_tile(pool); + struct xe_device *xe = tile_to_xe(tile); + struct drm_mm_node *sa_node = &node->sa_node; + + xe_tile_assert(tile, pool->shadow); + lockdep_assert_held(&pool->swap_guard); + + xe_map_memcpy_to(xe, &pool->shadow->vmap, + sa_node->start, + pool->cpu_addr + sa_node->start, + sa_node->size); +} + +/** + * xe_mem_pool_gpu_addr() - Retrieve GPU address of memory pool. + * @pool: the memory pool + * + * Returns: GGTT address of the memory pool. + */ +u64 xe_mem_pool_gpu_addr(struct xe_mem_pool *pool) +{ + return xe_bo_ggtt_addr(pool->bo); +} + +/** + * xe_mem_pool_cpu_addr() - Retrieve CPU address of manager pool. + * @pool: the memory pool + * + * Returns: CPU virtual address of memory pool. + */ +void *xe_mem_pool_cpu_addr(struct xe_mem_pool *pool) +{ + return pool->cpu_addr; +} + +/** + * xe_mem_pool_bo_swap_guard() - Retrieve the mutex used to guard swap + * operations on a memory pool. + * @pool: the memory pool + * + * Returns: Swap guard mutex or NULL if shadow pool is not created. + */ +struct mutex *xe_mem_pool_bo_swap_guard(struct xe_mem_pool *pool) +{ + if (!pool->shadow) + return NULL; + + return &pool->swap_guard; +} + +/** + * xe_mem_pool_bo_flush_write() - Copy the data from the sub-allocation + * to the GPU memory. + * @node: the node allocated in the memory pool to flush. + */ +void xe_mem_pool_bo_flush_write(struct xe_mem_pool_node *node) +{ + struct xe_mem_pool *pool = node_to_pool(node); + struct xe_tile *tile = pool_to_tile(pool); + struct xe_device *xe = tile_to_xe(tile); + struct drm_mm_node *sa_node = &node->sa_node; + + if (!pool->bo->vmap.is_iomem) + return; + + xe_map_memcpy_to(xe, &pool->bo->vmap, sa_node->start, + pool->cpu_addr + sa_node->start, + sa_node->size); +} + +/** + * xe_mem_pool_bo_sync_read() - Copy the data from GPU memory to the + * sub-allocation. 
+ * @node: the node allocated in the memory pool to read back. + */ +void xe_mem_pool_bo_sync_read(struct xe_mem_pool_node *node) +{ + struct xe_mem_pool *pool = node_to_pool(node); + struct xe_tile *tile = pool_to_tile(pool); + struct xe_device *xe = tile_to_xe(tile); + struct drm_mm_node *sa_node = &node->sa_node; + + if (!pool->bo->vmap.is_iomem) + return; + + xe_map_memcpy_from(xe, pool->cpu_addr + sa_node->start, + &pool->bo->vmap, sa_node->start, sa_node->size); +} + +/** + * xe_mem_pool_alloc_node() - Allocate a new node for use with xe_mem_pool. + * + * Returns: node structure or an ERR_PTR(-ENOMEM). + */ +struct xe_mem_pool_node *xe_mem_pool_alloc_node(void) +{ + struct xe_mem_pool_node *node = kzalloc_obj(*node); + + if (!node) + return ERR_PTR(-ENOMEM); + + return node; +} + +/** + * xe_mem_pool_insert_node() - Insert a node into the memory pool. + * @pool: the memory pool to insert into + * @node: the node to insert + * @size: the size of the node to be allocated in bytes. + * + * Inserts a node into the specified memory pool using drm_mm for + * allocation. + * + * Returns: 0 on success or a negative error code on failure. + */ +int xe_mem_pool_insert_node(struct xe_mem_pool *pool, + struct xe_mem_pool_node *node, u32 size) +{ + if (!pool) + return -EINVAL; + + return drm_mm_insert_node(&pool->base, &node->sa_node, size); +} + +/** + * xe_mem_pool_free_node() - Free a node allocated from the memory pool. + * @node: the node to free + * + * Returns: None. + */ +void xe_mem_pool_free_node(struct xe_mem_pool_node *node) +{ + if (!node) + return; + + drm_mm_remove_node(&node->sa_node); + kfree(node); +} + +/** + * xe_mem_pool_node_cpu_addr() - Retrieve CPU address of the node. + * @node: the node allocated in the memory pool + * + * Returns: CPU virtual address of the node. 
+ */ +void *xe_mem_pool_node_cpu_addr(struct xe_mem_pool_node *node) +{ + struct xe_mem_pool *pool = node_to_pool(node); + + return xe_mem_pool_cpu_addr(pool) + node->sa_node.start; +} + +/** + * xe_mem_pool_dump() - Dump the state of the DRM MM manager for debugging. + * @pool: the memory pool info be dumped. + * @p: The DRM printer to use for output. + * + * Only the drm managed region is dumped, not the state of the BOs or any other + * pool information. + * + * Returns: None. + */ +void xe_mem_pool_dump(struct xe_mem_pool *pool, struct drm_printer *p) +{ + drm_mm_print(&pool->base, p); +} diff --git a/drivers/gpu/drm/xe/xe_mem_pool.h b/drivers/gpu/drm/xe/xe_mem_pool.h new file mode 100644 index 00000000000000..89cd2555fe91a2 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_mem_pool.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2026 Intel Corporation + */ +#ifndef _XE_MEM_POOL_H_ +#define _XE_MEM_POOL_H_ + +#include +#include + +#include +#include "xe_mem_pool_types.h" + +struct drm_printer; +struct xe_mem_pool; +struct xe_tile; + +struct xe_mem_pool *xe_mem_pool_init(struct xe_tile *tile, u32 size, + u32 guard, int flags); +void xe_mem_pool_sync(struct xe_mem_pool *pool); +void xe_mem_pool_swap_shadow_locked(struct xe_mem_pool *pool); +void xe_mem_pool_sync_shadow_locked(struct xe_mem_pool_node *node); +u64 xe_mem_pool_gpu_addr(struct xe_mem_pool *pool); +void *xe_mem_pool_cpu_addr(struct xe_mem_pool *pool); +struct mutex *xe_mem_pool_bo_swap_guard(struct xe_mem_pool *pool); +void xe_mem_pool_bo_flush_write(struct xe_mem_pool_node *node); +void xe_mem_pool_bo_sync_read(struct xe_mem_pool_node *node); +struct xe_mem_pool_node *xe_mem_pool_alloc_node(void); +int xe_mem_pool_insert_node(struct xe_mem_pool *pool, + struct xe_mem_pool_node *node, u32 size); +void xe_mem_pool_free_node(struct xe_mem_pool_node *node); +void *xe_mem_pool_node_cpu_addr(struct xe_mem_pool_node *node); +void xe_mem_pool_dump(struct xe_mem_pool *pool, struct 
drm_printer *p); + +#endif diff --git a/drivers/gpu/drm/xe/xe_mem_pool_types.h b/drivers/gpu/drm/xe/xe_mem_pool_types.h new file mode 100644 index 00000000000000..d5e926c93351fc --- /dev/null +++ b/drivers/gpu/drm/xe/xe_mem_pool_types.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2026 Intel Corporation + */ + +#ifndef _XE_MEM_POOL_TYPES_H_ +#define _XE_MEM_POOL_TYPES_H_ + +#include + +#define XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY BIT(0) + +/** + * struct xe_mem_pool_node - Sub-range allocations from mem pool. + */ +struct xe_mem_pool_node { + /** @sa_node: drm_mm_node for this allocation. */ + struct drm_mm_node sa_node; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index fc918b4fba54b1..5fdc89ed5256b3 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -29,6 +29,7 @@ #include "xe_hw_engine.h" #include "xe_lrc.h" #include "xe_map.h" +#include "xe_mem_pool.h" #include "xe_mocs.h" #include "xe_printk.h" #include "xe_pt.h" @@ -1166,11 +1167,12 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, u32 batch_size, batch_size_allocated; struct xe_device *xe = gt_to_xe(gt); struct xe_res_cursor src_it, ccs_it; + struct xe_mem_pool *bb_pool; struct xe_sriov_vf_ccs_ctx *ctx; - struct xe_sa_manager *bb_pool; u64 size = xe_bo_size(src_bo); - struct xe_bb *bb = NULL; + struct xe_mem_pool_node *bb; u64 src_L0, src_L0_ofs; + struct xe_bb xe_bb_tmp; u32 src_L0_pt; int err; @@ -1208,18 +1210,18 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, size -= src_L0; } - bb = xe_bb_alloc(gt); + bb = xe_mem_pool_alloc_node(); if (IS_ERR(bb)) return PTR_ERR(bb); bb_pool = ctx->mem.ccs_bb_pool; - scoped_guard(mutex, xe_sa_bo_swap_guard(bb_pool)) { - xe_sa_bo_swap_shadow(bb_pool); + scoped_guard(mutex, xe_mem_pool_bo_swap_guard(bb_pool)) { + xe_mem_pool_swap_shadow_locked(bb_pool); - err = xe_bb_init(bb, bb_pool, batch_size); + err = 
xe_mem_pool_insert_node(bb_pool, bb, batch_size * sizeof(u32)); if (err) { xe_gt_err(gt, "BB allocation failed.\n"); - xe_bb_free(bb, NULL); + kfree(bb); return err; } @@ -1227,6 +1229,7 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, size = xe_bo_size(src_bo); batch_size = 0; + xe_bb_tmp = (struct xe_bb){ .cs = xe_mem_pool_node_cpu_addr(bb), .len = 0 }; /* * Emit PTE and copy commands here. * The CCS copy command can only support limited size. If the size to be @@ -1255,24 +1258,27 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE)); batch_size += EMIT_COPY_CCS_DW; - emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src); + emit_pte(m, &xe_bb_tmp, src_L0_pt, false, true, &src_it, src_L0, src); - emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); + emit_pte(m, &xe_bb_tmp, ccs_pt, false, false, &ccs_it, ccs_size, src); - bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags); - flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt, + xe_bb_tmp.len = emit_flush_invalidate(xe_bb_tmp.cs, xe_bb_tmp.len, + flush_flags); + flush_flags = xe_migrate_ccs_copy(m, &xe_bb_tmp, src_L0_ofs, src_is_pltt, src_L0_ofs, dst_is_pltt, src_L0, ccs_ofs, true); - bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags); + xe_bb_tmp.len = emit_flush_invalidate(xe_bb_tmp.cs, xe_bb_tmp.len, + flush_flags); size -= src_L0; } - xe_assert(xe, (batch_size_allocated == bb->len)); + xe_assert(xe, (batch_size_allocated == xe_bb_tmp.len)); + xe_assert(xe, bb->sa_node.size == xe_bb_tmp.len * sizeof(u32)); src_bo->bb_ccs[read_write] = bb; xe_sriov_vf_ccs_rw_update_bb_addr(ctx); - xe_sa_bo_sync_shadow(bb->bo); + xe_mem_pool_sync_shadow_locked(bb); } return 0; @@ -1297,10 +1303,10 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, void xe_migrate_ccs_rw_copy_clear(struct xe_bo *src_bo, enum xe_sriov_vf_ccs_rw_ctxs read_write) { - 
struct xe_bb *bb = src_bo->bb_ccs[read_write]; + struct xe_mem_pool_node *bb = src_bo->bb_ccs[read_write]; struct xe_device *xe = xe_bo_device(src_bo); + struct xe_mem_pool *bb_pool; struct xe_sriov_vf_ccs_ctx *ctx; - struct xe_sa_manager *bb_pool; u32 *cs; xe_assert(xe, IS_SRIOV_VF(xe)); @@ -1308,17 +1314,17 @@ void xe_migrate_ccs_rw_copy_clear(struct xe_bo *src_bo, ctx = &xe->sriov.vf.ccs.contexts[read_write]; bb_pool = ctx->mem.ccs_bb_pool; - guard(mutex) (xe_sa_bo_swap_guard(bb_pool)); - xe_sa_bo_swap_shadow(bb_pool); - - cs = xe_sa_bo_cpu_addr(bb->bo); - memset(cs, MI_NOOP, bb->len * sizeof(u32)); - xe_sriov_vf_ccs_rw_update_bb_addr(ctx); + scoped_guard(mutex, xe_mem_pool_bo_swap_guard(bb_pool)) { + xe_mem_pool_swap_shadow_locked(bb_pool); - xe_sa_bo_sync_shadow(bb->bo); + cs = xe_mem_pool_node_cpu_addr(bb); + memset(cs, MI_NOOP, bb->sa_node.size); + xe_sriov_vf_ccs_rw_update_bb_addr(ctx); - xe_bb_free(bb, NULL); - src_bo->bb_ccs[read_write] = NULL; + xe_mem_pool_sync_shadow_locked(bb); + xe_mem_pool_free_node(bb); + src_bo->bb_ccs[read_write] = NULL; + } } /** diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 01673d2b246498..9f98d033416490 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -118,6 +118,7 @@ static const struct xe_graphics_desc graphics_xe2 = { static const struct xe_graphics_desc graphics_xe3p_lpg = { XE2_GFX_FEATURES, + .has_indirect_ring_state = 1, .multi_queue_engine_class_mask = BIT(XE_ENGINE_CLASS_COPY) | BIT(XE_ENGINE_CLASS_COMPUTE), .num_geometry_xecore_fuse_regs = 3, .num_compute_xecore_fuse_regs = 3, diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c index 80577e4b7437ca..8cc313182968d3 100644 --- a/drivers/gpu/drm/xe/xe_reg_whitelist.c +++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c @@ -226,7 +226,7 @@ void xe_reg_whitelist_print_entry(struct drm_printer *p, unsigned int indent, } range_start = reg & REG_GENMASK(25, range_bit); - range_end = 
range_start | REG_GENMASK(range_bit, 0); + range_end = range_start | REG_GENMASK(range_bit - 1, 0); switch (val & RING_FORCE_TO_NONPRIV_ACCESS_MASK) { case RING_FORCE_TO_NONPRIV_ACCESS_RW: diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c index db023fb66a279e..09b99fb2608bce 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c @@ -14,9 +14,9 @@ #include "xe_guc.h" #include "xe_guc_submit.h" #include "xe_lrc.h" +#include "xe_mem_pool.h" #include "xe_migrate.h" #include "xe_pm.h" -#include "xe_sa.h" #include "xe_sriov_printk.h" #include "xe_sriov_vf.h" #include "xe_sriov_vf_ccs.h" @@ -141,43 +141,47 @@ static u64 get_ccs_bb_pool_size(struct xe_device *xe) static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx) { + struct xe_mem_pool *pool; struct xe_device *xe = tile_to_xe(tile); - struct xe_sa_manager *sa_manager; + u32 *pool_cpu_addr, *last_dw_addr; u64 bb_pool_size; - int offset, err; + int err; bb_pool_size = get_ccs_bb_pool_size(xe); xe_sriov_info(xe, "Allocating %s CCS BB pool size = %lldMB\n", ctx->ctx_id ? 
"Restore" : "Save", bb_pool_size / SZ_1M); - sa_manager = __xe_sa_bo_manager_init(tile, bb_pool_size, SZ_4K, SZ_16, - XE_SA_BO_MANAGER_FLAG_SHADOW); - - if (IS_ERR(sa_manager)) { - xe_sriov_err(xe, "Suballocator init failed with error: %pe\n", - sa_manager); - err = PTR_ERR(sa_manager); + pool = xe_mem_pool_init(tile, bb_pool_size, sizeof(u32), + XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY); + if (IS_ERR(pool)) { + xe_sriov_err(xe, "xe_mem_pool_init failed with error: %pe\n", + pool); + err = PTR_ERR(pool); return err; } - offset = 0; - xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP, - bb_pool_size); - xe_map_memset(xe, &sa_manager->shadow->vmap, offset, MI_NOOP, - bb_pool_size); + pool_cpu_addr = xe_mem_pool_cpu_addr(pool); + memset(pool_cpu_addr, 0, bb_pool_size); - offset = bb_pool_size - sizeof(u32); - xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END); - xe_map_wr(xe, &sa_manager->shadow->vmap, offset, u32, MI_BATCH_BUFFER_END); + last_dw_addr = pool_cpu_addr + (bb_pool_size / sizeof(u32)) - 1; + *last_dw_addr = MI_BATCH_BUFFER_END; - ctx->mem.ccs_bb_pool = sa_manager; + /** + * Sync the main copy and shadow copy so that the shadow copy is + * replica of main copy. We sync only BBs after init part. So, we + * need to make sure the main pool and shadow copy are in sync after + * this point. This is needed as GuC may read the BB commands from + * shadow copy. 
+ */ + xe_mem_pool_sync(pool); + ctx->mem.ccs_bb_pool = pool; return 0; } static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx) { - u64 addr = xe_sa_manager_gpu_addr(ctx->mem.ccs_bb_pool); + u64 addr = xe_mem_pool_gpu_addr(ctx->mem.ccs_bb_pool); struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q); u32 dw[10], i = 0; @@ -388,7 +392,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe) #define XE_SRIOV_VF_CCS_RW_BB_ADDR_OFFSET (2 * sizeof(u32)) void xe_sriov_vf_ccs_rw_update_bb_addr(struct xe_sriov_vf_ccs_ctx *ctx) { - u64 addr = xe_sa_manager_gpu_addr(ctx->mem.ccs_bb_pool); + u64 addr = xe_mem_pool_gpu_addr(ctx->mem.ccs_bb_pool); struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q); struct xe_device *xe = gt_to_xe(ctx->mig_q->gt); @@ -412,8 +416,8 @@ int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo) struct xe_device *xe = xe_bo_device(bo); enum xe_sriov_vf_ccs_rw_ctxs ctx_id; struct xe_sriov_vf_ccs_ctx *ctx; + struct xe_mem_pool_node *bb; struct xe_tile *tile; - struct xe_bb *bb; int err = 0; xe_assert(xe, IS_VF_CCS_READY(xe)); @@ -445,7 +449,7 @@ int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo) { struct xe_device *xe = xe_bo_device(bo); enum xe_sriov_vf_ccs_rw_ctxs ctx_id; - struct xe_bb *bb; + struct xe_mem_pool_node *bb; xe_assert(xe, IS_VF_CCS_READY(xe)); @@ -471,8 +475,8 @@ int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo) */ void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p) { - struct xe_sa_manager *bb_pool; enum xe_sriov_vf_ccs_rw_ctxs ctx_id; + struct xe_mem_pool *bb_pool; if (!IS_VF_CCS_READY(xe)) return; @@ -485,7 +489,7 @@ void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p) drm_printf(p, "ccs %s bb suballoc info\n", ctx_id ? 
"write" : "read"); drm_printf(p, "-------------------------\n"); - drm_suballoc_dump_debug_info(&bb_pool->base, p, xe_sa_manager_gpu_addr(bb_pool)); + xe_mem_pool_dump(bb_pool, p); drm_puts(p, "\n"); } } diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h index 22c499943d2a39..6fc8f97ef3f4eb 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h @@ -17,9 +17,6 @@ enum xe_sriov_vf_ccs_rw_ctxs { XE_SRIOV_VF_CCS_CTX_COUNT }; -struct xe_migrate; -struct xe_sa_manager; - /** * struct xe_sriov_vf_ccs_ctx - VF CCS migration context data. */ @@ -33,7 +30,7 @@ struct xe_sriov_vf_ccs_ctx { /** @mem: memory data */ struct { /** @mem.ccs_bb_pool: Pool from which batch buffers are allocated. */ - struct xe_sa_manager *ccs_bb_pool; + struct xe_mem_pool *ccs_bb_pool; } mem; }; diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index f8de6a4bf1897b..0b78ec2bc6a4e4 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -97,7 +97,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { { XE_RTP_NAME("Tuning: Set STLB Bank Hash Mode to 4KB"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3510, XE_RTP_END_VERSION_UNDEFINED), IS_INTEGRATED), - XE_RTP_ACTIONS(FIELD_SET(XEHP_GAMSTLB_CTRL, BANK_HASH_MODE, + XE_RTP_ACTIONS(FIELD_SET(GAMSTLB_CTRL, BANK_HASH_MODE, BANK_HASH_4KB_MODE)) }, }; diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 56e2db50bb36a6..a717a2b8dea3c9 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -3658,6 +3658,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE && op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || + XE_IOCTL_DBG(xe, !IS_DGFX(xe) && coh_mode == XE_COH_NONE && + is_cpu_addr_mirror) || XE_IOCTL_DBG(xe, xe_device_is_l2_flush_optimized(xe) && (op == 
DRM_XE_VM_BIND_OP_MAP_USERPTR || is_cpu_addr_mirror) && @@ -4156,7 +4158,8 @@ int xe_vm_get_property_ioctl(struct drm_device *drm, void *data, int ret = 0; if (XE_IOCTL_DBG(xe, (args->reserved[0] || args->reserved[1] || - args->reserved[2]))) + args->reserved[2] || args->extensions || + args->pad))) return -EINVAL; vm = xe_vm_lookup(xef, args->vm_id); diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c index 66f00d3f5c0703..c78906dea82bec 100644 --- a/drivers/gpu/drm/xe/xe_vm_madvise.c +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c @@ -621,6 +621,45 @@ static int xe_madvise_purgeable_retained_to_user(const struct xe_madvise_details return 0; } +static bool check_pat_args_are_sane(struct xe_device *xe, + struct xe_vmas_in_madvise_range *madvise_range, + u16 pat_index) +{ + u16 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index); + int i; + + /* + * Using coh_none with CPU cached buffers is not allowed on iGPU. + * On iGPU the GPU shares the LLC with the CPU, so with coh_none + * the GPU bypasses CPU caches and reads directly from DRAM, + * potentially seeing stale sensitive data from previously freed + * pages. On dGPU this restriction does not apply, because the + * platform does not provide a non-coherent system memory access + * path that would violate the DMA coherency contract. 
+ */ + if (coh_mode != XE_COH_NONE || IS_DGFX(xe)) + return true; + + for (i = 0; i < madvise_range->num_vmas; i++) { + struct xe_vma *vma = madvise_range->vmas[i]; + struct xe_bo *bo = xe_vma_bo(vma); + + if (bo) { + /* BO with WB caching + COH_NONE is not allowed */ + if (XE_IOCTL_DBG(xe, bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) + return false; + /* Imported dma-buf without caching info, assume cached */ + if (XE_IOCTL_DBG(xe, !bo->cpu_caching)) + return false; + } else if (XE_IOCTL_DBG(xe, xe_vma_is_cpu_addr_mirror(vma) || + xe_vma_is_userptr(vma))) + /* System memory (userptr/SVM) is always CPU cached */ + return false; + } + + return true; +} + static bool check_bo_args_are_sane(struct xe_vm *vm, struct xe_vma **vmas, int num_vmas, u32 atomic_val) { @@ -750,6 +789,14 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil } } + if (args->type == DRM_XE_MEM_RANGE_ATTR_PAT) { + if (!check_pat_args_are_sane(xe, &madvise_range, + args->pat_index.val)) { + err = -EINVAL; + goto free_vmas; + } + } + if (madvise_range.has_bo_vmas) { if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) { if (!check_bo_args_are_sane(vm, madvise_range.vmas, diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 546296f0220b5f..4b1cbced06bec2 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -743,14 +743,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS)) }, - { XE_RTP_NAME("14019988906"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD)) - }, - { XE_RTP_NAME("14019877138"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT)) - }, { XE_RTP_NAME("14021490052"), XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), 
XE_RTP_ACTIONS(SET(FF_MODE, diff --git a/drivers/mailbox/cix-mailbox.c b/drivers/mailbox/cix-mailbox.c index 443620e8ae37f4..43c76cdab24a20 100644 --- a/drivers/mailbox/cix-mailbox.c +++ b/drivers/mailbox/cix-mailbox.c @@ -12,8 +12,6 @@ #include #include -#include "mailbox.h" - /* * The maximum transmission size is 32 words or 128 bytes. */ @@ -405,7 +403,7 @@ static int cix_mbox_startup(struct mbox_chan *chan) int index = cp->index, ret; u32 val; - ret = request_irq(priv->irq, cix_mbox_isr, 0, + ret = request_irq(priv->irq, cix_mbox_isr, IRQF_NO_SUSPEND, dev_name(priv->dev), chan); if (ret) { dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq); @@ -415,7 +413,7 @@ static int cix_mbox_startup(struct mbox_chan *chan) switch (cp->type) { case CIX_MBOX_TYPE_DB: /* Overwrite txdone_method for DB channel */ - chan->txdone_method = TXDONE_BY_ACK; + chan->txdone_method = MBOX_TXDONE_BY_ACK; fallthrough; case CIX_MBOX_TYPE_REG: if (priv->dir == CIX_MBOX_TX) { diff --git a/drivers/mailbox/exynos-mailbox.c b/drivers/mailbox/exynos-mailbox.c index 5f2d3b81c1dbec..d2355b128ba433 100644 --- a/drivers/mailbox/exynos-mailbox.c +++ b/drivers/mailbox/exynos-mailbox.c @@ -99,7 +99,6 @@ static int exynos_mbox_probe(struct platform_device *pdev) struct mbox_controller *mbox; struct mbox_chan *chans; struct clk *pclk; - int i; exynos_mbox = devm_kzalloc(dev, sizeof(*exynos_mbox), GFP_KERNEL); if (!exynos_mbox) @@ -129,9 +128,6 @@ static int exynos_mbox_probe(struct platform_device *pdev) mbox->ops = &exynos_mbox_chan_ops; mbox->of_xlate = exynos_mbox_of_xlate; - for (i = 0; i < EXYNOS_MBOX_CHAN_COUNT; i++) - chans[i].mbox = mbox; - exynos_mbox->mbox = mbox; platform_set_drvdata(pdev, exynos_mbox); diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c index 17c29e960fbf89..9b727a2b54a57b 100644 --- a/drivers/mailbox/hi3660-mailbox.c +++ b/drivers/mailbox/hi3660-mailbox.c @@ -15,8 +15,6 @@ #include #include -#include "mailbox.h" - #define MBOX_CHAN_MAX 
32 #define MBOX_RX 0x0 diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c index f77741ce42e728..69d15b6283e97b 100644 --- a/drivers/mailbox/hi6220-mailbox.c +++ b/drivers/mailbox/hi6220-mailbox.c @@ -79,12 +79,12 @@ struct hi6220_mbox { /* region for mailbox */ void __iomem *base; - unsigned int chan_num; - struct hi6220_mbox_chan *mchan; - void *irq_map_chan[MBOX_CHAN_MAX]; struct mbox_chan *chan; struct mbox_controller controller; + + unsigned int chan_num; + struct hi6220_mbox_chan mchan[] __counted_by(chan_num); }; static void mbox_set_state(struct hi6220_mbox *mbox, @@ -267,16 +267,12 @@ static int hi6220_mbox_probe(struct platform_device *pdev) struct hi6220_mbox *mbox; int i, err; - mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); + mbox = devm_kzalloc(dev, struct_size(mbox, mchan, MBOX_CHAN_MAX), GFP_KERNEL); if (!mbox) return -ENOMEM; - mbox->dev = dev; mbox->chan_num = MBOX_CHAN_MAX; - mbox->mchan = devm_kcalloc(dev, - mbox->chan_num, sizeof(*mbox->mchan), GFP_KERNEL); - if (!mbox->mchan) - return -ENOMEM; + mbox->dev = dev; mbox->chan = devm_kcalloc(dev, mbox->chan_num, sizeof(*mbox->chan), GFP_KERNEL); diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 003f9236c35e09..246a9a9e395206 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -23,8 +23,6 @@ #include #include -#include "mailbox.h" - #define IMX_MU_CHANS 24 /* TX0/RX0/RXDB[0-3] */ #define IMX_MU_SCU_CHANS 6 @@ -734,7 +732,7 @@ static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox, p_chan = &mbox->chans[chan]; if (type == IMX_MU_TYPE_TXDB_V2) - p_chan->txdone_method = TXDONE_BY_ACK; + p_chan->txdone_method = MBOX_TXDONE_BY_ACK; return p_chan; } diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c index b4b5bdd503cfa3..b6c9ecbbc8ec6e 100644 --- a/drivers/mailbox/mailbox-sti.c +++ b/drivers/mailbox/mailbox-sti.c @@ -21,8 +21,6 @@ #include #include -#include "mailbox.h" - 
#define STI_MBOX_INST_MAX 4 /* RAM saving: Max supported instances */ #define STI_MBOX_CHAN_MAX 20 /* RAM saving: Max supported channels */ diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 3a28ab5c42e575..7b6ef033e77a9f 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -28,8 +28,6 @@ #define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \ (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE)) -static bool mbox_data_ready; - struct mbox_test_device { struct device *dev; void __iomem *tx_mmio; @@ -42,6 +40,7 @@ struct mbox_test_device { spinlock_t lock; struct mutex mutex; wait_queue_head_t waitq; + bool data_ready; struct fasync_struct *async_queue; struct dentry *root_debugfs_dir; }; @@ -162,7 +161,7 @@ static bool mbox_test_message_data_ready(struct mbox_test_device *tdev) unsigned long flags; spin_lock_irqsave(&tdev->lock, flags); - data_ready = mbox_data_ready; + data_ready = tdev->data_ready; spin_unlock_irqrestore(&tdev->lock, flags); return data_ready; @@ -227,7 +226,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf, *(touser + l) = '\0'; memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN); - mbox_data_ready = false; + tdev->data_ready = false; spin_unlock_irqrestore(&tdev->lock, flags); @@ -297,7 +296,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message) message, MBOX_MAX_MSG_LEN); memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN); } - mbox_data_ready = true; + tdev->data_ready = true; spin_unlock_irqrestore(&tdev->lock, flags); wake_up_interruptible(&tdev->waitq); @@ -336,7 +335,7 @@ mbox_test_request_channel(struct platform_device *pdev, const char *name) client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL); if (!client) - return ERR_PTR(-ENOMEM); + return NULL; client->dev = &pdev->dev; client->rx_callback = mbox_test_receive_message; @@ -355,67 +354,80 @@ mbox_test_request_channel(struct platform_device *pdev, const char *name) 
return channel; } +static void __iomem *mbox_test_ioremap(struct platform_device *pdev, unsigned int res_num) +{ + struct resource *res; + void __iomem *mmio; + + res = platform_get_resource(pdev, IORESOURCE_MEM, res_num); + if (!res) + return NULL; + + mmio = devm_ioremap_resource(&pdev->dev, res); + if (PTR_ERR(mmio) == -EBUSY) { + dev_info(&pdev->dev, "trying workaround with plain ioremap\n"); + return devm_ioremap(&pdev->dev, res->start, resource_size(res)); + } + + return IS_ERR(mmio) ? NULL : mmio; +} + static int mbox_test_probe(struct platform_device *pdev) { struct mbox_test_device *tdev; - struct resource *res; - resource_size_t size; int ret; tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); if (!tdev) return -ENOMEM; + tdev->dev = &pdev->dev; + spin_lock_init(&tdev->lock); + mutex_init(&tdev->mutex); + init_waitqueue_head(&tdev->waitq); + platform_set_drvdata(pdev, tdev); + /* It's okay for MMIO to be NULL */ - tdev->tx_mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res); - if (PTR_ERR(tdev->tx_mmio) == -EBUSY) { - /* if reserved area in SRAM, try just ioremap */ - size = resource_size(res); - tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size); - } else if (IS_ERR(tdev->tx_mmio)) { - tdev->tx_mmio = NULL; - } + tdev->tx_mmio = mbox_test_ioremap(pdev, 0); /* If specified, second reg entry is Rx MMIO */ - tdev->rx_mmio = devm_platform_get_and_ioremap_resource(pdev, 1, &res); - if (PTR_ERR(tdev->rx_mmio) == -EBUSY) { - size = resource_size(res); - tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size); - } else if (IS_ERR(tdev->rx_mmio)) { + tdev->rx_mmio = mbox_test_ioremap(pdev, 1); + if (!tdev->rx_mmio) tdev->rx_mmio = tdev->tx_mmio; - } tdev->tx_channel = mbox_test_request_channel(pdev, "tx"); tdev->rx_channel = mbox_test_request_channel(pdev, "rx"); - if (IS_ERR_OR_NULL(tdev->tx_channel) && IS_ERR_OR_NULL(tdev->rx_channel)) + if (!tdev->tx_channel && !tdev->rx_channel) return -EPROBE_DEFER; /* If Rx is not specified but 
has Rx MMIO, then Rx = Tx */ if (!tdev->rx_channel && (tdev->rx_mmio != tdev->tx_mmio)) tdev->rx_channel = tdev->tx_channel; - tdev->dev = &pdev->dev; - platform_set_drvdata(pdev, tdev); - - spin_lock_init(&tdev->lock); - mutex_init(&tdev->mutex); - if (tdev->rx_channel) { tdev->rx_buffer = devm_kzalloc(&pdev->dev, MBOX_MAX_MSG_LEN, GFP_KERNEL); - if (!tdev->rx_buffer) - return -ENOMEM; + if (!tdev->rx_buffer) { + ret = -ENOMEM; + goto err_free_chans; + } } ret = mbox_test_add_debugfs(pdev, tdev); if (ret) - return ret; + goto err_free_chans; - init_waitqueue_head(&tdev->waitq); dev_info(&pdev->dev, "Successfully registered\n"); return 0; + +err_free_chans: + if (tdev->tx_channel) + mbox_free_channel(tdev->tx_channel); + if (tdev->rx_channel && tdev->rx_channel != tdev->tx_channel) + mbox_free_channel(tdev->rx_channel); + return ret; } static void mbox_test_remove(struct platform_device *pdev) @@ -426,7 +438,7 @@ static void mbox_test_remove(struct platform_device *pdev) if (tdev->tx_channel) mbox_free_channel(tdev->tx_channel); - if (tdev->rx_channel) + if (tdev->rx_channel && tdev->rx_channel != tdev->tx_channel) mbox_free_channel(tdev->rx_channel); } diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 617ba505691d37..bbc9fd75a95f7c 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -18,8 +18,6 @@ #include #include -#include "mailbox.h" - static LIST_HEAD(mbox_cons); static DEFINE_MUTEX(con_mutex); @@ -52,7 +50,7 @@ static void msg_submit(struct mbox_chan *chan) int err = -EBUSY; scoped_guard(spinlock_irqsave, &chan->lock) { - if (!chan->msg_count || chan->active_req) + if (!chan->msg_count || chan->active_req != MBOX_NO_MSG) break; count = chan->msg_count; @@ -74,7 +72,7 @@ static void msg_submit(struct mbox_chan *chan) } } - if (!err && (chan->txdone_method & TXDONE_BY_POLL)) { + if (!err && (chan->txdone_method & MBOX_TXDONE_BY_POLL)) { /* kick start the timer immediately to avoid delays */ 
scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock) hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); @@ -87,13 +85,13 @@ static void tx_tick(struct mbox_chan *chan, int r) scoped_guard(spinlock_irqsave, &chan->lock) { mssg = chan->active_req; - chan->active_req = NULL; + chan->active_req = MBOX_NO_MSG; } /* Submit next message */ msg_submit(chan); - if (!mssg) + if (mssg == MBOX_NO_MSG) return; /* Notify the client */ @@ -114,7 +112,7 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer) for (i = 0; i < mbox->num_chans; i++) { struct mbox_chan *chan = &mbox->chans[i]; - if (chan->active_req && chan->cl) { + if (chan->active_req != MBOX_NO_MSG && chan->cl) { txdone = chan->mbox->ops->last_tx_done(chan); if (txdone) tx_tick(chan, 0); @@ -164,7 +162,7 @@ EXPORT_SYMBOL_GPL(mbox_chan_received_data); */ void mbox_chan_txdone(struct mbox_chan *chan, int r) { - if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) { + if (unlikely(!(chan->txdone_method & MBOX_TXDONE_BY_IRQ))) { dev_err(chan->mbox->dev, "Controller can't run the TX ticker\n"); return; @@ -185,7 +183,7 @@ EXPORT_SYMBOL_GPL(mbox_chan_txdone); */ void mbox_client_txdone(struct mbox_chan *chan, int r) { - if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) { + if (unlikely(!(chan->txdone_method & MBOX_TXDONE_BY_ACK))) { dev_err(chan->mbox->dev, "Client can't run the TX ticker\n"); return; } @@ -218,6 +216,29 @@ bool mbox_client_peek_data(struct mbox_chan *chan) } EXPORT_SYMBOL_GPL(mbox_client_peek_data); +/** + * mbox_chan_tx_slots_available - Query the number of available TX queue slots. + * @chan: Mailbox channel to query. + * + * Clients may call this to check how many messages can be queued via + * mbox_send_message() before the channel's TX queue is full. This helps + * clients avoid the -ENOBUFS error without needing to increase + * MBOX_TX_QUEUE_LEN. + * This can be called from atomic context. + * + * Return: Number of available slots in the channel's TX queue. 
+ */ +unsigned int mbox_chan_tx_slots_available(struct mbox_chan *chan) +{ + unsigned int ret; + + guard(spinlock_irqsave)(&chan->lock); + ret = MBOX_TX_QUEUE_LEN - chan->msg_count; + + return ret; +} +EXPORT_SYMBOL_GPL(mbox_chan_tx_slots_available); + /** * mbox_send_message - For client to submit a message to be * sent to the remote. @@ -246,7 +267,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg) { int t; - if (!chan || !chan->cl) + if (!chan || !chan->cl || mssg == MBOX_NO_MSG) return -EINVAL; t = add_to_rbuf(chan, mssg); @@ -319,12 +340,12 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl) scoped_guard(spinlock_irqsave, &chan->lock) { chan->msg_free = 0; chan->msg_count = 0; - chan->active_req = NULL; + chan->active_req = MBOX_NO_MSG; chan->cl = cl; init_completion(&chan->tx_complete); - if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) - chan->txdone_method = TXDONE_BY_ACK; + if (chan->txdone_method == MBOX_TXDONE_BY_POLL && cl->knows_txdone) + chan->txdone_method = MBOX_TXDONE_BY_ACK; } if (chan->mbox->ops->startup) { @@ -341,7 +362,7 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl) } /** - * mbox_bind_client - Request a mailbox channel. + * mbox_bind_client - Bind client to a mailbox channel. * @chan: The mailbox channel to bind the client to. * @cl: Identity of the client requesting the channel. 
* @@ -477,9 +498,9 @@ void mbox_free_channel(struct mbox_chan *chan) /* The queued TX requests are simply aborted, no callbacks are made */ scoped_guard(spinlock_irqsave, &chan->lock) { chan->cl = NULL; - chan->active_req = NULL; - if (chan->txdone_method == TXDONE_BY_ACK) - chan->txdone_method = TXDONE_BY_POLL; + chan->active_req = MBOX_NO_MSG; + if (chan->txdone_method == MBOX_TXDONE_BY_ACK) + chan->txdone_method = MBOX_TXDONE_BY_POLL; } module_put(chan->mbox->dev->driver->owner); @@ -505,18 +526,17 @@ int mbox_controller_register(struct mbox_controller *mbox) { int i, txdone; - /* Sanity check */ - if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans) + if (!mbox || !mbox->dev || !mbox->ops || !mbox->chans || !mbox->num_chans) return -EINVAL; if (mbox->txdone_irq) - txdone = TXDONE_BY_IRQ; + txdone = MBOX_TXDONE_BY_IRQ; else if (mbox->txdone_poll) - txdone = TXDONE_BY_POLL; + txdone = MBOX_TXDONE_BY_POLL; else /* It has to be ACK then */ - txdone = TXDONE_BY_ACK; + txdone = MBOX_TXDONE_BY_ACK; - if (txdone == TXDONE_BY_POLL) { + if (txdone == MBOX_TXDONE_BY_POLL) { if (!mbox->ops->last_tx_done) { dev_err(mbox->dev, "last_tx_done method is absent\n"); @@ -532,6 +552,7 @@ int mbox_controller_register(struct mbox_controller *mbox) chan->cl = NULL; chan->mbox = mbox; + chan->active_req = MBOX_NO_MSG; chan->txdone_method = txdone; spin_lock_init(&chan->lock); } diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h deleted file mode 100644 index e1ec4efab693e4..00000000000000 --- a/drivers/mailbox/mailbox.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ - -#ifndef __MAILBOX_H -#define __MAILBOX_H - -#include - -#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */ -#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */ -#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */ - -#endif /* __MAILBOX_H */ diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c 
b/drivers/mailbox/mtk-cmdq-mailbox.c index d7c6b38888a379..e523c84b48088b 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -493,14 +493,14 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) if (curr_pa == end_pa - CMDQ_INST_SIZE || curr_pa == end_pa) { /* set to this task directly */ - writel(task->pa_base >> cmdq->pdata->shift, - thread->base + CMDQ_THR_CURR_ADDR); + gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata); + writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR); } else { cmdq_task_insert_into_thread(task); smp_mb(); /* modify jump before enable thread */ } - writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift, - thread->base + CMDQ_THR_END_ADDR); + gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata); + writel(gce_addr, thread->base + CMDQ_THR_END_ADDR); cmdq_thread_resume(thread); } list_move_tail(&task->list_entry, &thread->task_busy_list); @@ -728,7 +728,7 @@ static int cmdq_probe(struct platform_device *pdev) cmdq->mbox.ops = &cmdq_mbox_chan_ops; cmdq->mbox.of_xlate = cmdq_xlate; - /* make use of TXDONE_BY_ACK */ + /* make use of MBOX_TXDONE_BY_ACK */ cmdq->mbox.txdone_irq = false; cmdq->mbox.txdone_poll = false; diff --git a/drivers/mailbox/mtk-vcp-mailbox.c b/drivers/mailbox/mtk-vcp-mailbox.c index cedad575528fb4..1b291b8ea15ac0 100644 --- a/drivers/mailbox/mtk-vcp-mailbox.c +++ b/drivers/mailbox/mtk-vcp-mailbox.c @@ -50,7 +50,7 @@ static struct mbox_chan *mtk_vcp_mbox_xlate(struct mbox_controller *mbox, const struct of_phandle_args *sp) { if (sp->args_count) - return NULL; + return ERR_PTR(-EINVAL); return &mbox->chans[0]; } diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c index d9f100c1889522..535ca8020877d2 100644 --- a/drivers/mailbox/omap-mailbox.c +++ b/drivers/mailbox/omap-mailbox.c @@ -22,8 +22,6 @@ #include #include -#include "mailbox.h" - #define MAILBOX_REVISION 0x000 #define MAILBOX_MESSAGE(m) 
(0x040 + 4 * (m)) #define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m)) @@ -240,7 +238,7 @@ static int omap_mbox_startup(struct omap_mbox *mbox) } if (mbox->send_no_irq) - mbox->chan->txdone_method = TXDONE_BY_ACK; + mbox->chan->txdone_method = MBOX_TXDONE_BY_ACK; omap_mbox_enable_irq(mbox, IRQ_RX); diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 22e70af1ae5d14..636879ae1db76b 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -59,8 +59,6 @@ #include #include -#include "mailbox.h" - #define MBOX_IRQ_NAME "pcc-mbox" /** diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c index 4d966cb2ed0367..a1a7dee643567c 100644 --- a/drivers/mailbox/rockchip-mailbox.c +++ b/drivers/mailbox/rockchip-mailbox.c @@ -46,7 +46,7 @@ struct rockchip_mbox { /* The maximum size of buf for each channel */ u32 buf_size; - struct rockchip_mbox_chan *chans; + struct rockchip_mbox_chan chans[]; }; static int rockchip_mbox_send_data(struct mbox_chan *chan, void *data) @@ -173,15 +173,10 @@ static int rockchip_mbox_probe(struct platform_device *pdev) drv_data = (const struct rockchip_mbox_data *) device_get_match_data(&pdev->dev); - mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL); + mb = devm_kzalloc(&pdev->dev, struct_size(mb, chans, drv_data->num_chans), GFP_KERNEL); if (!mb) return -ENOMEM; - mb->chans = devm_kcalloc(&pdev->dev, drv_data->num_chans, - sizeof(*mb->chans), GFP_KERNEL); - if (!mb->chans) - return -ENOMEM; - mb->mbox.chans = devm_kcalloc(&pdev->dev, drv_data->num_chans, sizeof(*mb->mbox.chans), GFP_KERNEL); if (!mb->mbox.chans) diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index ed9a0bb2bcd844..500fa77c7d53c9 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -16,8 +16,6 @@ #include -#include "mailbox.h" - #define HSP_INT_IE(x) (0x100 + ((x) * 4)) #define HSP_INT_IV 0x300 #define HSP_INT_IR 0x304 @@ -497,7 +495,7 @@ static int tegra_hsp_mailbox_flush(struct 
mbox_chan *chan, mbox_chan_txdone(chan, 0); /* Wait until channel is empty */ - if (chan->active_req != NULL) + if (chan->active_req != MBOX_NO_MSG) continue; return 0; @@ -516,7 +514,7 @@ static int tegra_hsp_mailbox_startup(struct mbox_chan *chan) struct tegra_hsp *hsp = mb->channel.hsp; unsigned long flags; - chan->txdone_method = TXDONE_BY_IRQ; + chan->txdone_method = MBOX_TXDONE_BY_IRQ; /* * Shared mailboxes start out as consumers by default. FULL and EMPTY diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 83378c033c7285..028b9ca8ce52db 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -216,6 +216,7 @@ struct bitmap { }; static struct workqueue_struct *md_bitmap_wq; +static struct attribute_group md_bitmap_internal_group; static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks, int chunksize, bool init); @@ -2580,6 +2581,30 @@ static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize) return __bitmap_resize(bitmap, blocks, chunksize, false); } +static bool bitmap_none_enabled(void *data, bool flush) +{ + return false; +} + +static int bitmap_none_create(struct mddev *mddev) +{ + return 0; +} + +static int bitmap_none_load(struct mddev *mddev) +{ + return 0; +} + +static void bitmap_none_destroy(struct mddev *mddev) +{ +} + +static int bitmap_none_get_stats(void *data, struct md_bitmap_stats *stats) +{ + return -ENOENT; +} + static ssize_t location_show(struct mddev *mddev, char *page) { @@ -2618,7 +2643,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len) goto out; } - bitmap_destroy(mddev); + sysfs_unmerge_group(&mddev->kobj, &md_bitmap_internal_group); + md_bitmap_destroy_nosysfs(mddev); + mddev->bitmap_id = ID_BITMAP_NONE; + if (!mddev_set_bitmap_ops_nosysfs(mddev)) + goto none_err; mddev->bitmap_info.offset = 0; if (mddev->bitmap_info.file) { struct file *f = mddev->bitmap_info.file; @@ -2654,16 +2683,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len) } 
mddev->bitmap_info.offset = offset; - rv = bitmap_create(mddev); + md_bitmap_destroy_nosysfs(mddev); + mddev->bitmap_id = ID_BITMAP; + if (!mddev_set_bitmap_ops_nosysfs(mddev)) + goto bitmap_err; + + rv = md_bitmap_create_nosysfs(mddev); if (rv) - goto out; + goto create_err; - rv = bitmap_load(mddev); + rv = mddev->bitmap_ops->load(mddev); if (rv) { mddev->bitmap_info.offset = 0; - bitmap_destroy(mddev); - goto out; + goto load_err; } + + rv = sysfs_merge_group(&mddev->kobj, + &md_bitmap_internal_group); + if (rv) + goto merge_err; } } if (!mddev->external) { @@ -2679,6 +2717,22 @@ location_store(struct mddev *mddev, const char *buf, size_t len) if (rv) return rv; return len; + +merge_err: + mddev->bitmap_info.offset = 0; +load_err: + md_bitmap_destroy_nosysfs(mddev); +create_err: + mddev->bitmap_info.offset = 0; + mddev->bitmap_id = ID_BITMAP_NONE; + if (!mddev_set_bitmap_ops_nosysfs(mddev)) + rv = -ENOENT; + goto out; +bitmap_err: + rv = -ENOENT; +none_err: + mddev->bitmap_info.offset = 0; + goto out; } static struct md_sysfs_entry bitmap_location = @@ -2955,8 +3009,12 @@ static struct md_sysfs_entry max_backlog_used = __ATTR(max_backlog_used, S_IRUGO | S_IWUSR, behind_writes_used_show, behind_writes_used_reset); -static struct attribute *md_bitmap_attrs[] = { +static struct attribute *md_bitmap_common_attrs[] = { &bitmap_location.attr, + NULL +}; + +static struct attribute *md_bitmap_internal_attrs[] = { &bitmap_space.attr, &bitmap_timeout.attr, &bitmap_backlog.attr, @@ -2967,9 +3025,41 @@ static struct attribute *md_bitmap_attrs[] = { NULL }; -static struct attribute_group md_bitmap_group = { +static struct attribute_group md_bitmap_common_group = { .name = "bitmap", - .attrs = md_bitmap_attrs, + .attrs = md_bitmap_common_attrs, +}; + +static struct attribute_group md_bitmap_internal_group = { + .name = "bitmap", + .attrs = md_bitmap_internal_attrs, +}; + +static const struct attribute_group *bitmap_groups[] = { + &md_bitmap_common_group, + 
&md_bitmap_internal_group, + NULL, +}; + +static const struct attribute_group *bitmap_none_groups[] = { + &md_bitmap_common_group, + NULL, +}; + +static struct bitmap_operations bitmap_none_ops = { + .head = { + .type = MD_BITMAP, + .id = ID_BITMAP_NONE, + .name = "none", + }, + + .enabled = bitmap_none_enabled, + .create = bitmap_none_create, + .load = bitmap_none_load, + .destroy = bitmap_none_destroy, + .get_stats = bitmap_none_get_stats, + + .groups = bitmap_none_groups, }; static struct bitmap_operations bitmap_ops = { @@ -3013,21 +3103,38 @@ static struct bitmap_operations bitmap_ops = { .set_pages = bitmap_set_pages, .free = md_bitmap_free, - .group = &md_bitmap_group, + .groups = bitmap_groups, }; int md_bitmap_init(void) { + int err; + md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!md_bitmap_wq) return -ENOMEM; - return register_md_submodule(&bitmap_ops.head); + err = register_md_submodule(&bitmap_none_ops.head); + if (err) + goto err_wq; + + err = register_md_submodule(&bitmap_ops.head); + if (err) + goto err_none; + + return 0; + +err_none: + unregister_md_submodule(&bitmap_none_ops.head); +err_wq: + destroy_workqueue(md_bitmap_wq); + return err; } void md_bitmap_exit(void) { - destroy_workqueue(md_bitmap_wq); unregister_md_submodule(&bitmap_ops.head); + unregister_md_submodule(&bitmap_none_ops.head); + destroy_workqueue(md_bitmap_wq); } diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h index b42a28fa83a0f5..214f623c7e790b 100644 --- a/drivers/md/md-bitmap.h +++ b/drivers/md/md-bitmap.h @@ -125,7 +125,7 @@ struct bitmap_operations { void (*set_pages)(void *data, unsigned long pages); void (*free)(void *data); - struct attribute_group *group; + const struct attribute_group **groups; }; /* the bitmap API */ diff --git a/drivers/md/md-llbitmap.c b/drivers/md/md-llbitmap.c index 9e7e6b1a6f1575..1adc5b11782164 100644 --- a/drivers/md/md-llbitmap.c +++ b/drivers/md/md-llbitmap.c @@ -1738,6 +1738,11 @@ static struct 
attribute_group md_llbitmap_group = { .attrs = md_llbitmap_attrs, }; +static const struct attribute_group *md_llbitmap_groups[] = { + &md_llbitmap_group, + NULL, +}; + static struct bitmap_operations llbitmap_ops = { .head = { .type = MD_BITMAP, @@ -1774,7 +1779,7 @@ static struct bitmap_operations llbitmap_ops = { .dirty_bits = llbitmap_dirty_bits, .write_all = llbitmap_write_all, - .group = &md_llbitmap_group, + .groups = md_llbitmap_groups, }; int md_llbitmap_init(void) diff --git a/drivers/md/md.c b/drivers/md/md.c index 5fb5ae8368bac2..8b568eee87433a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -396,27 +396,19 @@ bool md_handle_request(struct mddev *mddev, struct bio *bio) { check_suspended: if (is_suspended(mddev, bio)) { - DEFINE_WAIT(__wait); /* Bail out if REQ_NOWAIT is set for the bio */ if (bio->bi_opf & REQ_NOWAIT) { bio_wouldblock_error(bio); return true; } - for (;;) { - prepare_to_wait(&mddev->sb_wait, &__wait, - TASK_UNINTERRUPTIBLE); - if (!is_suspended(mddev, bio)) - break; - schedule(); - } - finish_wait(&mddev->sb_wait, &__wait); + wait_event(mddev->sb_wait, !is_suspended(mddev, bio)); } if (!percpu_ref_tryget_live(&mddev->active_io)) goto check_suspended; if (!mddev->pers->make_request(mddev, bio)) { percpu_ref_put(&mddev->active_io); - if (!mddev->gendisk && mddev->pers->prepare_suspend) + if (mddev_is_dm(mddev) && mddev->pers->prepare_suspend) return false; goto check_suspended; } @@ -687,13 +679,38 @@ static void active_io_release(struct percpu_ref *ref) static void no_op(struct percpu_ref *r) {} -static bool mddev_set_bitmap_ops(struct mddev *mddev) +static void md_bitmap_sysfs_add(struct mddev *mddev) +{ + if (sysfs_update_groups(&mddev->kobj, mddev->bitmap_ops->groups)) + pr_warn("md: cannot register extra bitmap attributes for %s\n", + mdname(mddev)); + else + /* + * Inform user with KOBJ_CHANGE about new bitmap + * attributes. 
+ */ + kobject_uevent(&mddev->kobj, KOBJ_CHANGE); +} + +static void md_bitmap_sysfs_del(struct mddev *mddev) +{ + int nr_groups = 0; + + for (nr_groups = 0; mddev->bitmap_ops->groups[nr_groups]; nr_groups++) + ; + + while (--nr_groups >= 1) + sysfs_unmerge_group(&mddev->kobj, + mddev->bitmap_ops->groups[nr_groups]); + sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->groups[0]); +} + +bool mddev_set_bitmap_ops_nosysfs(struct mddev *mddev) { - struct bitmap_operations *old = mddev->bitmap_ops; struct md_submodule_head *head; - if (mddev->bitmap_id == ID_BITMAP_NONE || - (old && old->head.id == mddev->bitmap_id)) + if (mddev->bitmap_ops && + mddev->bitmap_ops->head.id == mddev->bitmap_id) return true; xa_lock(&md_submodule); @@ -711,18 +728,6 @@ static bool mddev_set_bitmap_ops(struct mddev *mddev) mddev->bitmap_ops = (void *)head; xa_unlock(&md_submodule); - - if (!mddev_is_dm(mddev) && mddev->bitmap_ops->group) { - if (sysfs_create_group(&mddev->kobj, mddev->bitmap_ops->group)) - pr_warn("md: cannot register extra bitmap attributes for %s\n", - mdname(mddev)); - else - /* - * Inform user with KOBJ_CHANGE about new bitmap - * attributes. 
- */ - kobject_uevent(&mddev->kobj, KOBJ_CHANGE); - } return true; err: @@ -730,15 +735,6 @@ static bool mddev_set_bitmap_ops(struct mddev *mddev) return false; } -static void mddev_clear_bitmap_ops(struct mddev *mddev) -{ - if (!mddev_is_dm(mddev) && mddev->bitmap_ops && - mddev->bitmap_ops->group) - sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->group); - - mddev->bitmap_ops = NULL; -} - int mddev_init(struct mddev *mddev) { int err = 0; @@ -4279,7 +4275,7 @@ bitmap_type_show(struct mddev *mddev, char *page) xa_lock(&md_submodule); xa_for_each(&md_submodule, i, head) { - if (head->type != MD_BITMAP) + if (head->type != MD_BITMAP || head->id == ID_BITMAP_NONE) continue; if (mddev->bitmap_id == head->id) @@ -6059,10 +6055,7 @@ static struct attribute *md_default_attrs[] = { &md_logical_block_size.attr, NULL, }; - -static const struct attribute_group md_default_group = { - .attrs = md_default_attrs, -}; +ATTRIBUTE_GROUPS(md_default); static struct attribute *md_redundancy_attrs[] = { &md_scan_mode.attr, @@ -6087,11 +6080,6 @@ static const struct attribute_group md_redundancy_group = { .attrs = md_redundancy_attrs, }; -static const struct attribute_group *md_attr_groups[] = { - &md_default_group, - NULL, -}; - static ssize_t md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { @@ -6174,7 +6162,7 @@ static const struct sysfs_ops md_sysfs_ops = { static const struct kobj_type md_ktype = { .release = md_kobj_release, .sysfs_ops = &md_sysfs_ops, - .default_groups = md_attr_groups, + .default_groups = md_default_groups, }; int mdp_major = 0; @@ -6539,7 +6527,7 @@ static enum md_submodule_id md_bitmap_get_id_from_sb(struct mddev *mddev) return id; } -static int md_bitmap_create(struct mddev *mddev) +int md_bitmap_create_nosysfs(struct mddev *mddev) { enum md_submodule_id orig_id = mddev->bitmap_id; enum md_submodule_id sb_id; @@ -6548,8 +6536,10 @@ static int md_bitmap_create(struct mddev *mddev) if (mddev->bitmap_id == ID_BITMAP_NONE) return 
-EINVAL; - if (!mddev_set_bitmap_ops(mddev)) + if (!mddev_set_bitmap_ops_nosysfs(mddev)) { + mddev->bitmap_id = orig_id; return -ENOENT; + } err = mddev->bitmap_ops->create(mddev); if (!err) @@ -6560,37 +6550,72 @@ static int md_bitmap_create(struct mddev *mddev) * doesn't match, and mdadm is not the latest version to set * bitmap_type, set bitmap_ops based on the disk version. */ - mddev_clear_bitmap_ops(mddev); + mddev->bitmap_ops = NULL; sb_id = md_bitmap_get_id_from_sb(mddev); - if (sb_id == ID_BITMAP_NONE || sb_id == orig_id) + if (sb_id == ID_BITMAP_NONE || sb_id == orig_id) { + mddev->bitmap_id = orig_id; return err; + } pr_info("md: %s: bitmap version mismatch, switching from %d to %d\n", mdname(mddev), orig_id, sb_id); mddev->bitmap_id = sb_id; - if (!mddev_set_bitmap_ops(mddev)) { + if (!mddev_set_bitmap_ops_nosysfs(mddev)) { mddev->bitmap_id = orig_id; return -ENOENT; } err = mddev->bitmap_ops->create(mddev); if (err) { - mddev_clear_bitmap_ops(mddev); + mddev->bitmap_ops = NULL; mddev->bitmap_id = orig_id; } return err; } -static void md_bitmap_destroy(struct mddev *mddev) +static int md_bitmap_create(struct mddev *mddev) +{ + int err; + + err = md_bitmap_create_nosysfs(mddev); + if (err) + return err; + + if (!mddev_is_dm(mddev) && mddev->bitmap_ops->groups) + md_bitmap_sysfs_add(mddev); + + return 0; +} + +void md_bitmap_destroy_nosysfs(struct mddev *mddev) { if (!md_bitmap_registered(mddev)) return; mddev->bitmap_ops->destroy(mddev); - mddev_clear_bitmap_ops(mddev); + mddev->bitmap_ops = NULL; +} + +static void md_bitmap_destroy(struct mddev *mddev) +{ + if (!mddev_is_dm(mddev) && mddev->bitmap_ops && + mddev->bitmap_ops->groups) + md_bitmap_sysfs_del(mddev); + + md_bitmap_destroy_nosysfs(mddev); +} + +static void md_bitmap_set_none(struct mddev *mddev) +{ + mddev->bitmap_id = ID_BITMAP_NONE; + if (!mddev_set_bitmap_ops_nosysfs(mddev)) + return; + + if (!mddev_is_dm(mddev) && mddev->bitmap_ops->groups) + md_bitmap_sysfs_add(mddev); } int 
md_run(struct mddev *mddev) @@ -6713,7 +6738,7 @@ int md_run(struct mddev *mddev) } /* dm-raid expect sync_thread to be frozen until resume */ - if (mddev->gendisk) + if (!mddev_is_dm(mddev)) mddev->recovery = 0; /* may be over-ridden by personality */ @@ -6802,6 +6827,10 @@ int md_run(struct mddev *mddev) if (mddev->sb_flags) md_update_sb(mddev, 0); + if (IS_ENABLED(CONFIG_MD_BITMAP) && !mddev->bitmap_info.file && + !mddev->bitmap_info.offset) + md_bitmap_set_none(mddev); + md_new_event(); return 0; @@ -7747,7 +7776,8 @@ static int set_bitmap_file(struct mddev *mddev, int fd) { int err = 0; - if (!md_bitmap_registered(mddev)) + if (!md_bitmap_registered(mddev) || + mddev->bitmap_id == ID_BITMAP_NONE) return -EINVAL; if (mddev->pers) { @@ -7812,10 +7842,12 @@ static int set_bitmap_file(struct mddev *mddev, int fd) if (err) { md_bitmap_destroy(mddev); + md_bitmap_set_none(mddev); fd = -1; } } else if (fd < 0) { md_bitmap_destroy(mddev); + md_bitmap_set_none(mddev); } } @@ -8122,12 +8154,16 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->bitmap_info.default_offset; mddev->bitmap_info.space = mddev->bitmap_info.default_space; + mddev->bitmap_id = ID_BITMAP; rv = md_bitmap_create(mddev); if (!rv) rv = mddev->bitmap_ops->load(mddev); - if (rv) + if (rv) { md_bitmap_destroy(mddev); + mddev->bitmap_info.offset = 0; + md_bitmap_set_none(mddev); + } } else { struct md_bitmap_stats stats; @@ -8155,6 +8191,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) } md_bitmap_destroy(mddev); mddev->bitmap_info.offset = 0; + md_bitmap_set_none(mddev); } } md_update_sb(mddev, 1); @@ -9341,9 +9378,11 @@ static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone) static void md_end_clone_io(struct bio *bio) { - struct md_io_clone *md_io_clone = bio->bi_private; + struct md_io_clone *md_io_clone = container_of(bio, struct md_io_clone, + bio_clone); struct bio *orig_bio = md_io_clone->orig_bio; struct mddev 
*mddev = md_io_clone->mddev; + struct completion *reshape_completion = bio->bi_private; if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false)) md_bitmap_end(mddev, md_io_clone); @@ -9355,7 +9394,10 @@ static void md_end_clone_io(struct bio *bio) bio_end_io_acct(orig_bio, md_io_clone->start_time); bio_put(bio); - bio_endio(orig_bio); + if (unlikely(reshape_completion)) + complete(reshape_completion); + else + bio_endio(orig_bio); percpu_ref_put(&mddev->active_io); } @@ -9380,7 +9422,7 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio) } clone->bi_end_io = md_end_clone_io; - clone->bi_private = md_io_clone; + clone->bi_private = NULL; *bio = clone; } @@ -9391,26 +9433,6 @@ void md_account_bio(struct mddev *mddev, struct bio **bio) } EXPORT_SYMBOL_GPL(md_account_bio); -void md_free_cloned_bio(struct bio *bio) -{ - struct md_io_clone *md_io_clone = bio->bi_private; - struct bio *orig_bio = md_io_clone->orig_bio; - struct mddev *mddev = md_io_clone->mddev; - - if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false)) - md_bitmap_end(mddev, md_io_clone); - - if (bio->bi_status && !orig_bio->bi_status) - orig_bio->bi_status = bio->bi_status; - - if (md_io_clone->start_time) - bio_end_io_acct(orig_bio, md_io_clone->start_time); - - bio_put(bio); - percpu_ref_put(&mddev->active_io); -} -EXPORT_SYMBOL_GPL(md_free_cloned_bio); - /* md_allow_write(mddev) * Calling this ensures that the array is marked 'active' so that writes * may proceed without blocking. 
It is important to call this before diff --git a/drivers/md/md.h b/drivers/md/md.h index d6f5482e247908..52c37808604646 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -920,7 +920,6 @@ extern void md_finish_reshape(struct mddev *mddev); void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio, sector_t start, sector_t size); void md_account_bio(struct mddev *mddev, struct bio **bio); -void md_free_cloned_bio(struct bio *bio); extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev, @@ -935,6 +934,9 @@ extern void md_allow_write(struct mddev *mddev); extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); extern int md_check_no_bitmap(struct mddev *mddev); +bool mddev_set_bitmap_ops_nosysfs(struct mddev *mddev); +int md_bitmap_create_nosysfs(struct mddev *mddev); +void md_bitmap_destroy_nosysfs(struct mddev *mddev); extern int md_integrity_register(struct mddev *mddev); extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); @@ -1015,7 +1017,7 @@ static inline int mddev_suspend_and_lock(struct mddev *mddev) static inline void mddev_suspend_and_lock_nointr(struct mddev *mddev) { mddev_suspend(mddev, false); - mutex_lock(&mddev->reconfig_mutex); + mddev_lock_nointr(mddev); } static inline void mddev_unlock_and_resume(struct mddev *mddev) diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 942cd47eb52dac..aeec5b9a1dd5c5 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -490,12 +490,20 @@ static int rebalance_children(struct shadow_spine *s, if (le32_to_cpu(n->header.nr_entries) == 1) { struct dm_block *child; + int is_shared; dm_block_t b = value64(n, 0); + r = dm_tm_block_is_shared(info->tm, b, 
&is_shared); + if (r) + return r; + r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child); if (r) return r; + if (is_shared) + inc_children(info->tm, dm_block_data(child), vt); + memcpy(n, dm_block_data(child), dm_bm_block_size(dm_tm_get_bm(info->tm))); diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index c33099925f230e..56a56a4da4f83f 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -293,8 +293,13 @@ static inline bool raid1_should_read_first(struct mddev *mddev, * bio with REQ_RAHEAD or REQ_NOWAIT can fail at anytime, before such IO is * submitted to the underlying disks, hence don't record badblocks or retry * in this case. + * + * BLK_STS_INVAL means the bio was not valid for the underlying device. This + * is a user error, not a device failure, so retrying or recording bad blocks + * would be wrong. */ static inline bool raid1_should_handle_error(struct bio *bio) { - return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT)); + return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT)) && + bio->bi_status != BLK_STS_INVAL; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index ba91f7e61920d4..64d970e2ef50fd 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1510,21 +1510,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, mddev->cluster_ops->area_resyncing(mddev, WRITE, bio->bi_iter.bi_sector, bio_end_sector(bio))) { - DEFINE_WAIT(w); if (bio->bi_opf & REQ_NOWAIT) { bio_wouldblock_error(bio); return; } - for (;;) { - prepare_to_wait(&conf->wait_barrier, - &w, TASK_IDLE); - if (!mddev->cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, - bio_end_sector(bio))) - break; - schedule(); - } - finish_wait(&conf->wait_barrier, &w); + wait_event_idle(conf->wait_barrier, + !mddev->cluster_ops->area_resyncing(mddev, WRITE, + bio->bi_iter.bi_sector, + bio_end_sector(bio))); } /* diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 4901ebe45c8755..39085e7dd6d26d 100644 --- 
a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3791,6 +3791,8 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) nc = layout & 255; fc = (layout >> 8) & 255; fo = layout & (1<<16); + if (!nc || !fc) + return -1; geo->raid_disks = disks; geo->near_copies = nc; geo->far_copies = fc; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6e79829c5acb67..0d76e82f4506e6 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6217,7 +6217,12 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) mempool_free(ctx, conf->ctx_pool); if (res == STRIPE_WAIT_RESHAPE) { - md_free_cloned_bio(bi); + DECLARE_COMPLETION_ONSTACK(done); + WRITE_ONCE(bi->bi_private, &done); + + bio_endio(bi); + + wait_for_completion(&done); return false; } diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 8aa3753aaaa1d3..0b076790bd9df6 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -100,6 +100,17 @@ spinand_fill_page_read_op(struct spinand_device *spinand, u64 addr) return op; } +static struct spi_mem_op +spinand_fill_page_read_packed_op(struct spinand_device *spinand, u64 addr) +{ + struct spi_mem_op op = spinand->op_templates->page_read; + + op.cmd.opcode |= addr >> 16; + op.addr.val = addr & 0xFFFF; + + return op; +} + struct spi_mem_op spinand_fill_prog_exec_op(struct spinand_device *spinand, u64 addr) { @@ -453,7 +464,10 @@ static int spinand_load_page_op(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); unsigned int row = nanddev_pos_to_row(nand, &req->pos); - struct spi_mem_op op = SPINAND_OP(spinand, page_read, row); + bool packed = spinand->flags & SPINAND_ODTR_PACKED_PAGE_READ; + struct spi_mem_op op = packed ? 
+ SPINAND_OP(spinand, page_read_packed, row) : + SPINAND_OP(spinand, page_read, row); return spi_mem_exec_op(spinand->spimem, &op); } @@ -1489,9 +1503,13 @@ static int spinand_init_odtr_instruction_set(struct spinand_device *spinand) if (!spi_mem_supports_op(spinand->spimem, &tmpl->blk_erase)) return -EOPNOTSUPP; - tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_8D_8D_0_OP(0); - if (!spi_mem_supports_op(spinand->spimem, &tmpl->page_read)) + if (spinand->flags & SPINAND_ODTR_PACKED_PAGE_READ) + tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_PACKED_8D_8D_0_OP(0); + else + tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_8D_8D_0_OP(0); + if (!spi_mem_supports_op(spinand->spimem, &tmpl->page_read)) { return -EOPNOTSUPP; + } tmpl->prog_exec = (struct spi_mem_op)SPINAND_PROG_EXEC_8D_8D_0_OP(0); if (!spi_mem_supports_op(spinand->spimem, &tmpl->prog_exec)) diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index ad22774096e612..7cc0f0091430c1 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -99,7 +99,7 @@ static SPINAND_OP_VARIANTS(update_cache_variants, #define SPINAND_WINBOND_WRITE_VCR_8D_8D_8D(reg, buf) \ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x81, 8), \ - SPI_MEM_DTR_OP_ADDR(4, reg, 8), \ + SPI_MEM_DTR_OP_ADDR(4, reg << 8, 8), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_DTR_OP_DATA_OUT(2, buf, 8)) @@ -518,7 +518,7 @@ static const struct spinand_info winbond_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, &write_cache_octal_variants, &update_cache_octal_variants), - 0, + SPINAND_ODTR_PACKED_PAGE_READ, SPINAND_INFO_VENDOR_OPS(&winbond_w35_ops), SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL), SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)), @@ -529,7 +529,7 @@ static const struct spinand_info winbond_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, &write_cache_octal_variants, &update_cache_octal_variants), - 0, + SPINAND_ODTR_PACKED_PAGE_READ, 
SPINAND_INFO_VENDOR_OPS(&winbond_w35_ops), SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL), SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)), diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c index fa6956144d2e44..14ba1680c31547 100644 --- a/drivers/mtd/spi-nor/debugfs.c +++ b/drivers/mtd/spi-nor/debugfs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include #include @@ -92,7 +93,8 @@ static int spi_nor_params_show(struct seq_file *s, void *data) seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes); seq_puts(s, "flags\t\t"); - spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names)); + spi_nor_print_flags(s, nor->flags, snor_f_names, + ARRAY_SIZE(snor_f_names)); seq_puts(s, "\n"); seq_puts(s, "\nopcodes\n"); diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 0df3208783ad9e..da5866ba069995 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -529,6 +529,9 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, struct in6_addr saddr; struct socket *sock = rcu_dereference(bareudp->sock); + if (!sock) + return -ESHUTDOWN; + dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr, &info->key, sport, bareudp->port, info->key.tos, diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index af7f74cfdc086c..f0aa7d2f21717a 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1029,6 +1029,7 @@ static void ad_cond_set_peer_notif(struct port *port) static void ad_mux_machine(struct port *port, bool *update_slave_arr) { struct bonding *bond = __get_bond_by_port(port); + struct aggregator *aggregator; mux_states_t last_state; /* keep current State Machine state to compare later if it was @@ -1036,6 +1037,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) */ last_state = port->sm_mux_state; + aggregator = rcu_dereference(port->aggregator); if (port->sm_vars & AD_PORT_BEGIN) { port->sm_mux_state = 
AD_MUX_DETACHED; } else { @@ -1055,7 +1057,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) * cycle to update ready variable, we check * READY_N and update READY here */ - __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); + __set_agg_ports_ready(aggregator, __agg_ports_are_ready(aggregator)); port->sm_mux_state = AD_MUX_DETACHED; break; } @@ -1070,7 +1072,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) * update ready variable, we check READY_N and update * READY here */ - __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); + __set_agg_ports_ready(aggregator, __agg_ports_are_ready(aggregator)); /* if the wait_while_timer expired, and the port is * in READY state, move to ATTACHED state @@ -1086,7 +1088,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) { - if (port->aggregator->is_active) { + if (aggregator->is_active) { int state = AD_MUX_COLLECTING_DISTRIBUTING; if (!bond->params.coupled_control) @@ -1102,9 +1104,9 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) * cycle to update ready variable, we check * READY_N and update READY here */ - __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); + __set_agg_ports_ready(aggregator, __agg_ports_are_ready(aggregator)); port->sm_mux_state = AD_MUX_DETACHED; - } else if (port->aggregator->is_active) { + } else if (aggregator->is_active) { port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; } @@ -1115,7 +1117,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) * sure that a collecting distributing * port in an active aggregator is enabled */ - if (port->aggregator->is_active && + if (aggregator->is_active && !__port_is_collecting_distributing(port)) { 
__enable_port(port); *update_slave_arr = true; @@ -1134,7 +1136,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) */ struct slave *slave = port->slave; - if (port->aggregator->is_active && + if (aggregator->is_active && bond_is_slave_rx_disabled(slave)) { ad_enable_collecting(port); *update_slave_arr = true; @@ -1154,8 +1156,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) * sure that a collecting distributing * port in an active aggregator is enabled */ - if (port->aggregator && - port->aggregator->is_active && + if (aggregator && + aggregator->is_active && !__port_is_collecting_distributing(port)) { __enable_port(port); *update_slave_arr = true; @@ -1187,7 +1189,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0); break; case AD_MUX_ATTACHED: - if (port->aggregator->is_active) + if (aggregator->is_active) port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; else @@ -1561,9 +1563,9 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) bond = __get_bond_by_port(port); /* if the port is connected to other aggregator, detach it */ - if (port->aggregator) { + temp_aggregator = rcu_dereference(port->aggregator); + if (temp_aggregator) { /* detach the port from its former aggregator */ - temp_aggregator = port->aggregator; for (curr_port = temp_aggregator->lag_ports; curr_port; last_port = curr_port, curr_port = curr_port->next_port_in_aggregator) { @@ -1586,7 +1588,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) /* clear the port's relations to this * aggregator */ - port->aggregator = NULL; + RCU_INIT_POINTER(port->aggregator, NULL); port->next_port_in_aggregator = NULL; port->actor_port_aggregator_identifier = 0; @@ -1609,7 +1611,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) port->slave->bond->dev->name, 
port->slave->dev->name, port->actor_port_number, - port->aggregator->aggregator_identifier); + temp_aggregator->aggregator_identifier); } } /* search on all aggregators for a suitable aggregator for this port */ @@ -1633,15 +1635,15 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) ) ) { /* attach to the founded aggregator */ - port->aggregator = aggregator; + rcu_assign_pointer(port->aggregator, aggregator); port->actor_port_aggregator_identifier = - port->aggregator->aggregator_identifier; + aggregator->aggregator_identifier; port->next_port_in_aggregator = aggregator->lag_ports; - port->aggregator->num_of_ports++; + aggregator->num_of_ports++; aggregator->lag_ports = port; slave_dbg(bond->dev, slave->dev, "Port %d joined LAG %d (existing LAG)\n", port->actor_port_number, - port->aggregator->aggregator_identifier); + aggregator->aggregator_identifier); /* mark this port as selected */ port->sm_vars |= AD_PORT_SELECTED; @@ -1656,39 +1658,40 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) if (!found) { if (free_aggregator) { /* assign port a new aggregator */ - port->aggregator = free_aggregator; port->actor_port_aggregator_identifier = - port->aggregator->aggregator_identifier; + free_aggregator->aggregator_identifier; /* update the new aggregator's parameters * if port was responsed from the end-user */ if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS) /* if port is full duplex */ - port->aggregator->is_individual = false; + free_aggregator->is_individual = false; else - port->aggregator->is_individual = true; + free_aggregator->is_individual = true; - port->aggregator->actor_admin_aggregator_key = + free_aggregator->actor_admin_aggregator_key = port->actor_admin_port_key; - port->aggregator->actor_oper_aggregator_key = + free_aggregator->actor_oper_aggregator_key = port->actor_oper_port_key; - port->aggregator->partner_system = + free_aggregator->partner_system = port->partner_oper.system; - 
port->aggregator->partner_system_priority = + free_aggregator->partner_system_priority = port->partner_oper.system_priority; - port->aggregator->partner_oper_aggregator_key = port->partner_oper.key; - port->aggregator->receive_state = 1; - port->aggregator->transmit_state = 1; - port->aggregator->lag_ports = port; - port->aggregator->num_of_ports++; + free_aggregator->partner_oper_aggregator_key = port->partner_oper.key; + free_aggregator->receive_state = 1; + free_aggregator->transmit_state = 1; + free_aggregator->lag_ports = port; + free_aggregator->num_of_ports++; + + rcu_assign_pointer(port->aggregator, free_aggregator); /* mark this port as selected */ port->sm_vars |= AD_PORT_SELECTED; slave_dbg(bond->dev, port->slave->dev, "Port %d joined LAG %d (new LAG)\n", port->actor_port_number, - port->aggregator->aggregator_identifier); + free_aggregator->aggregator_identifier); } else { slave_err(bond->dev, port->slave->dev, "Port %d did not find a suitable aggregator\n", @@ -1700,13 +1703,12 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) * in all aggregator's ports, else set ready=FALSE in all * aggregator's ports */ - __set_agg_ports_ready(port->aggregator, - __agg_ports_are_ready(port->aggregator)); + aggregator = rcu_dereference(port->aggregator); + __set_agg_ports_ready(aggregator, __agg_ports_are_ready(aggregator)); - aggregator = __get_first_agg(port); - ad_agg_selection_logic(aggregator, update_slave_arr); + ad_agg_selection_logic(__get_first_agg(port), update_slave_arr); - if (!port->aggregator->is_active) + if (!aggregator->is_active) port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION; } @@ -2075,13 +2077,15 @@ static void ad_initialize_port(struct port *port, const struct bond_params *bond */ static void ad_enable_collecting(struct port *port) { - if (port->aggregator->is_active) { + struct aggregator *aggregator = rcu_dereference(port->aggregator); + + if (aggregator->is_active) { struct slave *slave = 
port->slave; slave_dbg(slave->bond->dev, slave->dev, "Enabling collecting on port %d (LAG %d)\n", port->actor_port_number, - port->aggregator->aggregator_identifier); + aggregator->aggregator_identifier); __enable_collecting_port(port); } } @@ -2093,11 +2097,13 @@ static void ad_enable_collecting(struct port *port) */ static void ad_disable_distributing(struct port *port, bool *update_slave_arr) { - if (port->aggregator && __agg_has_partner(port->aggregator)) { + struct aggregator *aggregator = rcu_dereference(port->aggregator); + + if (aggregator && __agg_has_partner(aggregator)) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Disabling distributing on port %d (LAG %d)\n", port->actor_port_number, - port->aggregator->aggregator_identifier); + aggregator->aggregator_identifier); __disable_distributing_port(port); /* Slave array needs an update */ *update_slave_arr = true; @@ -2114,11 +2120,13 @@ static void ad_disable_distributing(struct port *port, bool *update_slave_arr) static void ad_enable_collecting_distributing(struct port *port, bool *update_slave_arr) { - if (port->aggregator->is_active) { + struct aggregator *aggregator = rcu_dereference(port->aggregator); + + if (aggregator->is_active) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Enabling port %d (LAG %d)\n", port->actor_port_number, - port->aggregator->aggregator_identifier); + aggregator->aggregator_identifier); __enable_port(port); /* Slave array needs update */ *update_slave_arr = true; @@ -2135,11 +2143,13 @@ static void ad_enable_collecting_distributing(struct port *port, static void ad_disable_collecting_distributing(struct port *port, bool *update_slave_arr) { - if (port->aggregator && __agg_has_partner(port->aggregator)) { + struct aggregator *aggregator = rcu_dereference(port->aggregator); + + if (aggregator && __agg_has_partner(aggregator)) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Disabling port %d (LAG %d)\n", port->actor_port_number, - 
port->aggregator->aggregator_identifier); + aggregator->aggregator_identifier); __disable_port(port); /* Slave array needs an update */ *update_slave_arr = true; @@ -2379,7 +2389,7 @@ void bond_3ad_unbind_slave(struct slave *slave) */ for (temp_port = aggregator->lag_ports; temp_port; temp_port = temp_port->next_port_in_aggregator) { - temp_port->aggregator = new_aggregator; + rcu_assign_pointer(temp_port->aggregator, new_aggregator); temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier; } @@ -2848,15 +2858,16 @@ int bond_3ad_set_carrier(struct bonding *bond) int __bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) { - struct aggregator *aggregator = NULL; + struct aggregator *aggregator = NULL, *tmp; struct list_head *iter; struct slave *slave; struct port *port; bond_for_each_slave_rcu(bond, slave, iter) { port = &(SLAVE_AD_INFO(slave)->port); - if (port->aggregator && port->aggregator->is_active) { - aggregator = port->aggregator; + tmp = rcu_dereference(port->aggregator); + if (tmp && tmp->is_active) { + aggregator = tmp; break; } } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c7baa5c4bf40ad..af82a3df2c5d38 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1433,7 +1433,7 @@ static void bond_poll_controller(struct net_device *bond_dev) if (BOND_MODE(bond) == BOND_MODE_8023AD) { struct aggregator *agg = - SLAVE_AD_INFO(slave)->port.aggregator; + rcu_dereference(SLAVE_AD_INFO(slave)->port.aggregator); if (agg && agg->aggregator_identifier != ad_info.aggregator_id) @@ -5179,15 +5179,16 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) spin_unlock_bh(&bond->mode_lock); agg_id = ad_info.aggregator_id; } + rcu_read_lock(); bond_for_each_slave(bond, slave, iter) { if (skipslave == slave) continue; all_slaves->arr[all_slaves->count++] = slave; if (BOND_MODE(bond) == BOND_MODE_8023AD) { - struct aggregator *agg; + 
const struct aggregator *agg; - agg = SLAVE_AD_INFO(slave)->port.aggregator; + agg = rcu_dereference(SLAVE_AD_INFO(slave)->port.aggregator); if (!agg || agg->aggregator_identifier != agg_id) continue; } @@ -5199,6 +5200,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) usable_slaves->arr[usable_slaves->count++] = slave; } + rcu_read_unlock(); bond_set_slave_arr(bond, usable_slaves, all_slaves); return ret; diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index ea1a80e658aeb2..c7d3e0602c831d 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -66,27 +66,29 @@ static int bond_fill_slave_info(struct sk_buff *skb, const struct port *ad_port; ad_port = &SLAVE_AD_INFO(slave)->port; - agg = SLAVE_AD_INFO(slave)->port.aggregator; + rcu_read_lock(); + agg = rcu_dereference(SLAVE_AD_INFO(slave)->port.aggregator); if (agg) { if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, agg->aggregator_identifier)) - goto nla_put_failure; + goto nla_put_failure_rcu; if (nla_put_u8(skb, IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, ad_port->actor_oper_port_state)) - goto nla_put_failure; + goto nla_put_failure_rcu; if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, ad_port->partner_oper.port_state)) - goto nla_put_failure; + goto nla_put_failure_rcu; if (nla_put_u8(skb, IFLA_BOND_SLAVE_AD_CHURN_ACTOR_STATE, ad_port->sm_churn_actor_state)) - goto nla_put_failure; + goto nla_put_failure_rcu; if (nla_put_u8(skb, IFLA_BOND_SLAVE_AD_CHURN_PARTNER_STATE, ad_port->sm_churn_partner_state)) - goto nla_put_failure; + goto nla_put_failure_rcu; } + rcu_read_unlock(); if (nla_put_u16(skb, IFLA_BOND_SLAVE_ACTOR_PORT_PRIO, SLAVE_AD_INFO(slave)->port_priority)) @@ -95,6 +97,8 @@ static int bond_fill_slave_info(struct sk_buff *skb, return 0; +nla_put_failure_rcu: + rcu_read_unlock(); nla_put_failure: return -EMSGSIZE; } diff --git a/drivers/net/bonding/bond_procfs.c 
b/drivers/net/bonding/bond_procfs.c index e34f8030519135..3714aab1a3d9c5 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -188,6 +188,7 @@ static void bond_info_show_master(struct seq_file *seq) } } +/* Note: runs under rcu_read_lock() */ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave) { @@ -214,7 +215,7 @@ static void bond_info_show_slave(struct seq_file *seq, if (BOND_MODE(bond) == BOND_MODE_8023AD) { const struct port *port = &SLAVE_AD_INFO(slave)->port; - const struct aggregator *agg = port->aggregator; + const struct aggregator *agg = rcu_dereference(port->aggregator); if (agg) { seq_printf(seq, "Aggregator ID: %d\n", diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 36d0e8440b5b94..fc6fe7181789d3 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -62,10 +62,15 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf) const struct aggregator *agg; if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) { - agg = SLAVE_AD_INFO(slave)->port.aggregator; - if (agg) - return sysfs_emit(buf, "%d\n", - agg->aggregator_identifier); + rcu_read_lock(); + agg = rcu_dereference(SLAVE_AD_INFO(slave)->port.aggregator); + if (agg) { + ssize_t res = sysfs_emit(buf, "%d\n", + agg->aggregator_identifier); + rcu_read_unlock(); + return res; + } + rcu_read_unlock(); } return sysfs_emit(buf, "N/A\n"); @@ -78,7 +83,7 @@ static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf) if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) { ad_port = &SLAVE_AD_INFO(slave)->port; - if (ad_port->aggregator) + if (rcu_access_pointer(ad_port->aggregator)) return sysfs_emit(buf, "%u\n", ad_port->actor_oper_port_state); } @@ -93,7 +98,7 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf) if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) { ad_port = &SLAVE_AD_INFO(slave)->port; - 
if (ad_port->aggregator) + if (rcu_access_pointer(ad_port->aggregator)) return sysfs_emit(buf, "%u\n", ad_port->partner_oper.port_state); } diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index 2bb0a3ff981008..f8b3d53bccadbd 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -847,13 +847,24 @@ static void airoha_qdma_wake_netdev_txqs(struct airoha_queue *q) { struct airoha_qdma *qdma = q->qdma; struct airoha_eth *eth = qdma->eth; - int i; + int i, qid = q - &qdma->q_tx[0]; for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { struct airoha_gdm_port *port = eth->ports[i]; + int j; + + if (!port) + continue; + + if (port->qdma != qdma) + continue; - if (port && port->qdma == qdma) - netif_tx_wake_all_queues(port->dev); + for (j = 0; j < port->dev->num_tx_queues; j++) { + if (airoha_qdma_get_txq(qdma, j) != qid) + continue; + + netif_wake_subqueue(port->dev, j); + } } q->txq_stopped = false; } @@ -929,10 +940,9 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) q->queued--; if (skb) { - u16 queue = skb_get_queue_mapping(skb); struct netdev_queue *txq; - txq = netdev_get_tx_queue(skb->dev, queue); + txq = skb_get_tx_queue(skb->dev, skb); netdev_tx_completed_queue(txq, 1, skb->len); dev_kfree_skb_any(skb); } @@ -1737,14 +1747,11 @@ static int airoha_dev_stop(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); struct airoha_qdma *qdma = port->qdma; - int i, err; + int i; netif_tx_disable(dev); - err = airoha_set_vip_for_gdm_port(port, false); - if (err) - return err; - - for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) + airoha_set_vip_for_gdm_port(port, false); + for (i = 0; i < dev->num_tx_queues; i++) netdev_tx_reset_subqueue(dev, i); airoha_set_gdm_port_fwd_cfg(qdma->eth, REG_GDM_FWD_CFG(port->id), @@ -1997,12 +2004,12 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, struct netdev_queue *txq; struct airoha_queue *q; 
LIST_HEAD(tx_list); + int i = 0, qid; void *data; - int i, qid; u16 index; u8 fport; - qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx); + qid = airoha_qdma_get_txq(qdma, skb_get_queue_mapping(skb)); tag = airoha_get_dsa_tag(skb, dev); msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, @@ -2039,7 +2046,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, spin_lock_bh(&q->lock); - txq = netdev_get_tx_queue(dev, qid); + txq = skb_get_tx_queue(dev, skb); nr_frags = 1 + skb_shinfo(skb)->nr_frags; if (q->queued + nr_frags >= q->ndesc) { @@ -2057,7 +2064,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, list); index = e - q->entry; - for (i = 0; i < nr_frags; i++) { + while (true) { struct airoha_qdma_desc *desc = &q->desc[index]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t addr; @@ -2069,7 +2076,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, goto error_unmap; list_move_tail(&e->list, &tx_list); - e->skb = i ? NULL : skb; + e->skb = i == nr_frags - 1 ? 
skb : NULL; e->dma_addr = addr; e->dma_len = len; @@ -2088,6 +2095,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); + if (++i == nr_frags) + break; + data = skb_frag_address(frag); len = skb_frag_size(frag); } @@ -2095,17 +2105,16 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); netdev_tx_sent_queue(txq, skb->len); + if (q->ndesc - q->queued < q->free_thr) { + netif_tx_stop_queue(txq); + q->txq_stopped = true; + } if (netif_xmit_stopped(txq) || !netdev_xmit_more()) airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, FIELD_PREP(TX_RING_CPU_IDX_MASK, index)); - if (q->ndesc - q->queued < q->free_thr) { - netif_tx_stop_queue(txq); - q->txq_stopped = true; - } - spin_unlock_bh(&q->lock); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h index e389d2fe3b86f0..4fad3acc3ccf3e 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.h +++ b/drivers/net/ethernet/airoha/airoha_eth.h @@ -631,6 +631,11 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val); #define airoha_qdma_clear(qdma, offset, val) \ airoha_rmw((qdma)->regs, (offset), (val), 0) +static inline u16 airoha_qdma_get_txq(struct airoha_qdma *qdma, u16 qid) +{ + return qid % ARRAY_SIZE(qdma->q_tx); +} + static inline bool airoha_is_lan_gdm_port(struct airoha_gdm_port *port) { /* GDM1 port on EN7581 SoC is connected to the lan dsa switch. 
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 58cc3147afe26f..73e051d26b9d88 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1756,6 +1756,27 @@ static int ibmveth_set_mac_addr(struct net_device *dev, void *p) return 0; } +static netdev_features_t ibmveth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + /* Some physical adapters do not support segmentation offload with + * MSS < 224. Disable GSO for such packets to avoid adapter freeze. + * Note: Single-segment packets (gso_segs == 1) don't need this check + * as they bypass the LSO path and are transmitted without segmentation. + */ + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_size < IBMVETH_MIN_LSO_MSS) { + netdev_warn_once(dev, + "MSS %u too small for LSO, disabling GSO\n", + skb_shinfo(skb)->gso_size); + features &= ~NETIF_F_GSO_MASK; + } + } + + return vlan_features_check(skb, features); +} + static const struct net_device_ops ibmveth_netdev_ops = { .ndo_open = ibmveth_open, .ndo_stop = ibmveth_close, @@ -1767,6 +1788,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { .ndo_set_features = ibmveth_set_features, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ibmveth_set_mac_addr, + .ndo_features_check = ibmveth_features_check, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ibmveth_poll_controller, #endif diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 068f99df133ec0..d87713668ed300 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -37,6 +37,7 @@ #define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002UL #define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001UL +#define IBMVETH_MIN_LSO_MSS 224 /* Minimum MSS for LSO */ /* hcall macros */ #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, 
rxq, fltlst, mac) diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index e9fb0a0919e376..050f8241ef5e6b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -158,11 +158,10 @@ struct iavf_vlan { enum iavf_vlan_state_t { IAVF_VLAN_INVALID, IAVF_VLAN_ADD, /* filter needs to be added */ - IAVF_VLAN_IS_NEW, /* filter is new, wait for PF answer */ - IAVF_VLAN_ACTIVE, /* filter is accepted by PF */ - IAVF_VLAN_DISABLE, /* filter needs to be deleted by PF, then marked INACTIVE */ - IAVF_VLAN_INACTIVE, /* filter is inactive, we are in IFF_DOWN */ - IAVF_VLAN_REMOVE, /* filter needs to be removed from list */ + IAVF_VLAN_ADDING, /* ADD sent to PF, waiting for response */ + IAVF_VLAN_ACTIVE, /* PF confirmed, filter is in HW */ + IAVF_VLAN_REMOVE, /* filter queued for DEL from PF */ + IAVF_VLAN_REMOVING, /* DEL sent to PF, waiting for response */ }; struct iavf_vlan_filter { diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 3c1465cf05159a..d2914c511e1e03 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -757,10 +757,10 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, adapter->num_vlan_filters++; iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); } else if (f->state == IAVF_VLAN_REMOVE) { - /* Re-add the filter since we cannot tell whether the - * pending delete has already been processed by the PF. - * A duplicate add is harmless. 
- */ + /* DEL not yet sent to PF, cancel it */ + f->state = IAVF_VLAN_ACTIVE; + } else if (f->state == IAVF_VLAN_REMOVING) { + /* DEL already sent to PF, re-add after completion */ f->state = IAVF_VLAN_ADD; iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); @@ -791,37 +791,19 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) list_del(&f->list); kfree(f); adapter->num_vlan_filters--; - } else { + } else if (f->state != IAVF_VLAN_REMOVING) { f->state = IAVF_VLAN_REMOVE; iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER); } + /* If REMOVING, DEL is already sent to PF; completion + * handler will free the filter when PF confirms. + */ } spin_unlock_bh(&adapter->mac_vlan_list_lock); } -/** - * iavf_restore_filters - * @adapter: board private structure - * - * Restore existing non MAC filters when VF netdev comes back up - **/ -static void iavf_restore_filters(struct iavf_adapter *adapter) -{ - struct iavf_vlan_filter *f; - - /* re-add all VLAN filters */ - spin_lock_bh(&adapter->mac_vlan_list_lock); - - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->state == IAVF_VLAN_INACTIVE) - f->state = IAVF_VLAN_ADD; - } - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; -} /** * iavf_get_num_vlans_added - get number of VLANs added @@ -1246,13 +1228,12 @@ static void iavf_up_complete(struct iavf_adapter *adapter) } /** - * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF - * yet and mark other to be removed. + * iavf_clear_mac_filters - Remove MAC filters not sent to PF yet and mark + * others to be removed. 
* @adapter: board private structure **/ -static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter) +static void iavf_clear_mac_filters(struct iavf_adapter *adapter) { - struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_mac_filter *f, *ftmp; spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -1271,11 +1252,6 @@ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter) } } - /* disable all VLAN filters */ - list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, - list) - vlf->state = IAVF_VLAN_DISABLE; - spin_unlock_bh(&adapter->mac_vlan_list_lock); } @@ -1371,7 +1347,7 @@ void iavf_down(struct iavf_adapter *adapter) iavf_napi_disable_all(adapter); iavf_irq_disable(adapter); - iavf_clear_mac_vlan_filters(adapter); + iavf_clear_mac_filters(adapter); iavf_clear_cloud_filters(adapter); iavf_clear_fdir_filters(adapter); iavf_clear_adv_rss_conf(adapter); @@ -1388,8 +1364,6 @@ void iavf_down(struct iavf_adapter *adapter) */ if (!list_empty(&adapter->mac_filter_list)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; - if (!list_empty(&adapter->vlan_filter_list)) - adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; if (!list_empty(&adapter->cloud_filter_list)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; if (!list_empty(&adapter->fdir_list_head)) @@ -4494,8 +4468,6 @@ static int iavf_open(struct net_device *netdev) iavf_add_filter(adapter, adapter->hw.mac.addr); spin_unlock_bh(&adapter->mac_vlan_list_lock); - /* Restore filters that were removed with IFF_DOWN */ - iavf_restore_filters(adapter); iavf_restore_fdir_filters(adapter); iavf_configure(adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index a52c100dcbc56d..4f2defd2331b17 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -746,7 +746,7 @@ static void iavf_vlan_add_reject(struct iavf_adapter *adapter) 
spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - if (f->state == IAVF_VLAN_IS_NEW) { + if (f->state == IAVF_VLAN_ADDING) { list_del(&f->list); kfree(f); adapter->num_vlan_filters--; @@ -812,7 +812,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) if (f->state == IAVF_VLAN_ADD) { vvfl->vlan_id[i] = f->vlan.vid; i++; - f->state = IAVF_VLAN_IS_NEW; + f->state = IAVF_VLAN_ADDING; if (i == count) break; } @@ -874,7 +874,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) vlan->tpid = f->vlan.tpid; i++; - f->state = IAVF_VLAN_IS_NEW; + f->state = IAVF_VLAN_ADDING; } } @@ -911,22 +911,12 @@ void iavf_del_vlans(struct iavf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - /* since VLAN capabilities are not allowed, we dont want to send - * a VLAN delete request because it will most likely fail and - * create unnecessary errors/noise, so just free the VLAN - * filters marked for removal to enable bailing out before - * sending a virtchnl message - */ if (f->state == IAVF_VLAN_REMOVE && !VLAN_FILTERING_ALLOWED(adapter)) { list_del(&f->list); kfree(f); adapter->num_vlan_filters--; - } else if (f->state == IAVF_VLAN_DISABLE && - !VLAN_FILTERING_ALLOWED(adapter)) { - f->state = IAVF_VLAN_INACTIVE; - } else if (f->state == IAVF_VLAN_REMOVE || - f->state == IAVF_VLAN_DISABLE) { + } else if (f->state == IAVF_VLAN_REMOVE) { count++; } } @@ -958,18 +948,10 @@ void iavf_del_vlans(struct iavf_adapter *adapter) vvfl->vsi_id = adapter->vsi_res->vsi_id; vvfl->num_elements = count; - list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - if (f->state == IAVF_VLAN_DISABLE) { - vvfl->vlan_id[i] = f->vlan.vid; - f->state = IAVF_VLAN_INACTIVE; - i++; - if (i == count) - break; - } else if (f->state == IAVF_VLAN_REMOVE) { + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->state == IAVF_VLAN_REMOVE) { 
vvfl->vlan_id[i] = f->vlan.vid; - list_del(&f->list); - kfree(f); - adapter->num_vlan_filters--; + f->state = IAVF_VLAN_REMOVING; i++; if (i == count) break; @@ -1006,9 +988,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter) vvfl_v2->vport_id = adapter->vsi_res->vsi_id; vvfl_v2->num_elements = count; - list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - if (f->state == IAVF_VLAN_DISABLE || - f->state == IAVF_VLAN_REMOVE) { + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->state == IAVF_VLAN_REMOVE) { struct virtchnl_vlan_supported_caps *filtering_support = &adapter->vlan_v2_caps.filtering.filtering_support; struct virtchnl_vlan *vlan; @@ -1022,13 +1003,7 @@ void iavf_del_vlans(struct iavf_adapter *adapter) vlan->tci = f->vlan.vid; vlan->tpid = f->vlan.tpid; - if (f->state == IAVF_VLAN_DISABLE) { - f->state = IAVF_VLAN_INACTIVE; - } else { - list_del(&f->list); - kfree(f); - adapter->num_vlan_filters--; - } + f->state = IAVF_VLAN_REMOVING; i++; if (i == count) break; @@ -2391,10 +2366,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); wake_up(&adapter->vc_waitqueue); break; - case VIRTCHNL_OP_DEL_VLAN: - dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", - iavf_stat_str(&adapter->hw, v_retval)); - break; case VIRTCHNL_OP_DEL_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); @@ -2905,17 +2876,42 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->adv_rss_lock); } break; + case VIRTCHNL_OP_ADD_VLAN: case VIRTCHNL_OP_ADD_VLAN_V2: { struct iavf_vlan_filter *f; + if (v_retval) + break; + spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->state == IAVF_VLAN_IS_NEW) + if (f->state == IAVF_VLAN_ADDING) f->state = IAVF_VLAN_ACTIVE; } 
spin_unlock_bh(&adapter->mac_vlan_list_lock); } break; + case VIRTCHNL_OP_DEL_VLAN: + case VIRTCHNL_OP_DEL_VLAN_V2: { + struct iavf_vlan_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, + list) { + if (f->state == IAVF_VLAN_REMOVING) { + if (v_retval) { + /* PF rejected DEL, keep filter */ + f->state = IAVF_VLAN_ACTIVE; + } else { + list_del(&f->list); + kfree(f); + adapter->num_vlan_filters--; + } + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + } + break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: /* PF enabled vlan strip on this VF. * Update netdev->features if needed to be in sync with ethtool. diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index 6144cee8034d77..641d6e289d5ce6 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -1245,6 +1245,8 @@ static int ice_devlink_reinit_up(struct ice_pf *pf) return err; } + ice_init_dev_hw(pf); + /* load MSI-X values */ ice_set_min_max_msix(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index ce11fea122d03e..b617a6bff89134 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1126,8 +1126,6 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; - ice_init_dev_hw(hw->back); - mutex_init(&hw->tnl_lock); ice_init_chk_recipe_reuse_support(hw); diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index 62f75701d65205..27b460926baced 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -1154,6 +1154,32 @@ ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv, extack, ICE_DPLL_PIN_TYPE_INPUT); } +/** + * ice_dpll_sw_pin_notify_peer - notify the paired SW 
pin after a state change + * @d: pointer to dplls struct + * @changed: the SW pin that was explicitly changed (already notified by dpll core) + * + * SMA and U.FL pins share physical signal paths in pairs (SMA1/U.FL1 and + * SMA2/U.FL2). When one pin's routing changes via the PCA9575 GPIO + * expander, the paired pin's state may also change. Send a change + * notification for the peer pin so userspace consumers monitoring the + * peer via dpll netlink learn about the update. + * + * Context: Called from dpll_pin_ops callbacks after pf->dplls.lock is + * released. Uses __dpll_pin_change_ntf() because dpll_lock is + * still held by the dpll netlink layer. + */ +static void ice_dpll_sw_pin_notify_peer(struct ice_dplls *d, + struct ice_dpll_pin *changed) +{ + struct ice_dpll_pin *peer; + + peer = (changed >= d->sma && changed < d->sma + ICE_DPLL_PIN_SW_NUM) ? + &d->ufl[changed->idx] : &d->sma[changed->idx]; + if (peer->pin) + __dpll_pin_change_ntf(peer->pin); +} + /** * ice_dpll_sma_direction_set - set direction of SMA pin * @p: pointer to a pin @@ -1171,6 +1197,8 @@ static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p, enum dpll_pin_direction direction, struct netlink_ext_ack *extack) { + struct ice_dplls *d = &p->pf->dplls; + struct ice_dpll_pin *peer; u8 data; int ret; @@ -1189,8 +1217,9 @@ static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p, case ICE_DPLL_PIN_SW_2_IDX: if (direction == DPLL_PIN_DIRECTION_INPUT) { data &= ~ICE_SMA2_DIR_EN; + data |= ICE_SMA2_UFL2_RX_DIS; } else { - data &= ~ICE_SMA2_TX_EN; + data &= ~(ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS); data |= ICE_SMA2_DIR_EN; } break; @@ -1202,6 +1231,34 @@ static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p, ret = ice_dpll_pin_state_update(p->pf, p, ICE_DPLL_PIN_TYPE_SOFTWARE, extack); + if (ret) + return ret; + + /* When a direction change activates the paired U.FL pin, enable + * its backing CGU pin so the pin reports as connected. 
Without + * this the U.FL routing is correct but the CGU pin stays disabled + * and userspace sees the pin as disconnected. Do not disable the + * backing pin when U.FL becomes inactive because the SMA pin may + * still be using it. + */ + peer = &d->ufl[p->idx]; + if (peer->active) { + struct ice_dpll_pin *target; + enum ice_dpll_pin_type type; + + if (peer->output) { + target = peer->output; + type = ICE_DPLL_PIN_TYPE_OUTPUT; + } else { + target = peer->input; + type = ICE_DPLL_PIN_TYPE_INPUT; + } + ret = ice_dpll_pin_enable(&p->pf->hw, target, + d->eec.dpll_idx, type, extack); + if (!ret) + ret = ice_dpll_pin_state_update(p->pf, target, + type, extack); + } return ret; } @@ -1253,6 +1310,14 @@ ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv, data &= ~ICE_SMA1_MASK; enable = true; } else if (state == DPLL_PIN_STATE_DISCONNECTED) { + /* Skip if U.FL1 is not active, setting TX_EN + * while DIR_EN is set would also deactivate + * the paired SMA1 output. + */ + if (data & (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)) { + ret = 0; + goto unlock; + } data |= ICE_SMA1_TX_EN; enable = false; } else { @@ -1267,6 +1332,15 @@ ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv, data &= ~ICE_SMA2_UFL2_RX_DIS; enable = true; } else if (state == DPLL_PIN_STATE_DISCONNECTED) { + /* Skip if U.FL2 is not active, setting + * UFL2_RX_DIS could also disable the paired + * SMA2 input. 
+ */ + if (!(data & ICE_SMA2_DIR_EN) || + (data & ICE_SMA2_UFL2_RX_DIS)) { + ret = 0; + goto unlock; + } data |= ICE_SMA2_UFL2_RX_DIS; enable = false; } else { @@ -1296,6 +1370,8 @@ ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv, unlock: mutex_unlock(&pf->dplls.lock); + if (!ret) + ice_dpll_sw_pin_notify_peer(&pf->dplls, p); return ret; } @@ -1414,6 +1490,8 @@ ice_dpll_sma_pin_state_set(const struct dpll_pin *pin, void *pin_priv, unlock: mutex_unlock(&pf->dplls.lock); + if (!ret) + ice_dpll_sw_pin_notify_peer(&pf->dplls, sma); return ret; } @@ -1609,6 +1687,8 @@ ice_dpll_pin_sma_direction_set(const struct dpll_pin *pin, void *pin_priv, mutex_lock(&pf->dplls.lock); ret = ice_dpll_sma_direction_set(p, direction, extack); mutex_unlock(&pf->dplls.lock); + if (!ret) + ice_dpll_sw_pin_notify_peer(&pf->dplls, p); return ret; } @@ -1915,7 +1995,10 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv, d->active_input == p->input->pin)) *phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR; else if (d->phase_offset_monitor_period) - *phase_offset = p->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR; + *phase_offset = (p->input && + p->direction == DPLL_PIN_DIRECTION_INPUT ? + p->input->phase_offset : + p->phase_offset) * ICE_DPLL_PHASE_OFFSET_FACTOR; else *phase_offset = 0; mutex_unlock(&pf->dplls.lock); @@ -2609,6 +2692,27 @@ static u64 ice_generate_clock_id(struct ice_pf *pf) return pci_get_dsn(pf->pdev); } +/** + * ice_dpll_pin_ntf - notify pin change including any SW pin wrappers + * @dplls: pointer to dplls struct + * @pin: the dpll_pin that changed + * + * Send a change notification for @pin and for any registered SMA/U.FL pin + * whose backing CGU input matches @pin. 
+ */ +static void ice_dpll_pin_ntf(struct ice_dplls *dplls, struct dpll_pin *pin) +{ + dpll_pin_change_ntf(pin); + for (int i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) { + if (dplls->sma[i].pin && dplls->sma[i].input && + dplls->sma[i].input->pin == pin) + dpll_pin_change_ntf(dplls->sma[i].pin); + if (dplls->ufl[i].pin && dplls->ufl[i].input && + dplls->ufl[i].input->pin == pin) + dpll_pin_change_ntf(dplls->ufl[i].pin); + } +} + /** * ice_dpll_notify_changes - notify dpll subsystem about changes * @d: pointer do dpll @@ -2617,6 +2721,7 @@ static u64 ice_generate_clock_id(struct ice_pf *pf) */ static void ice_dpll_notify_changes(struct ice_dpll *d) { + struct ice_dplls *dplls = &d->pf->dplls; bool pin_notified = false; if (d->prev_dpll_state != d->dpll_state) { @@ -2625,17 +2730,17 @@ static void ice_dpll_notify_changes(struct ice_dpll *d) } if (d->prev_input != d->active_input) { if (d->prev_input) - dpll_pin_change_ntf(d->prev_input); + ice_dpll_pin_ntf(dplls, d->prev_input); d->prev_input = d->active_input; if (d->active_input) { - dpll_pin_change_ntf(d->active_input); + ice_dpll_pin_ntf(dplls, d->active_input); pin_notified = true; } } if (d->prev_phase_offset != d->phase_offset) { d->prev_phase_offset = d->phase_offset; if (!pin_notified && d->active_input) - dpll_pin_change_ntf(d->active_input); + ice_dpll_pin_ntf(dplls, d->active_input); } } @@ -2664,6 +2769,7 @@ static bool ice_dpll_is_pps_phase_monitor(struct ice_pf *pf) /** * ice_dpll_pins_notify_mask - notify dpll subsystem about bulk pin changes + * @dplls: pointer to dplls struct * @pins: array of ice_dpll_pin pointers registered within dpll subsystem * @pin_num: number of pins * @phase_offset_ntf_mask: bitmask of pin indexes to notify @@ -2673,15 +2779,14 @@ static bool ice_dpll_is_pps_phase_monitor(struct ice_pf *pf) * * Context: Must be called while pf->dplls.lock is released. 
*/ -static void ice_dpll_pins_notify_mask(struct ice_dpll_pin *pins, +static void ice_dpll_pins_notify_mask(struct ice_dplls *dplls, + struct ice_dpll_pin *pins, u8 pin_num, u32 phase_offset_ntf_mask) { - int i = 0; - - for (i = 0; i < pin_num; i++) - if (phase_offset_ntf_mask & (1 << i)) - dpll_pin_change_ntf(pins[i].pin); + for (int i = 0; i < pin_num; i++) + if (phase_offset_ntf_mask & BIT(i)) + ice_dpll_pin_ntf(dplls, pins[i].pin); } /** @@ -2857,7 +2962,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work) ice_dpll_notify_changes(de); ice_dpll_notify_changes(dp); if (phase_offset_ntf) - ice_dpll_pins_notify_mask(d->inputs, d->num_inputs, + ice_dpll_pins_notify_mask(d, d->inputs, d->num_inputs, phase_offset_ntf); resched: @@ -4014,6 +4119,7 @@ static int ice_dpll_init_info_sw_pins(struct ice_pf *pf) struct ice_dpll_pin *pin; u32 phase_adj_max, caps; int i, ret; + u8 data; if (pf->hw.device_id == ICE_DEV_ID_E810C_QSFP) input_idx_offset = ICE_E810_RCLK_PINS_NUM; @@ -4073,6 +4179,22 @@ static int ice_dpll_init_info_sw_pins(struct ice_pf *pf) } ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max); } + + /* Initialize the SMA control register to a known-good default state. + * Without this write the PCA9575 GPIO expander retains its power-on + * default (all outputs high) which makes all SW pins appear inactive. + * Set SMA1 and SMA2 as active inputs, disable U.FL1 output and + * U.FL2 input. 
+ */ + ret = ice_read_sma_ctrl(&pf->hw, &data); + if (ret) + return ret; + data &= ~ICE_ALL_SMA_MASK; + data |= ICE_SMA1_TX_EN | ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS; + ret = ice_write_sma_ctrl(&pf->hw, data); + if (ret) + return ret; + ret = ice_dpll_pin_state_update(pf, pin, ICE_DPLL_PIN_TYPE_SOFTWARE, NULL); if (ret) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5f92377d4dfc28..1d1947a7fe1191 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5245,6 +5245,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) return err; } + ice_init_dev_hw(pf); + adapter = ice_adapter_get(pdev); if (IS_ERR(adapter)) { err = PTR_ERR(adapter); diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 772f6b07340d47..b1f46707dcc004 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -804,7 +804,12 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_vf_ctrl_invalidate_vsi(vf); ice_vf_pre_vsi_rebuild(vf); - ice_vf_rebuild_vsi(vf); + if (ice_vf_rebuild_vsi(vf)) { + dev_err(dev, "VF %u VSI rebuild failed, leaving VF disabled\n", + vf->vf_id); + mutex_unlock(&vf->cfg_lock); + continue; + } ice_vf_post_vsi_rebuild(vf); ice_eswitch_attach_vf(pf, vf); diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c index d842c60dfc100d..e5c6f81af48be9 100644 --- a/drivers/net/ethernet/sfc/efx_devlink.c +++ b/drivers/net/ethernet/sfc/efx_devlink.c @@ -531,7 +531,7 @@ static int efx_devlink_info_running_versions(struct efx_nic *efx, if (rc || outlength < MC_CMD_GET_VERSION_OUT_LEN) { netif_err(efx, drv, efx->net_dev, "mcdi MC_CMD_GET_VERSION failed\n"); - return rc; + return rc ?: -EIO; } /* Handle previous output */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ca68248dbc781a..3591755ea30be5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -5549,9 +5549,12 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) break; /* Prefetch the next RX descriptor */ - rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx, - priv->dma_conf.dma_rx_size); - next_entry = rx_q->cur_rx; + next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx, + priv->dma_conf.dma_rx_size); + if (unlikely(next_entry == rx_q->dirty_rx)) + break; + + rx_q->cur_rx = next_entry; np = stmmac_get_rx_desc(priv, rx_q, next_entry); @@ -5686,7 +5689,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) dma_dir = page_pool_get_dma_dir(rx_q->page_pool); bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; - limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); if (netif_msg_rx_status(priv)) { void *rx_head = stmmac_get_rx_desc(priv, rx_q, 0); @@ -5733,9 +5735,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) if (unlikely(status & dma_own)) break; - rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx, - priv->dma_conf.dma_rx_size); - next_entry = rx_q->cur_rx; + next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx, + priv->dma_conf.dma_rx_size); + if (unlikely(next_entry == rx_q->dirty_rx)) + break; + + rx_q->cur_rx = next_entry; np = stmmac_get_rx_desc(priv, rx_q, next_entry); diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index 15fe4d1163c1cd..ee2913758e54ed 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -496,8 +496,6 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) u8 *pecp; int rc; - fs = mctp_i2c_get_tx_flow_state(midev, skb); - hdr = (void *)skb_mac_header(skb); /* Sanity check that packet contents matches skb length, * and can't exceed MCTP_I2C_BUFSZ @@ -509,6 +507,8 @@ static void 
mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) return; } + fs = mctp_i2c_get_tx_flow_state(midev, skb); + if (skb_tailroom(skb) >= 1) { /* Linear case with space, we can just append the PEC */ skb_put(skb, 1); diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 205384dab89a69..57dd6821a8aa90 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -752,7 +752,7 @@ static ssize_t enabled_store(struct config_item *item, unregister_netcons_consoles(); } - ret = strnlen(buf, count); + ret = count; /* Deferred cleanup */ netconsole_process_cleanups(); out_unlock: @@ -781,7 +781,7 @@ static ssize_t release_store(struct config_item *item, const char *buf, nt->release = release; - ret = strnlen(buf, count); + ret = count; out_unlock: dynamic_netconsole_mutex_unlock(); return ret; @@ -807,7 +807,7 @@ static ssize_t extended_store(struct config_item *item, const char *buf, goto out_unlock; nt->extended = extended; - ret = strnlen(buf, count); + ret = count; out_unlock: dynamic_netconsole_mutex_unlock(); return ret; @@ -817,6 +817,13 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); + size_t len = count; + + /* Account for a trailing newline appended by tools like echo */ + if (len && buf[len - 1] == '\n') + len--; + if (len >= IFNAMSIZ) + return -ENAMETOOLONG; dynamic_netconsole_mutex_lock(); if (nt->state == STATE_ENABLED) { @@ -830,7 +837,7 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf, trim_newline(nt->np.dev_name, IFNAMSIZ); dynamic_netconsole_mutex_unlock(); - return strnlen(buf, count); + return count; } static ssize_t local_port_store(struct config_item *item, const char *buf, @@ -849,7 +856,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf, ret = kstrtou16(buf, 10, &nt->np.local_port); if (ret < 0) goto out_unlock; - ret = strnlen(buf, count); + ret = count; out_unlock: 
dynamic_netconsole_mutex_unlock(); return ret; @@ -871,7 +878,7 @@ static ssize_t remote_port_store(struct config_item *item, ret = kstrtou16(buf, 10, &nt->np.remote_port); if (ret < 0) goto out_unlock; - ret = strnlen(buf, count); + ret = count; out_unlock: dynamic_netconsole_mutex_unlock(); return ret; @@ -896,7 +903,7 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf, goto out_unlock; nt->np.ipv6 = !!ipv6; - ret = strnlen(buf, count); + ret = count; out_unlock: dynamic_netconsole_mutex_unlock(); return ret; @@ -921,7 +928,7 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf, goto out_unlock; nt->np.ipv6 = !!ipv6; - ret = strnlen(buf, count); + ret = count; out_unlock: dynamic_netconsole_mutex_unlock(); return ret; @@ -957,7 +964,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf, goto out_unlock; memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); - ret = strnlen(buf, count); + ret = count; out_unlock: dynamic_netconsole_mutex_unlock(); return ret; @@ -1072,26 +1079,30 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf, size_t count) { struct userdatum *udm = to_userdatum(item); + char old_value[MAX_EXTRADATA_VALUE_LEN]; struct netconsole_target *nt; struct userdata *ud; ssize_t ret; - if (count > MAX_EXTRADATA_VALUE_LEN) + if (count >= MAX_EXTRADATA_VALUE_LEN) return -EMSGSIZE; mutex_lock(&netconsole_subsys.su_mutex); dynamic_netconsole_mutex_lock(); - - ret = strscpy(udm->value, buf, sizeof(udm->value)); - if (ret < 0) - goto out_unlock; + /* Snapshot for rollback if update_userdata() fails below */ + strscpy(old_value, udm->value, sizeof(old_value)); + /* count is bounded above, so strscpy() cannot truncate here */ + strscpy(udm->value, buf, sizeof(udm->value)); trim_newline(udm->value, sizeof(udm->value)); ud = to_userdata(item->ci_parent); nt = userdata_to_target(ud); ret = update_userdata(nt); - if (ret < 0) + if (ret < 0) { + /* Restore the previous 
value so it matches the live payload */ + strscpy(udm->value, old_value, sizeof(udm->value)); goto out_unlock; + } ret = count; out_unlock: dynamic_netconsole_mutex_unlock(); @@ -1133,7 +1144,7 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item, disable_sysdata_feature(nt, SYSDATA_MSGID); unlock_ok: - ret = strnlen(buf, count); + ret = count; dynamic_netconsole_mutex_unlock(); mutex_unlock(&netconsole_subsys.su_mutex); return ret; @@ -1162,7 +1173,7 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item, disable_sysdata_feature(nt, SYSDATA_RELEASE); unlock_ok: - ret = strnlen(buf, count); + ret = count; dynamic_netconsole_mutex_unlock(); mutex_unlock(&netconsole_subsys.su_mutex); return ret; @@ -1191,7 +1202,7 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item, disable_sysdata_feature(nt, SYSDATA_TASKNAME); unlock_ok: - ret = strnlen(buf, count); + ret = count; dynamic_netconsole_mutex_unlock(); mutex_unlock(&netconsole_subsys.su_mutex); return ret; @@ -1225,7 +1236,7 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item, disable_sysdata_feature(nt, SYSDATA_CPU_NR); unlock_ok: - ret = strnlen(buf, count); + ret = count; dynamic_netconsole_mutex_unlock(); mutex_unlock(&netconsole_subsys.su_mutex); return ret; diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 1e06e781c835f0..f00fc2f9ebdec7 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -829,7 +829,7 @@ static struct sk_buff *nsim_dev_trap_skb_build(void) skb->protocol = htons(ETH_P_IP); skb_set_network_header(skb, skb->len); - iph = skb_put(skb, sizeof(struct iphdr)); + iph = skb_put_zero(skb, sizeof(struct iphdr)); iph->protocol = IPPROTO_UDP; iph->saddr = in_aton("192.0.2.1"); iph->daddr = in_aton("198.51.100.1"); diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index 1f381d7b13ff3b..96a7d255f50fd9 100644 --- a/drivers/net/phy/dp83869.c +++ 
b/drivers/net/phy/dp83869.c @@ -31,6 +31,7 @@ #define DP83869_RGMIICTL 0x0032 #define DP83869_STRAP_STS1 0x006e #define DP83869_RGMIIDCTL 0x0086 +#define DP83869_ANA_PLL_PROG_PI 0x00c6 #define DP83869_RXFCFG 0x0134 #define DP83869_RXFPMD1 0x0136 #define DP83869_RXFPMD2 0x0137 @@ -826,12 +827,22 @@ static int dp83869_config_init(struct phy_device *phydev) dp83869_config_port_mirroring(phydev); /* Clock output selection if muxing property is set */ - if (dp83869->clk_output_sel != DP83869_CLK_O_SEL_REF_CLK) + if (dp83869->clk_output_sel != DP83869_CLK_O_SEL_REF_CLK) { + /* + * Table 7-121 in datasheet says we have to set register 0xc6 + * to value 0x10 before CLK_O_SEL can be modified. + */ + ret = phy_write_mmd(phydev, DP83869_DEVADDR, + DP83869_ANA_PLL_PROG_PI, 0x10); + if (ret) + return ret; + ret = phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_IO_MUX_CFG, DP83869_IO_MUX_CFG_CLK_O_SEL_MASK, dp83869->clk_output_sel << DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT); + } if (phy_interface_is_rgmii(phydev)) { ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIIDCTL, diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 4cda0643afb6eb..c880c95c41a5ec 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -683,6 +683,7 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); + unsigned int skb_len; int count, res; /* pad the frame and ensure terminating USB packet, datasheet 9.2.3 */ @@ -694,6 +695,8 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } + skb_len = skb->len; + netif_stop_queue(netdev); dev->tx_skb = skb; usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), @@ -707,9 +710,16 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, netdev->stats.tx_errors++; netif_start_queue(netdev); } + /* + * The URB was not submitted, so write_bulk_callback() will + * never run to free dev->tx_skb. 
Drop the skb here and + * clear tx_skb to avoid leaving a stale pointer. + */ + dev->tx_skb = NULL; + dev_kfree_skb_any(skb); } else { netdev->stats.tx_packets++; - netdev->stats.tx_bytes += skb->len; + netdev->stats.tx_bytes += skb_len; netif_trans_update(netdev); } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 2cf2dbd1c12fc9..46209917ae4d69 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1034,6 +1034,7 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev, err: port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; + synchronize_net(); return ret; } @@ -1053,10 +1054,16 @@ static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev, } /* inverse of do_vrf_add_slave */ -static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) +static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev, + bool needs_sync) { netdev_upper_dev_unlink(port_dev, dev); port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; + /* Make sure that concurrent RCU readers that identified the device + * as a VRF port see a VRF master or no master at all. 
+ */ + if (needs_sync) + synchronize_net(); cycle_netdev(port_dev, NULL); @@ -1065,7 +1072,7 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) { - return do_vrf_del_slave(dev, port_dev); + return do_vrf_del_slave(dev, port_dev, true); } static void vrf_dev_uninit(struct net_device *dev) @@ -1619,7 +1626,7 @@ static void vrf_dellink(struct net_device *dev, struct list_head *head) struct list_head *iter; netdev_for_each_lower_dev(dev, port_dev, iter) - vrf_del_slave(dev, port_dev); + do_vrf_del_slave(dev, port_dev, false); vrf_map_unregister_dev(dev); @@ -1751,7 +1758,7 @@ static int vrf_device_event(struct notifier_block *unused, goto out; vrf_dev = netdev_master_upper_dev_get(dev); - vrf_del_slave(vrf_dev, dev); + do_vrf_del_slave(vrf_dev, dev, false); } out: return NOTIFY_DONE; diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index d17c701c7888b3..08c27bb438b597 100644 --- a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -317,6 +317,7 @@ #define TRF7970A_RSSI_OSC_STATUS_RSSI_MASK (BIT(2) | BIT(1) | BIT(0)) #define TRF7970A_RSSI_OSC_STATUS_RSSI_X_MASK (BIT(5) | BIT(4) | BIT(3)) #define TRF7970A_RSSI_OSC_STATUS_RSSI_OSC_OK BIT(6) +#define TRF7970A_RSSI_OSC_STATUS_RSSI_NOISE_LEVEL 1 #define TRF7970A_SPECIAL_FCN_REG1_COL_7_6 BIT(0) #define TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL BIT(1) @@ -1300,7 +1301,7 @@ static int trf7970a_is_rf_field(struct trf7970a *trf, bool *is_rf_field) if (ret) return ret; - if (rssi & TRF7970A_RSSI_OSC_STATUS_RSSI_MASK) + if ((rssi & TRF7970A_RSSI_OSC_STATUS_RSSI_MASK) > TRF7970A_RSSI_OSC_STATUS_RSSI_NOISE_LEVEL) *is_rf_field = true; else *is_rf_field = false; diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c index 2d325fb9308366..77f1d22512f8f1 100644 --- a/drivers/nvme/common/auth.c +++ b/drivers/nvme/common/auth.c @@ -351,18 +351,29 @@ struct nvme_dhchap_key *nvme_auth_transform_key( } 
EXPORT_SYMBOL_GPL(nvme_auth_transform_key); +/** + * nvme_auth_augmented_challenge() - Compute the augmented DH-HMAC-CHAP challenge + * @hmac_id: Hash algorithm identifier + * @skey: Session key + * @skey_len: Length of @skey + * @challenge: Challenge value + * @aug: Output buffer for the augmented challenge + * @hlen: Hash output length (length of @challenge and @aug) + * + * NVMe base specification 8.3.5.5.4: The augmented challenge is computed + * applying the HMAC function using the hash function H() selected by the + * HashID parameter ... with the hash of the ephemeral DH key ... as HMAC key + * to the challenge C (i.e., Ca = HMAC(H(g^xy mod p), C)). + * + * As the session key skey is already H(g^xy mod p) per section 8.3.5.5.9, use + * it directly as the HMAC key without additional hashing. + * + * Return: 0 on success, negative errno on failure. + */ int nvme_auth_augmented_challenge(u8 hmac_id, const u8 *skey, size_t skey_len, const u8 *challenge, u8 *aug, size_t hlen) { - u8 hashed_key[NVME_AUTH_MAX_DIGEST_SIZE]; - int ret; - - ret = nvme_auth_hash(hmac_id, skey, skey_len, hashed_key); - if (ret) - return ret; - ret = nvme_auth_hmac(hmac_id, hashed_key, hlen, challenge, hlen, aug); - memzero_explicit(hashed_key, sizeof(hashed_key)); - return ret; + return nvme_auth_hmac(hmac_id, skey, skey_len, challenge, hlen, aug); } EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge); @@ -403,33 +414,76 @@ int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm, } EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey); -int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, - const u8 *ctrl_key, size_t ctrl_key_len, - u8 *sess_key, size_t sess_key_len) +/** + * nvme_auth_gen_session_key() - Generate an ephemeral session key + * @dh_tfm: Diffie-Hellman transform with local private key already set + * @public_key: Peer's public key + * @public_key_len: Length of @public_key + * @sess_key: Output buffer for the session key + * @sess_key_len: Size of @sess_key buffer + * @hash_id: Hash 
algorithm identifier + * + * NVMe base specification 8.3.5.5.9: The session key Ks shall be computed from + * the ephemeral DH key (i.e., g^xy mod p) ... by applying the hash function + * H() selected by the HashID parameter ... (i.e., Ks = H(g^xy mod p)). + * + * Return: 0 on success, negative errno on failure. + */ +int nvme_auth_gen_session_key(struct crypto_kpp *dh_tfm, + const u8 *public_key, size_t public_key_len, + u8 *sess_key, size_t sess_key_len, u8 hash_id) { struct kpp_request *req; struct crypto_wait wait; struct scatterlist src, dst; + u8 *dh_secret; + size_t dh_secret_len, hash_len; int ret; - req = kpp_request_alloc(dh_tfm, GFP_KERNEL); - if (!req) + hash_len = nvme_auth_hmac_hash_len(hash_id); + if (!hash_len) { + pr_warn("%s: invalid hash algorithm %d\n", __func__, hash_id); + return -EINVAL; + } + + if (sess_key_len != hash_len) { + pr_warn("%s: sess_key buffer missized (%zu != %zu)\n", + __func__, sess_key_len, hash_len); + return -EINVAL; + } + + dh_secret_len = crypto_kpp_maxsize(dh_tfm); + dh_secret = kzalloc(dh_secret_len, GFP_KERNEL); + if (!dh_secret) return -ENOMEM; + req = kpp_request_alloc(dh_tfm, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto out_free_secret; + } + crypto_init_wait(&wait); - sg_init_one(&src, ctrl_key, ctrl_key_len); - kpp_request_set_input(req, &src, ctrl_key_len); - sg_init_one(&dst, sess_key, sess_key_len); - kpp_request_set_output(req, &dst, sess_key_len); + sg_init_one(&src, public_key, public_key_len); + kpp_request_set_input(req, &src, public_key_len); + sg_init_one(&dst, dh_secret, dh_secret_len); + kpp_request_set_output(req, &dst, dh_secret_len); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait); - kpp_request_free(req); + + if (ret) + goto out_free_secret; + + ret = nvme_auth_hash(hash_id, dh_secret, dh_secret_len, sess_key); + +out_free_secret: + kfree_sensitive(dh_secret); return ret; } 
-EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret); +EXPORT_SYMBOL_GPL(nvme_auth_gen_session_key); int nvme_auth_parse_key(const char *secret, struct nvme_dhchap_key **ret_key) { diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index ed61b97fde59f7..423c9c628e7bfa 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -1267,11 +1267,7 @@ static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size) static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl) { - struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl); - - if (anv->ctrl.admin_q) - blk_put_queue(anv->ctrl.admin_q); - put_device(anv->dev); + put_device(ctrl->dev); } static const struct nvme_ctrl_ops nvme_ctrl_ops = { diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index bbedbe181c8a63..16de4499a8e7d5 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -535,11 +535,12 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl, put_unaligned_le16(chap->transaction, buf); nvme_auth_hmac_update(&hmac, buf, 2); - memset(buf, 0, 4); + *buf = chap->sc_c; nvme_auth_hmac_update(&hmac, buf, 1); nvme_auth_hmac_update(&hmac, "Controller", 10); nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn, strlen(ctrl->opts->subsysnqn)); + memset(buf, 0, 4); nvme_auth_hmac_update(&hmac, buf, 1); nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn, strlen(ctrl->opts->host->nqn)); @@ -587,7 +588,7 @@ static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl, } gen_sesskey: - chap->sess_key_len = chap->host_key_len; + chap->sess_key_len = chap->hash_len; chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL); if (!chap->sess_key) { chap->sess_key_len = 0; @@ -595,16 +596,17 @@ static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl, return -ENOMEM; } - ret = nvme_auth_gen_shared_secret(chap->dh_tfm, - chap->ctrl_key, chap->ctrl_key_len, - chap->sess_key, chap->sess_key_len); + ret = nvme_auth_gen_session_key(chap->dh_tfm, + 
chap->ctrl_key, chap->ctrl_key_len, + chap->sess_key, chap->sess_key_len, + chap->hash_id); if (ret) { dev_dbg(ctrl->device, - "failed to generate shared secret, error %d\n", ret); + "failed to generate session key, error %d\n", ret); chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return ret; } - dev_dbg(ctrl->device, "shared secret %*ph\n", + dev_dbg(ctrl->device, "session key %*ph\n", (int)chap->sess_key_len, chap->sess_key); return 0; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 1e33af94c24b97..dc388e24caadeb 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -454,11 +454,10 @@ void nvme_end_req(struct request *req) blk_mq_end_request(req, status); } -void nvme_complete_rq(struct request *req) +static void __nvme_complete_rq(struct request *req) { struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; - trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); /* @@ -493,6 +492,12 @@ void nvme_complete_rq(struct request *req) return; } } + +void nvme_complete_rq(struct request *req) +{ + trace_nvme_complete_rq(req); + __nvme_complete_rq(req); +} EXPORT_SYMBOL_GPL(nvme_complete_rq); void nvme_complete_batch_req(struct request *req) @@ -513,7 +518,7 @@ blk_status_t nvme_host_path_error(struct request *req) { nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; blk_mq_set_request_complete(req); - nvme_complete_rq(req); + __nvme_complete_rq(req); return BLK_STS_OK; } EXPORT_SYMBOL_GPL(nvme_host_path_error); @@ -3044,7 +3049,7 @@ static const struct nvme_core_quirk_entry core_quirks[] = { * * The device is left in a state where it is also not possible * to use "nvme set-feature" to disable APST, but booting with - * nvme_core.default_ps_max_latency=0 works. + * nvme_core.default_ps_max_latency_us=0 works. 
*/ .vid = 0x1e0f, .mn = "KCD6XVUL6T40", @@ -4083,7 +4088,8 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) mutex_unlock(&ctrl->subsys->lock); #ifdef CONFIG_NVME_MULTIPATH - cancel_delayed_work(&head->remove_work); + if (cancel_delayed_work(&head->remove_work)) + module_put(THIS_MODULE); #endif return 0; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index e1bb4707183cae..e4f4528fe2a2d6 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3968,3 +3968,4 @@ module_exit(nvme_fc_exit_module); MODULE_DESCRIPTION("NVMe host FC transport driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("nvme-fc"); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index ba00f0b72b858c..263161cb8ac06c 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -231,16 +231,12 @@ bool nvme_mpath_clear_current_path(struct nvme_ns *ns) bool changed = false; int node; - if (!head) - goto out; - for_each_node(node) { if (ns == rcu_access_pointer(head->current_path[node])) { rcu_assign_pointer(head->current_path[node], NULL); changed = true; } } -out: return changed; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index db5fc9bf662723..9fd04cd7c5cb13 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2241,6 +2241,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) static const struct blk_mq_ops nvme_mq_admin_ops = { .queue_rq = nvme_queue_rq, .complete = nvme_pci_complete_rq, + .commit_rqs = nvme_commit_rqs, .init_hctx = nvme_admin_init_hctx, .init_request = nvme_pci_init_request, .timeout = nvme_timeout, @@ -4104,6 +4105,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x1c5f, 0x0555), /* Memblaze Pblaze5 adapter */ + .driver_data = 
NVME_QUIRK_NO_NS_DESC_LIST, }, + { PCI_DEVICE(0x144d, 0xa808), /* Samsung PM981/983 */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 57111139e84fa9..f77c960f7632d1 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -2189,6 +2189,13 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) nvme_rdma_reconnect_or_remove(ctrl, ret); } +static bool nvme_rdma_supports_pci_p2pdma(struct nvme_ctrl *ctrl) +{ + struct nvme_rdma_ctrl *r_ctrl = to_rdma_ctrl(ctrl); + + return ib_dma_pci_p2p_dma_supported(r_ctrl->device->dev); +} + static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .name = "rdma", .module = THIS_MODULE, @@ -2203,6 +2210,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .get_address = nvmf_get_address, .stop_ctrl = nvme_rdma_stop_ctrl, .get_virt_boundary = nvme_get_virt_boundary, + .supports_pci_p2pdma = nvme_rdma_supports_pci_p2pdma, }; /* @@ -2432,3 +2440,4 @@ module_exit(nvme_rdma_cleanup_module); MODULE_DESCRIPTION("NVMe host RDMA transport driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("nvme-rdma"); diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index 7bf2e972126b19..e59758616f277e 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -883,10 +883,26 @@ static ssize_t tls_keyring_show(struct device *dev, } static DEVICE_ATTR_RO(tls_keyring); +static ssize_t tls_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + const char *mode; + + if (ctrl->opts->tls) + mode = "tls"; + else + mode = "concat"; + + return sysfs_emit(buf, "%s\n", mode); +} +static DEVICE_ATTR_RO(tls_mode); + static struct attribute *nvme_tls_attrs[] = { &dev_attr_tls_key.attr, 
&dev_attr_tls_configured_key.attr, &dev_attr_tls_keyring.attr, + &dev_attr_tls_mode.attr, NULL, }; @@ -908,6 +924,9 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj, if (a == &dev_attr_tls_keyring.attr && !ctrl->opts->keyring) return 0; + if (a == &dev_attr_tls_mode.attr && + !ctrl->opts->tls && !ctrl->opts->concat) + return 0; return a->mode; } diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 243dab830dc84f..15d36d6a728e80 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1438,18 +1438,32 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; - unsigned int noreclaim_flag; + unsigned int noio_flag; if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) return; page_frag_cache_drain(&queue->pf_cache); - noreclaim_flag = memalloc_noreclaim_save(); - /* ->sock will be released by fput() */ - fput(queue->sock->file); + /* + * Prevent memory reclaim from triggering block I/O during socket + * teardown. The socket release path fput -> tcp_close -> + * tcp_disconnect -> tcp_send_active_reset may allocate memory, and + * allowing reclaim to issue I/O could deadlock if we're being called + * from block device teardown (e.g., del_gendisk -> elevator cleanup) + * which holds locks that the I/O completion path needs. + */ + noio_flag = memalloc_noio_save(); + + /* + * Release the socket synchronously. During reset in + * nvme_reset_ctrl_work(), queue teardown is immediately followed by + * re-allocation. fput() defers socket cleanup to delayed_fput_work + * in workqueue context, which can race with new queue setup. 
+ */ + __fput_sync(queue->sock->file); queue->sock = NULL; - memalloc_noreclaim_restore(noreclaim_flag); + memalloc_noio_restore(noio_flag); kfree(queue->pdu); mutex_destroy(&queue->send_mutex); @@ -1901,8 +1915,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, err_rcv_pdu: kfree(queue->pdu); err_sock: - /* ->sock will be released by fput() */ - fput(queue->sock->file); + /* Use sync variant - see nvme_tcp_free_queue() for explanation */ + __fput_sync(queue->sock->file); queue->sock = NULL; err_destroy_mutex: mutex_destroy(&queue->send_mutex); @@ -3071,3 +3085,4 @@ module_exit(nvme_tcp_cleanup_module); MODULE_DESCRIPTION("NVMe host TCP transport driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("nvme-tcp"); diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index e4fd1caadfb002..01b799e92ae673 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -687,12 +687,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL | NVME_CTRL_CMIC_ANA; - /* Limit MDTS according to transport capability */ - if (ctrl->ops->get_mdts) - id->mdts = ctrl->ops->get_mdts(ctrl); - else - id->mdts = 0; - + /* Limit MDTS according to port config or transport capability */ + id->mdts = nvmet_ctrl_mdts(req); id->cntlid = cpu_to_le16(ctrl->cntlid); id->ver = cpu_to_le32(ctrl->subsys->ver); diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index b34610e2f19d4d..9a2eccdc8b1332 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -229,9 +229,6 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, bool reset) void nvmet_auth_sq_free(struct nvmet_sq *sq) { cancel_delayed_work(&sq->auth_expired_work); -#ifdef CONFIG_NVME_TARGET_TCP_TLS - sq->tls_key = NULL; -#endif kfree(sq->dhchap_c1); sq->dhchap_c1 = NULL; kfree(sq->dhchap_c2); @@ -402,11 +399,12 @@ int nvmet_auth_ctrl_hash(struct 
nvmet_req *req, u8 *response, put_unaligned_le16(req->sq->dhchap_tid, buf); nvme_auth_hmac_update(&hmac, buf, 2); - memset(buf, 0, 4); + *buf = req->sq->sc_c; nvme_auth_hmac_update(&hmac, buf, 1); nvme_auth_hmac_update(&hmac, "Controller", 10); nvme_auth_hmac_update(&hmac, ctrl->subsys->subsysnqn, strlen(ctrl->subsys->subsysnqn)); + memset(buf, 0, 4); nvme_auth_hmac_update(&hmac, buf, 1); nvme_auth_hmac_update(&hmac, ctrl->hostnqn, strlen(ctrl->hostnqn)); nvme_auth_hmac_final(&hmac, response); @@ -449,18 +447,19 @@ int nvmet_auth_ctrl_sesskey(struct nvmet_req *req, struct nvmet_ctrl *ctrl = req->sq->ctrl; int ret; - req->sq->dhchap_skey_len = ctrl->dh_keysize; + req->sq->dhchap_skey_len = nvme_auth_hmac_hash_len(ctrl->shash_id); req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL); if (!req->sq->dhchap_skey) return -ENOMEM; - ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm, - pkey, pkey_size, - req->sq->dhchap_skey, - req->sq->dhchap_skey_len); + ret = nvme_auth_gen_session_key(ctrl->dh_tfm, + pkey, pkey_size, + req->sq->dhchap_skey, + req->sq->dhchap_skey_len, + ctrl->shash_id); if (ret) - pr_debug("failed to compute shared secret, err %d\n", ret); + pr_debug("failed to compute session key, err %d\n", ret); else - pr_debug("%s: shared secret %*ph\n", __func__, + pr_debug("%s: session key %*ph\n", __func__, (int)req->sq->dhchap_skey_len, req->sq->dhchap_skey); diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 463348c7f097bc..b88f897f06e25e 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -301,6 +301,31 @@ static ssize_t nvmet_param_max_queue_size_store(struct config_item *item, CONFIGFS_ATTR(nvmet_, param_max_queue_size); +static ssize_t nvmet_param_mdts_show(struct config_item *item, char *page) +{ + struct nvmet_port *port = to_nvmet_port(item); + + return snprintf(page, PAGE_SIZE, "%d\n", port->mdts); +} + +static ssize_t nvmet_param_mdts_store(struct config_item *item, + const 
char *page, size_t count) +{ + struct nvmet_port *port = to_nvmet_port(item); + int ret; + + if (nvmet_is_port_enabled(port, __func__)) + return -EACCES; + ret = kstrtoint(page, 0, &port->mdts); + if (ret) { + pr_err("Invalid value '%s' for mdts\n", page); + return -EINVAL; + } + return count; +} + +CONFIGFS_ATTR(nvmet_, param_mdts); + #ifdef CONFIG_BLK_DEV_INTEGRITY static ssize_t nvmet_param_pi_enable_show(struct config_item *item, char *page) @@ -1995,6 +2020,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = { &nvmet_attr_addr_tsas, &nvmet_attr_param_inline_data_size, &nvmet_attr_param_max_queue_size, + &nvmet_attr_param_mdts, #ifdef CONFIG_BLK_DEV_INTEGRITY &nvmet_attr_param_pi_enable, #endif @@ -2053,6 +2079,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group, INIT_LIST_HEAD(&port->referrals); port->inline_data_size = -1; /* < 0 == let the transport choose */ port->max_queue_size = -1; /* < 0 == let the transport choose */ + port->mdts = -1; /* < 0 == let the transport choose */ port->disc_addr.trtype = NVMF_TRTYPE_MAX; port->disc_addr.portid = cpu_to_le16(portid); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 45f686175feaaf..62dd59b9aa4f1c 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -370,6 +370,14 @@ int nvmet_enable_port(struct nvmet_port *port) NVMET_MIN_QUEUE_SIZE, NVMET_MAX_QUEUE_SIZE); + /* + * If the transport didn't set the mdts properly, then clamp it to the + * target limits. Also set default values in case the transport didn't + * set it at all. 
+ */ + if (port->mdts < 0 || port->mdts > NVMET_MAX_MDTS) + port->mdts = 0; + port->enabled = true; port->tr_ops = ops; return 0; @@ -1743,7 +1751,7 @@ static void nvmet_ctrl_free(struct kref *ref) nvmet_stop_keep_alive_timer(ctrl); - flush_work(&ctrl->async_event_work); + cancel_work_sync(&ctrl->async_event_work); cancel_work_sync(&ctrl->fatal_err_work); nvmet_destroy_auth(ctrl); diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index b9ab80c7a69418..f1e613e7c63e51 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -395,10 +395,9 @@ void nvmet_execute_auth_send(struct nvmet_req *req) goto complete; } /* Final states, clear up variables */ - if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { - nvmet_auth_sq_free(req->sq); + nvmet_auth_sq_free(req->sq); + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) nvmet_ctrl_fatal_error(ctrl); - } complete: nvmet_req_complete(req, status); @@ -574,7 +573,9 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) status = nvmet_copy_to_sgl(req, 0, d, al); kfree(d); done: - if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) + nvmet_auth_sq_free(req->sq); + else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { nvmet_auth_sq_free(req->sq); nvmet_ctrl_fatal_error(ctrl); } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 50070cfb782aed..3305a88684ecab 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -214,6 +214,7 @@ struct nvmet_port { bool enabled; int inline_data_size; int max_queue_size; + int mdts; const struct nvmet_fabrics_ops *tr_ops; bool pi_enable; }; @@ -673,6 +674,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, #define NVMET_MAX_QUEUE_SIZE 1024 #define NVMET_NR_QUEUES 128 #define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1) 
+#define NVMET_MAX_MDTS 255 /* * Nice round number that makes a list of nsids fit into a page. @@ -761,6 +763,17 @@ static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl) return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI; } +/* Limit MDTS according to port config or transport capability */ +static inline u8 nvmet_ctrl_mdts(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u8 mdts = req->port->mdts; + + if (!ctrl->ops->get_mdts) + return mdts; + return min_not_zero(ctrl->ops->get_mdts(ctrl), mdts); +} + #ifdef CONFIG_NVME_TARGET_PASSTHRU void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys); int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 4b8b02341ddc38..164a564ba3b4e9 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -349,9 +349,7 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) cmd->req.sg = NULL; } -static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue); - -static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) +static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) { struct bio_vec *iov = cmd->iov; struct scatterlist *sg; @@ -364,22 +362,19 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) offset = cmd->rbytes_done; cmd->sg_idx = offset / PAGE_SIZE; sg_offset = offset % PAGE_SIZE; - if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) { - nvmet_tcp_fatal_error(cmd->queue); - return; - } + if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) + return -EPROTO; + sg = &cmd->req.sg[cmd->sg_idx]; sg_remaining = cmd->req.sg_cnt - cmd->sg_idx; while (length) { - if (!sg_remaining) { - nvmet_tcp_fatal_error(cmd->queue); - return; - } - if (!sg->length || sg->length <= sg_offset) { - nvmet_tcp_fatal_error(cmd->queue); - return; - } + if (!sg_remaining) + return -EPROTO; + + if (!sg->length || sg->length <= sg_offset) + return -EPROTO; + u32 iov_len = 
min_t(u32, length, sg->length - sg_offset); bvec_set_page(iov, sg_page(sg), iov_len, @@ -394,24 +389,29 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, nr_pages, cmd->pdu_len); -} - -static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) -{ - queue->rcv_state = NVMET_TCP_RECV_ERR; - if (queue->nvme_sq.ctrl) - nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); - else - kernel_sock_shutdown(queue->sock, SHUT_RDWR); + return 0; } static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) { + /* + * Keep rcv_state at RECV_ERR even for the internal -ESHUTDOWN path. + * nvmet_tcp_handle_icreq() can return -ESHUTDOWN after the ICReq has + * already been consumed and queue teardown has started. + * + * If nvmet_tcp_data_ready() or nvmet_tcp_write_space() queues + * nvmet_tcp_io_work() again before nvmet_tcp_release_queue_work() + * cancels it, the queue must not keep that old receive state. + * Otherwise the next nvmet_tcp_io_work() run can reach + * nvmet_tcp_done_recv_pdu() and try to handle the same ICReq again. + * + * That is why queue->rcv_state needs to be updated before we return. 
+ */ queue->rcv_state = NVMET_TCP_RECV_ERR; - if (status == -EPIPE || status == -ECONNRESET) + if (status == -EPIPE || status == -ECONNRESET || !queue->nvme_sq.ctrl) kernel_sock_shutdown(queue->sock, SHUT_RDWR); else - nvmet_tcp_fatal_error(queue); + nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); } static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) @@ -887,7 +887,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) { pr_err("bad nvme-tcp pdu length (%d)\n", le32_to_cpu(icreq->hdr.plen)); - nvmet_tcp_fatal_error(queue); return -EPROTO; } @@ -922,16 +921,29 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) iov.iov_len = sizeof(*icresp); ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); if (ret < 0) { + spin_lock_bh(&queue->state_lock); + if (queue->state == NVMET_TCP_Q_DISCONNECTING) { + spin_unlock_bh(&queue->state_lock); + return -ESHUTDOWN; + } queue->state = NVMET_TCP_Q_FAILED; + spin_unlock_bh(&queue->state_lock); return ret; /* queue removal will cleanup */ } + spin_lock_bh(&queue->state_lock); + if (queue->state == NVMET_TCP_Q_DISCONNECTING) { + spin_unlock_bh(&queue->state_lock); + /* Tell nvmet_tcp_socket_error() teardown is in progress. 
*/ + return -ESHUTDOWN; + } queue->state = NVMET_TCP_Q_LIVE; + spin_unlock_bh(&queue->state_lock); nvmet_prepare_receive_pdu(queue); return 0; } -static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, +static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) { size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); @@ -947,19 +959,22 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, if (!nvme_is_write(cmd->req.cmd) || !data_len || data_len > cmd->req.port->inline_data_size) { nvmet_prepare_receive_pdu(queue); - return; + return 0; } ret = nvmet_tcp_map_data(cmd); if (unlikely(ret)) { pr_err("queue %d: failed to map data\n", queue->idx); - nvmet_tcp_fatal_error(queue); - return; + return -EPROTO; } queue->rcv_state = NVMET_TCP_RECV_DATA; - nvmet_tcp_build_pdu_iovec(cmd); cmd->flags |= NVMET_TCP_F_INIT_FAILED; + ret = nvmet_tcp_build_pdu_iovec(cmd); + if (unlikely(ret)) + pr_err("queue %d: failed to build PDU iovec\n", queue->idx); + + return ret; } static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) @@ -1011,7 +1026,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) goto err_proto; } cmd->pdu_recv = 0; - nvmet_tcp_build_pdu_iovec(cmd); + if (unlikely(nvmet_tcp_build_pdu_iovec(cmd))) { + pr_err("queue %d: failed to build PDU iovec\n", queue->idx); + goto err_proto; + } queue->cmd = cmd; queue->rcv_state = NVMET_TCP_RECV_DATA; @@ -1019,7 +1037,6 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) err_proto: /* FIXME: use proper transport errors */ - nvmet_tcp_fatal_error(queue); return -EPROTO; } @@ -1034,7 +1051,6 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) if (hdr->type != nvme_tcp_icreq) { pr_err("unexpected pdu type (%d) before icreq\n", hdr->type); - nvmet_tcp_fatal_error(queue); return -EPROTO; } return nvmet_tcp_handle_icreq(queue); @@ -1043,7 +1059,6 @@ 
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) if (unlikely(hdr->type == nvme_tcp_icreq)) { pr_err("queue %d: received icreq pdu in state %d\n", queue->idx, queue->state); - nvmet_tcp_fatal_error(queue); return -EPROTO; } @@ -1060,7 +1075,6 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", queue->idx, queue->nr_cmds, queue->send_list_len, nvme_cmd->common.opcode); - nvmet_tcp_fatal_error(queue); return -ENOMEM; } @@ -1074,17 +1088,16 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) le32_to_cpu(req->cmd->common.dptr.sgl.length), le16_to_cpu(req->cqe->status)); - nvmet_tcp_handle_req_failure(queue, queue->cmd, req); - return 0; + return nvmet_tcp_handle_req_failure(queue, queue->cmd, req); } ret = nvmet_tcp_map_data(queue->cmd); if (unlikely(ret)) { pr_err("queue %d: failed to map data\n", queue->idx); if (nvmet_tcp_has_inline_data(queue->cmd)) - nvmet_tcp_fatal_error(queue); - else - nvmet_req_complete(req, ret); + return -EPROTO; + + nvmet_req_complete(req, ret); ret = -EAGAIN; goto out; } @@ -1092,8 +1105,11 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) if (nvmet_tcp_need_data_in(queue->cmd)) { if (nvmet_tcp_has_inline_data(queue->cmd)) { queue->rcv_state = NVMET_TCP_RECV_DATA; - nvmet_tcp_build_pdu_iovec(queue->cmd); - return 0; + ret = nvmet_tcp_build_pdu_iovec(queue->cmd); + if (unlikely(ret)) + pr_err("queue %d: failed to build PDU iovec\n", + queue->idx); + return ret; } /* send back R2T */ nvmet_tcp_queue_response(&queue->cmd->req); @@ -1204,7 +1220,6 @@ static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) { pr_err("unexpected pdu type %d\n", hdr->type); - nvmet_tcp_fatal_error(queue); return -EIO; } @@ -1218,16 +1233,12 @@ static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) } if (queue->hdr_digest && - nvmet_tcp_verify_hdgst(queue, 
&queue->pdu, hdr->hlen)) { - nvmet_tcp_fatal_error(queue); /* fatal */ + nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) return -EPROTO; - } if (queue->data_digest && - nvmet_tcp_check_ddgst(queue, &queue->pdu)) { - nvmet_tcp_fatal_error(queue); /* fatal */ + nvmet_tcp_check_ddgst(queue, &queue->pdu)) return -EPROTO; - } return nvmet_tcp_done_recv_pdu(queue); } @@ -1310,9 +1321,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) queue->idx, cmd->req.cmd->common.command_id, queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), le32_to_cpu(cmd->exp_ddgst)); - nvmet_req_uninit(&cmd->req); + if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED)) + nvmet_req_uninit(&cmd->req); nvmet_tcp_free_cmd_buffers(cmd); - nvmet_tcp_fatal_error(queue); ret = -EPROTO; goto out; } diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index aeaf73b54c3a3b..f00921931eb64d 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -69,7 +69,6 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns) void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req) { u8 zasl = req->sq->ctrl->subsys->zasl; - struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_id_ctrl_zns *id; u16 status; @@ -79,10 +78,7 @@ void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req) goto out; } - if (ctrl->ops->get_mdts) - id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl); - else - id->zasl = zasl; + id->zasl = min_not_zero(nvmet_ctrl_mdts(req), zasl); status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index e8002526cfb05f..d71dac9436e3ea 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -1231,6 +1231,7 @@ config REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY tristate "Raspberry Pi 7-inch touchscreen panel ATTINY regulator" depends on ARM || ARM64 || COMPILE_TEST depends on BACKLIGHT_CLASS_DEVICE + depends on GPIOLIB depends on I2C select REGMAP_I2C help diff --git 
a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index 4d8f09910a4653..7416f941e5b6cf 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig @@ -85,14 +85,6 @@ config HMC_DRV transfer cache size from its default value 0.5MB to N bytes. If N is zero, then no caching is performed. -config SCLP_OFB - def_bool n - prompt "Support for Open-for-Business SCLP Event" - depends on S390 - help - This option enables the Open-for-Business interface to the s390 - Service Element. - config S390_UV_UAPI def_tristate m prompt "Ultravisor userspace API" diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 9cfbe3fc3dca7e..8c77e8c44fc23c 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -80,14 +80,11 @@ static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) static struct sclp_register sclp_conf_register = { -#ifdef CONFIG_SCLP_OFB .send_mask = EVTYP_CONFMGMDATA_MASK, -#endif .receive_mask = EVTYP_CONFMGMDATA_MASK, .receiver_fn = sclp_conf_receiver_fn, }; -#ifdef CONFIG_SCLP_OFB static int sclp_ofb_send_req(char *ev_data, size_t len) { static DEFINE_MUTEX(send_mutex); @@ -143,11 +140,9 @@ static const struct bin_attribute ofb_bin_attr = { }, .write = sysfs_ofb_data_write, }; -#endif static int __init sclp_ofb_setup(void) { -#ifdef CONFIG_SCLP_OFB struct kset *ofb_kset; int rc; @@ -159,7 +154,6 @@ static int __init sclp_ofb_setup(void) kset_unregister(ofb_kset); return rc; } -#endif return 0; } diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 7adb2573f50d42..c36c54ecd354b4 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -395,7 +395,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) switch (req_op(rq)) { case REQ_OP_WRITE: - if (!cd->writeable) + if (get_disk_ro(cd->disk)) goto out; SCpnt->cmnd[0] = WRITE_10; cd->cdi.media_written = 1; @@ -681,6 +681,7 @@ static int sr_probe(struct scsi_device *sdev) error = -ENOMEM; if (get_capabilities(cd)) goto 
fail_minor; + cdrom_probe_write_features(&cd->cdi); sr_vendor_init(cd); set_capacity(disk, cd->capacity); @@ -899,14 +900,6 @@ static int get_capabilities(struct scsi_cd *cd) /*else I don't think it can close its tray cd->cdi.mask |= CDC_CLOSE_TRAY; */ - /* - * if DVD-RAM, MRW-W or CD-RW, we are randomly writable - */ - if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) != - (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) { - cd->writeable = 1; - } - kfree(buffer); return 0; } diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index dc899277b3a441..2d92f9cb6fec7c 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -35,7 +35,6 @@ typedef struct scsi_cd { struct scsi_device *device; unsigned int vendor; /* vendor code, see sr_vendor.c */ unsigned long ms_offset; /* for reading multisession-CD's */ - unsigned writeable : 1; unsigned use:1; /* is this device still supportable */ unsigned xa_flag:1; /* CD has XA sectors ? */ unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c index 70b5867f89e19c..4fb4c1acd3bb90 100644 --- a/drivers/soundwire/generic_bandwidth_allocation.c +++ b/drivers/soundwire/generic_bandwidth_allocation.c @@ -301,39 +301,35 @@ static int sdw_add_element_group_count(struct sdw_group *group, int num = group->count; int i; - for (i = 0; i <= num; i++) { + for (i = 0; i < num; i++) { if (rate == group->rates[i] && lane == group->lanes[i]) - break; - - if (i != num) - continue; + return 0; + } - if (group->count >= group->max_size) { - unsigned int *rates; - unsigned int *lanes; + if (group->count >= group->max_size) { + unsigned int *rates; + unsigned int *lanes; - group->max_size += 1; - rates = krealloc(group->rates, - (sizeof(int) * group->max_size), - GFP_KERNEL); - if (!rates) - return -ENOMEM; + rates = krealloc_array(group->rates, group->max_size + 1, + sizeof(*group->rates), GFP_KERNEL); + if 
(!rates) + return -ENOMEM; - group->rates = rates; + group->rates = rates; - lanes = krealloc(group->lanes, - (sizeof(int) * group->max_size), - GFP_KERNEL); - if (!lanes) - return -ENOMEM; + lanes = krealloc_array(group->lanes, group->max_size + 1, + sizeof(*group->lanes), GFP_KERNEL); + if (!lanes) + return -ENOMEM; - group->lanes = lanes; - } + group->lanes = lanes; - group->rates[group->count] = rate; - group->lanes[group->count++] = lane; + group->max_size += 1; } + group->rates[group->count] = rate; + group->lanes[group->count++] = lane; + return 0; } diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c index 913e95207ee1a4..ee951be1546577 100644 --- a/drivers/soundwire/intel_auxdevice.c +++ b/drivers/soundwire/intel_auxdevice.c @@ -51,6 +51,8 @@ struct wake_capable_part { }; static struct wake_capable_part wake_capable_list[] = { + {0x01fa, 0x2A30}, + {0x01fa, 0x2A3B}, {0x01fa, 0x4243}, {0x01fa, 0x4245}, {0x01fa, 0x4249}, diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index ff763b692078db..e0c49cbcb1bab4 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -244,8 +244,8 @@ int sdw_of_find_slaves(struct sdw_bus *bus) struct sdw_slave_id id; const __be32 *addr; - compat = of_get_property(node, "compatible", NULL); - if (!compat) + ret = of_property_read_string(node, "compatible", &compat); + if (ret) continue; ret = sscanf(compat, "sdw%01x%04hx%04hx%02hhx", &sdw_version, diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index c2a6709dcb1196..cbf7bd3d4e7bac 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -2246,11 +2246,15 @@ EXPORT_SYMBOL(sdw_stream_add_slave); * @slave: SDW Slave instance * @stream: SoundWire stream * - * This removes and frees port_rt and slave_rt from a stream + * This removes and frees port_rt and slave_rt from a stream. + * If stream is NULL or an ERR_PTR, do nothing and return 0. 
*/ int sdw_stream_remove_slave(struct sdw_slave *slave, struct sdw_stream_runtime *stream) { + if (IS_ERR_OR_NULL(stream)) + return 0; + mutex_lock(&slave->bus->bus_lock); sdw_slave_port_free(slave, stream); diff --git a/drivers/spi/spi-amlogic-spisg.c b/drivers/spi/spi-amlogic-spisg.c index 19c5eba412ef4c..f9de2d2c92132b 100644 --- a/drivers/spi/spi-amlogic-spisg.c +++ b/drivers/spi/spi-amlogic-spisg.c @@ -794,6 +794,7 @@ static int aml_spisg_probe(struct platform_device *pdev) dma_set_max_seg_size(&pdev->dev, SPISG_BLOCK_MAX); + init_completion(&spisg->completion); ret = devm_request_irq(&pdev->dev, irq, aml_spisg_irq, 0, NULL, spisg); if (ret) { dev_err(&pdev->dev, "irq request failed\n"); @@ -806,8 +807,6 @@ static int aml_spisg_probe(struct platform_device *pdev) goto out_clk; } - init_completion(&spisg->completion); - pm_runtime_put(&spisg->pdev->dev); return 0; diff --git a/drivers/spi/spi-axiado.c b/drivers/spi/spi-axiado.c index 9057a0a8df4a86..649f149617cead 100644 --- a/drivers/spi/spi-axiado.c +++ b/drivers/spi/spi-axiado.c @@ -201,7 +201,7 @@ static void ax_spi_fill_tx_fifo(struct ax_spi *xspi) * then spi control did't work thoroughly, add one byte delay */ if (ax_spi_read(xspi, AX_SPI_IVR) & AX_SPI_IVR_TFOV) - usleep_range(10, 10); + udelay(10); if (xspi->tx_buf) ax_spi_write_b(xspi, AX_SPI_TXFIFO, *xspi->tx_buf++); else diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 65aff2e7026584..057381e56a7fd5 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1860,14 +1860,10 @@ static int cqspi_probe(struct platform_device *pdev) if (irq < 0) return -ENXIO; - ret = pm_runtime_set_active(dev); - if (ret) - return ret; - ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks); if (ret) { dev_err(dev, "Cannot enable QSPI clocks.\n"); - goto disable_rpm; + return ret; } /* Obtain QSPI reset control */ @@ -1962,10 +1958,11 @@ static int cqspi_probe(struct platform_device *pdev) 
cqspi->sclk = 0; if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { - pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); } host->num_chipselect = cqspi->num_chipselect; @@ -1977,7 +1974,7 @@ static int cqspi_probe(struct platform_device *pdev) ret = cqspi_request_mmap_dma(cqspi); if (ret == -EPROBE_DEFER) { dev_err_probe(&pdev->dev, ret, "Failed to request mmap DMA\n"); - goto disable_controller; + goto disable_rpm; } } @@ -1995,14 +1992,16 @@ static int cqspi_probe(struct platform_device *pdev) release_dma_chan: if (cqspi->rx_chan) dma_release_channel(cqspi->rx_chan); -disable_controller: - cqspi_controller_enable(cqspi, 0); -disable_clks: - if (pm_runtime_get_sync(&pdev->dev) >= 0) - clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); disable_rpm: - if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) + if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + pm_runtime_put_noidle(dev); + pm_runtime_dont_use_autosuspend(dev); + } + cqspi_controller_enable(cqspi, 0); +disable_clks: + clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); return ret; } @@ -2026,18 +2025,19 @@ static void cqspi_remove(struct platform_device *pdev) if (cqspi->rx_chan) dma_release_channel(cqspi->rx_chan); - cqspi_controller_enable(cqspi, 0); - - if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) ret = pm_runtime_get_sync(&pdev->dev); - if (ret >= 0) + if (ret >= 0) { + cqspi_controller_enable(cqspi, 0); clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); + } if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { - pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); } } diff --git 
a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 08d7dabe818dc9..891e2ba3695849 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -741,7 +741,6 @@ static int cdns_spi_probe(struct platform_device *pdev) /* Set to default valid value */ ctlr->max_speed_hz = xspi->clk_rate / 4; xspi->speed_hz = ctlr->max_speed_hz; - pm_runtime_put_autosuspend(&pdev->dev); } else { ctlr->mode_bits |= SPI_NO_CS; ctlr->target_abort = cdns_target_abort; @@ -752,12 +751,17 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_all; } + if (!spi_controller_is_target(ctlr)) + pm_runtime_put_autosuspend(&pdev->dev); + return ret; clk_dis_all: if (!spi_controller_is_target(ctlr)) { pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); } remove_ctlr: spi_controller_put(ctlr); @@ -776,16 +780,23 @@ static void cdns_spi_remove(struct platform_device *pdev) { struct spi_controller *ctlr = platform_get_drvdata(pdev); struct cdns_spi *xspi = spi_controller_get_devdata(ctlr); + int ret = 0; + + if (!spi_controller_is_target(ctlr)) + ret = pm_runtime_get_sync(&pdev->dev); spi_controller_get(ctlr); spi_unregister_controller(ctlr); - cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); + if (ret >= 0) + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); if (!spi_controller_is_target(ctlr)) { pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); } spi_controller_put(ctlr); diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 14cd1b9d9793e2..231fbcf0e7aafd 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -98,7 +98,6 @@ #define CR0_FRF_MICROWIRE 0x2 #define CR0_XFM_OFFSET 18 -#define CR0_XFM_MASK (0x03 << SPI_XFM_OFFSET) #define CR0_XFM_TR 0x0 #define CR0_XFM_TO 0x1 #define CR0_XFM_RO 0x2 @@ -109,8 
+108,6 @@ #define CR0_SOI_OFFSET 23 -#define CR0_MTM_OFFSET 0x21 - /* Bit fields in SER, 2bit */ #define SER_MASK 0x3 @@ -357,7 +354,8 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id) struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); /* When int_cs_inactive comes, spi target abort */ - if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) { + if (rs->cs_inactive && + (readl_relaxed(rs->regs + ROCKCHIP_SPI_ISR) & INT_CS_INACTIVE)) { ctlr->target_abort(ctlr); writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR); writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR); diff --git a/drivers/spi/spi-rzv2h-rspi.c b/drivers/spi/spi-rzv2h-rspi.c index f45af588463851..1655efda7d2090 100644 --- a/drivers/spi/spi-rzv2h-rspi.c +++ b/drivers/spi/spi-rzv2h-rspi.c @@ -579,7 +579,7 @@ static u32 rzv2h_rspi_setup_clock(struct rzv2h_rspi_priv *rspi, u32 hz) rspi->info->find_pclk_rate(rspi->pclk, hz, &best_clock); if (!best_clock.clk_rate) - return -EINVAL; + return 0; ret = clk_set_rate(best_clock.clk, best_clock.clk_rate); if (ret) diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 15ba592236e845..725a49a0eee72e 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -1620,6 +1620,12 @@ static void privcmd_close(struct vm_area_struct *vma) kvfree(pages); } +static int privcmd_may_split(struct vm_area_struct *area, unsigned long addr) +{ + /* Forbid splitting, avoids double free via privcmd_close(). 
*/ + return -EINVAL; +} + static vm_fault_t privcmd_fault(struct vm_fault *vmf) { printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", @@ -1631,6 +1637,7 @@ static vm_fault_t privcmd_fault(struct vm_fault *vmf) static const struct vm_operations_struct privcmd_vm_ops = { .close = privcmd_close, + .may_split = privcmd_may_split, .fault = privcmd_fault }; diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c index b1bb01ba82f880..91923242a5ae71 100644 --- a/drivers/xen/sys-hypervisor.c +++ b/drivers/xen/sys-hypervisor.c @@ -366,6 +366,8 @@ static ssize_t buildid_show(struct hyp_sysfs_attr *attr, char *buffer) ret = sprintf(buffer, ""); return ret; } + if (ret > PAGE_SIZE) + return -ENOSPC; buildid = kmalloc(sizeof(*buildid) + ret, GFP_KERNEL); if (!buildid) @@ -373,8 +375,10 @@ static ssize_t buildid_show(struct hyp_sysfs_attr *attr, char *buffer) buildid->len = ret; ret = HYPERVISOR_xen_version(XENVER_build_id, buildid); - if (ret > 0) - ret = sprintf(buffer, "%s", buildid->buf); + if (ret > 0) { + /* Build id is binary, not a string. */ + memcpy(buffer, buildid->buf, ret); + } kfree(buildid); return ret; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ca3e4b99aec2bd..2275189b786055 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4641,7 +4641,8 @@ int try_release_extent_buffer(struct folio *folio) * to read the block we will not block on anything. 
*/ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info, - u64 bytenr, u64 owner_root, u64 gen, int level) + u64 bytenr, u64 owner_root, u64 gen, int level, + const struct btrfs_key *first_key) { struct btrfs_tree_parent_check check = { .level = level, @@ -4650,6 +4651,11 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info, struct extent_buffer *eb; int ret; + if (first_key) { + memcpy(&check.first_key, first_key, sizeof(struct btrfs_key)); + check.has_first_key = true; + } + eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level); if (IS_ERR(eb)) return; @@ -4677,9 +4683,13 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info, */ void btrfs_readahead_node_child(struct extent_buffer *node, int slot) { + struct btrfs_key node_key; + + btrfs_node_key_to_cpu(node, &node_key, slot); btrfs_readahead_tree_block(node->fs_info, btrfs_node_blockptr(node, slot), btrfs_header_owner(node), btrfs_node_ptr_generation(node, slot), - btrfs_header_level(node) - 1); + btrfs_header_level(node) - 1, + &node_key); } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index fd209233317f40..b310a5145cf693 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -287,7 +287,8 @@ static inline void wait_on_extent_buffer_writeback(struct extent_buffer *eb) } void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info, - u64 bytenr, u64 owner_root, u64 gen, int level); + u64 bytenr, u64 owner_root, u64 gen, int level, + const struct btrfs_key *first_key); void btrfs_readahead_node_child(struct extent_buffer *node, int slot); /* Note: this can be used in for loops without caching the value in a variable. 
*/ diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 9efd1ec90f031f..472b3060e5ac32 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -259,7 +259,11 @@ int btrfs_convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, nr++; path->slots[0]--; } else { - ASSERT(0); + btrfs_err(fs_info, "unexpected free space tree key type %u", + found_key.type); + ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); + goto out; } } @@ -405,7 +409,11 @@ int btrfs_convert_free_space_to_extents(struct btrfs_trans_handle *trans, nr++; } else { - ASSERT(0); + btrfs_err(fs_info, "unexpected free space tree key type %u", + found_key.type); + ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); + goto out; } } @@ -1518,7 +1526,11 @@ int btrfs_remove_block_group_free_space(struct btrfs_trans_handle *trans, nr++; path->slots[0]--; } else { - ASSERT(0); + btrfs_err(trans->fs_info, "unexpected free space tree key type %u", + found_key.type); + ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); + return ret; } } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 40474014c03f13..906d5c21ebc477 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1153,7 +1153,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk, NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | - EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, + EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV, PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK); if (async_extent->cb) @@ -4959,6 +4959,8 @@ static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry) if (ret) goto out; + btrfs_record_unlink_dir(trans, dir, inode, false); + /* now the directory is empty */ ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name); if (!ret) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b2e447f5005c16..a39460bf68a778 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -5102,7 +5102,6 @@ static int btrfs_ioctl_subvol_sync(struct 
btrfs_fs_info *fs_info, void __user *a return 0; } -#ifdef CONFIG_BTRFS_EXPERIMENTAL static int btrfs_ioctl_shutdown(struct btrfs_fs_info *fs_info, unsigned long arg) { int ret = 0; @@ -5134,10 +5133,12 @@ static int btrfs_ioctl_shutdown(struct btrfs_fs_info *fs_info, unsigned long arg case BTRFS_SHUTDOWN_FLAGS_NOLOGFLUSH: btrfs_force_shutdown(fs_info); break; + default: + ret = -EINVAL; + break; } return ret; } -#endif long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -5294,10 +5295,8 @@ long btrfs_ioctl(struct file *file, unsigned int #endif case BTRFS_IOC_SUBVOL_SYNC_WAIT: return btrfs_ioctl_subvol_sync(fs_info, argp); -#ifdef CONFIG_BTRFS_EXPERIMENTAL case BTRFS_IOC_SHUTDOWN: return btrfs_ioctl_shutdown(fs_info, arg); -#endif } return -ENOTTY; diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c index 638c4ad572c996..4b0186c83ad1d8 100644 --- a/fs/btrfs/raid-stripe-tree.c +++ b/fs/btrfs/raid-stripe-tree.c @@ -45,8 +45,11 @@ static int btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans, for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) { struct btrfs_raid_stride *stride = &extent->strides[i]; + u64 devid; u64 phys; + devid = btrfs_raid_stride_devid(leaf, stride); + btrfs_set_stack_raid_stride_devid(&newitem->strides[i], devid); phys = btrfs_raid_stride_physical(leaf, stride) + frontpad; btrfs_set_stack_raid_stride_physical(&newitem->strides[i], phys); } @@ -95,14 +98,26 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le while (1) { key.objectid = start; key.type = BTRFS_RAID_STRIPE_KEY; - key.offset = 0; + key.offset = (u64)-1; ret = btrfs_search_slot(trans, stripe_root, &key, path, -1, 1); if (ret < 0) break; - if (path->slots[0] == btrfs_header_nritems(path->nodes[0])) - path->slots[0]--; + /* + * Search with offset=(u64)-1 ensures we land on the correct + * leaf even when the target entry is the first item on a leaf. 
+ * Since no real entry has offset=(u64)-1, ret is always 1 and + * slot points past the last entry with objectid==start (or + * past the end of the leaf if that entry is the last item). + * Back up one slot to find the actual entry. + */ + if (path->slots[0] == 0) { + /* No entry with objectid <= start exists. */ + ret = 0; + break; + } + path->slots[0]--; leaf = path->nodes[0]; slot = path->slots[0]; @@ -123,7 +138,7 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le */ if (found_start > start) { if (slot == 0) { - ret = btrfs_previous_item(stripe_root, path, start, + ret = btrfs_previous_item(stripe_root, path, 0, BTRFS_RAID_STRIPE_KEY); if (ret) { if (ret > 0) @@ -139,7 +154,10 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le btrfs_item_key_to_cpu(leaf, &key, slot); found_start = key.objectid; found_end = found_start + key.offset; - ASSERT(found_start <= start); + if (found_start > start || found_end <= start) { + ret = -ENOENT; + break; + } } if (key.type != BTRFS_RAID_STRIPE_KEY) @@ -176,9 +194,19 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le /* The "right" item. */ ret = btrfs_duplicate_item(trans, stripe_root, path, &newkey); + if (ret == -EAGAIN) { + btrfs_release_path(path); + continue; + } if (ret) break; + /* + * btrfs_duplicate_item() may have triggered a leaf + * split via setup_leaf_for_split(), so we must refresh + * our leaf pointer from the path. + */ + leaf = path->nodes[0]; item_size = btrfs_item_size(leaf, path->slots[0]); extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_stripe_extent); @@ -195,8 +223,9 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le /* The "left" item. 
*/ path->slots[0]--; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_partially_delete_raid_extent(trans, path, &key, - diff_start, 0); + ret = btrfs_partially_delete_raid_extent(trans, path, + &key, + diff_start, 0); break; } @@ -212,8 +241,11 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le if (found_start < start) { u64 diff_start = start - found_start; - btrfs_partially_delete_raid_extent(trans, path, &key, - diff_start, 0); + ret = btrfs_partially_delete_raid_extent(trans, path, + &key, + diff_start, 0); + if (ret) + break; start += (key.offset - diff_start); length -= (key.offset - diff_start); @@ -236,9 +268,10 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le if (found_end > end) { u64 diff_end = found_end - end; - btrfs_partially_delete_raid_extent(trans, path, &key, - key.offset - length, - length); + ret = btrfs_partially_delete_raid_extent(trans, path, + &key, + key.offset - length, + length); ASSERT(key.offset - diff_end == length); break; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 1c42c5180bddd5..3ebaf5880125fa 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2607,7 +2607,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, if (!block->key_ready) btrfs_readahead_tree_block(fs_info, block->bytenr, block->owner, 0, - block->level); + block->level, NULL); } /* Get first keys */ @@ -3876,7 +3876,7 @@ static int add_remap_tree_entries(struct btrfs_trans_handle *trans, struct btrfs ret = btrfs_insert_empty_items(trans, fs_info->remap_root, path, &batch); btrfs_release_path(path); - if (num_entries <= max_items) + if (ret || num_entries <= max_items) break; num_entries -= max_items; @@ -4174,6 +4174,12 @@ static int move_existing_remap(struct btrfs_fs_info *fs_info, return ret; } + if (ins.offset < length) { + spin_lock(&sinfo->lock); + btrfs_space_info_update_bytes_may_use(sinfo, ins.offset - length); + 
spin_unlock(&sinfo->lock); + } + dest_addr = ins.objectid; dest_length = ins.offset; @@ -5000,6 +5006,12 @@ static int do_remap_reloc_trans(struct btrfs_fs_info *fs_info, return ret; } + if (ins.offset < remap_length) { + spin_lock(&sinfo->lock); + btrfs_space_info_update_bytes_may_use(sinfo, ins.offset - remap_length); + spin_unlock(&sinfo->lock); + } + made_reservation = true; new_addr = ins.objectid; @@ -5023,21 +5035,27 @@ static int do_remap_reloc_trans(struct btrfs_fs_info *fs_info, if (bg_needs_free_space) { ret = btrfs_add_block_group_free_space(trans, dest_bg); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto fail; + } } ret = copy_remapped_data(fs_info, start, new_addr, length); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto fail; + } ret = btrfs_remove_from_free_space_tree(trans, new_addr, length); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto fail; + } ret = add_remap_entry(trans, path, src_bg, start, new_addr, length); if (ret) { - btrfs_add_to_free_space_tree(trans, new_addr, length); + btrfs_abort_transaction(trans, ret); goto fail; } diff --git a/fs/isofs/export.c b/fs/isofs/export.c index 421d247fae5230..78f80c1a5c54a0 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c @@ -24,7 +24,7 @@ isofs_export_iget(struct super_block *sb, { struct inode *inode; - if (block == 0) + if (block == 0 || block >= ISOFS_SB(sb)->s_nzones) return ERR_PTR(-ESTALE); inode = isofs_iget(sb, block, offset); if (IS_ERR(inode)) diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index 8dd3911717e0cc..3ace3d6a55e796 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -10,20 +10,13 @@ #include #include "isofs.h" -/* - * ok, we cannot use strncmp, as the name is not in our data space. - * Thus we'll have to use isofs_match. No big problem. Match also makes - * some sanity tests. 
- */ static int isofs_cmp(struct dentry *dentry, const char *compare, int dlen) { - struct qstr qstr; - qstr.name = compare; - qstr.len = dlen; if (likely(!dentry->d_op)) return dentry->d_name.len != dlen || memcmp(dentry->d_name.name, compare, dlen); - return dentry->d_op->d_compare(NULL, dentry->d_name.len, dentry->d_name.name, &qstr); + return dentry->d_op->d_compare(NULL, dentry->d_name.len, dentry->d_name.name, + &QSTR_LEN(compare, dlen)); } /* diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c index 6fe6dbd0c740f7..1232fab59a4e68 100644 --- a/fs/isofs/rock.c +++ b/fs/isofs/rock.c @@ -101,6 +101,15 @@ static int rock_continue(struct rock_state *rs) goto out; } + if ((unsigned)rs->cont_extent >= ISOFS_SB(rs->inode->i_sb)->s_nzones) { + printk(KERN_NOTICE "rock: corrupted directory entry. " + "extent=%u out of volume (nzones=%lu)\n", + (unsigned)rs->cont_extent, + ISOFS_SB(rs->inode->i_sb)->s_nzones); + ret = -EIO; + goto out; + } + if (rs->cont_extent) { struct buffer_head *bh; diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index bfe884d624e7b2..38290b9c07f7b4 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -457,7 +457,7 @@ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode, /* * Unlike file_handle, type and len of struct fanotify_fh are u8. * Traditionally, filesystem return handle_type < 0xff, but there - * is no enforecement for that in vfs. + * is no enforcement for that in vfs. 
*/ BUILD_BUG_ON(MAX_HANDLE_SZ > 0xff || FILEID_INVALID > 0xff); if (type <= 0 || type >= FILEID_INVALID || fh_len != dwords << 2) diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index b7198c4744e3a4..2dac70b99b0d7c 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -388,7 +388,7 @@ static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector return hlist_entry_safe(node, struct fsnotify_mark, obj_list); } -static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark) +struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark) { struct hlist_node *node = NULL; diff --git a/fs/notify/mark.c b/fs/notify/mark.c index c2ed5b11b0fe63..e256b420100dc8 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -238,7 +238,12 @@ static struct inode *fsnotify_update_iref(struct fsnotify_mark_connector *conn, return inode; } -static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) +/* + * Calculate mask of events for a list of marks. + * + * Return true if any of the attached marks want to hold an inode reference. + */ +static bool __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) { u32 new_mask = 0; bool want_iref = false; @@ -262,6 +267,34 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) */ WRITE_ONCE(*fsnotify_conn_mask_p(conn), new_mask); + return want_iref; +} + +/* + * Calculate mask of events for a list of marks after attach/modify mark + * and get an inode reference for the connector if needed. + * + * A concurrent add of evictable mark and detach of non-evictable mark can + * lead to __fsnotify_recalc_mask() returning false want_iref, but in this + * case we defer clearing iref to fsnotify_recalc_mask_clear_iref() called + * from fsnotify_put_mark(). 
+ */ +static void fsnotify_recalc_mask_set_iref(struct fsnotify_mark_connector *conn) +{ + bool has_iref = conn->flags & FSNOTIFY_CONN_FLAG_HAS_IREF; + bool want_iref = __fsnotify_recalc_mask(conn) || has_iref; + + (void) fsnotify_update_iref(conn, want_iref); +} + +/* + * Calculate mask of events for a list of marks after detach mark + * and return the inode object if its reference is no longer needed. + */ +static void *fsnotify_recalc_mask_clear_iref(struct fsnotify_mark_connector *conn) +{ + bool want_iref = __fsnotify_recalc_mask(conn); + return fsnotify_update_iref(conn, want_iref); } @@ -298,7 +331,7 @@ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) spin_lock(&conn->lock); update_children = !fsnotify_conn_watches_children(conn); - __fsnotify_recalc_mask(conn); + fsnotify_recalc_mask_set_iref(conn); update_children &= fsnotify_conn_watches_children(conn); spin_unlock(&conn->lock); /* @@ -419,7 +452,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) /* Update watched objects after detaching mark */ if (sb) fsnotify_update_sb_watchers(sb, conn); - objp = __fsnotify_recalc_mask(conn); + objp = fsnotify_recalc_mask_clear_iref(conn); type = conn->type; } WRITE_ONCE(mark->connector, NULL); @@ -457,9 +490,6 @@ EXPORT_SYMBOL_GPL(fsnotify_put_mark); */ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark) { - if (!mark) - return true; - if (refcount_inc_not_zero(&mark->refcnt)) { spin_lock(&mark->lock); if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) { @@ -500,15 +530,22 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) int type; fsnotify_foreach_iter_type(type) { + struct fsnotify_mark *mark = iter_info->marks[type]; + /* This can fail if mark is being removed */ - if (!fsnotify_get_mark_safe(iter_info->marks[type])) { - __release(&fsnotify_mark_srcu); - goto fail; + while (mark && !fsnotify_get_mark_safe(mark)) { + if (mark->group == iter_info->current_group) { + __release(&fsnotify_mark_srcu); + goto fail; + } + 
/* This is a mark in an unrelated group, skip */ + mark = fsnotify_next_mark(mark); + iter_info->marks[type] = mark; } } /* - * Now that both marks are pinned by refcount in the inode / vfsmount + * Now that all marks are pinned by refcount in the inode / vfsmount / etc * lists, we can drop SRCU lock, and safely resume the list iteration * once userspace returns. */ diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c index 656d802333e357..b1436b3151b989 100644 --- a/fs/ntfs/bitmap.c +++ b/fs/ntfs/bitmap.c @@ -125,7 +125,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, struct address_space *mapping; struct folio *folio; u8 *kaddr; - int pos, len; + int pos, len, err; u8 bit; struct ntfs_inode *ni = NTFS_I(vi); struct ntfs_volume *vol = ni->vol; @@ -201,8 +201,10 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, /* If we are not in the last page, deal with all subsequent pages. */ while (index < end_index) { - if (cnt <= 0) + if (cnt <= 0) { + err = -EIO; goto rollback; + } /* Update @index and get the next folio. */ folio_mark_dirty(folio); @@ -214,6 +216,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, ntfs_error(vi->i_sb, "Failed to map subsequent page (error %li), aborting.", PTR_ERR(folio)); + err = PTR_ERR(folio); goto rollback; } @@ -265,7 +268,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, * - @count - @cnt is the number of bits that have been modified */ if (is_rollback) - return PTR_ERR(folio); + return err; if (count != cnt) pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt, value ? 0 : 1, true); @@ -274,14 +277,14 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, if (!pos) { /* Rollback was successful. */ ntfs_error(vi->i_sb, - "Failed to map subsequent page (error %li), aborting.", - PTR_ERR(folio)); + "Failed to map subsequent page (error %i), aborting.", + err); } else { /* Rollback failed. 
*/ ntfs_error(vi->i_sb, - "Failed to map subsequent page (error %li) and rollback failed (error %i). Aborting and leaving inconsistent metadata. Unmount and run chkdsk.", - PTR_ERR(folio), pos); + "Failed to map subsequent page (error %i) and rollback failed (error %i). Aborting and leaving inconsistent metadata. Unmount and run chkdsk.", + err, pos); NVolSetErrors(NTFS_SB(vi->i_sb)); } - return PTR_ERR(folio); + return err; } diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index bfa904d2ce6659..20f5c7074bdd12 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -911,8 +911,8 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) if (next->flags & INDEX_ENTRY_NODE) { next = ntfs_index_walk_down(next, ictx); - if (!next) { - err = -EIO; + if (IS_ERR(next)) { + err = PTR_ERR(next); goto out; } } @@ -920,7 +920,14 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) if (next && !(next->flags & INDEX_ENTRY_END)) goto nextdir; - while ((next = ntfs_index_next(next, ictx)) != NULL) { + while (1) { + next = ntfs_index_next(next, ictx); + if (IS_ERR(next)) { + err = PTR_ERR(next); + goto out; + } + if (!next) + break; nextdir: /* Check the consistency of an index entry */ if (ntfs_index_entry_inconsistent(ictx, vol, next, COLLATION_FILE_NAME, diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c index 2080f396913785..a547bdcfa4561f 100644 --- a/fs/ntfs/index.c +++ b/fs/ntfs/index.c @@ -1969,15 +1969,19 @@ int ntfs_index_remove(struct ntfs_inode *dir_ni, const void *key, const u32 keyl struct index_entry *ntfs_index_walk_down(struct index_entry *ie, struct ntfs_index_context *ictx) { struct index_entry *entry; + struct index_block *ib; s64 vcn; entry = ie; do { vcn = ntfs_ie_get_vcn(entry); if (ictx->is_in_root) { + ib = kvzalloc(ictx->block_size, GFP_NOFS); + if (!ib) + return ERR_PTR(-ENOMEM); /* down from level zero */ ictx->ir = NULL; - ictx->ib = kvzalloc(ictx->block_size, GFP_NOFS); + ictx->ib = ib; ictx->pindex = 1; ictx->is_in_root = false; } 
else { @@ -1991,8 +1995,8 @@ struct index_entry *ntfs_index_walk_down(struct index_entry *ie, struct ntfs_ind ictx->entry = ntfs_ie_get_first(&ictx->ib->index); entry = ictx->entry; } else - entry = NULL; - } while (entry && (entry->flags & INDEX_ENTRY_NODE)); + entry = ERR_PTR(-EIO); + } while (!IS_ERR(entry) && (entry->flags & INDEX_ENTRY_NODE)); return entry; } @@ -2097,10 +2101,15 @@ struct index_entry *ntfs_index_next(struct index_entry *ie, struct ntfs_index_co /* walk down if it has a subnode */ if (flags & INDEX_ENTRY_NODE) { - if (!ictx->ia_ni) + if (!ictx->ia_ni) { ictx->ia_ni = ntfs_ia_open(ictx, ictx->idx_ni); + if (!ictx->ia_ni) + return ERR_PTR(-EIO); + } next = ntfs_index_walk_down(next, ictx); + if (IS_ERR(next)) + return next; } else { /* walk up it has no subnode, nor data */ diff --git a/fs/ntfs/iomap.c b/fs/ntfs/iomap.c index 74a4d3e971f4d2..dc7d8c893a699f 100644 --- a/fs/ntfs/iomap.c +++ b/fs/ntfs/iomap.c @@ -788,8 +788,7 @@ static int ntfs_write_iomap_end_resident(struct inode *inode, loff_t pos, ctx = ntfs_attr_get_search_ctx(ni, NULL); if (!ctx) { written = -ENOMEM; - mutex_unlock(&ni->mrec_lock); - return written; + goto err_out; } err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, @@ -810,7 +809,8 @@ static int ntfs_write_iomap_end_resident(struct inode *inode, loff_t pos, memcpy(kattr + pos, iomap_inline_data(iomap, pos), written); mark_mft_record_dirty(ctx->ntfs_ino); err_out: - ntfs_attr_put_search_ctx(ctx); + if (ctx) + ntfs_attr_put_search_ctx(ctx); put_page(ipage); mutex_unlock(&ni->mrec_lock); return written; diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c index 10894de519c392..96c450e62efcae 100644 --- a/fs/ntfs/namei.c +++ b/fs/ntfs/namei.c @@ -945,7 +945,8 @@ static int ntfs_delete(struct ntfs_inode *ni, struct ntfs_inode *dir_ni, ni_mrec = actx->base_mrec ? 
actx->base_mrec : actx->mrec; ni_mrec->link_count = cpu_to_le16(le16_to_cpu(ni_mrec->link_count) - 1); - drop_nlink(VFS_I(ni)); + if (!S_ISDIR(VFS_I(ni)->i_mode)) + drop_nlink(VFS_I(ni)); mark_mft_record_dirty(ni); if (looking_for_dos_name) { @@ -955,6 +956,13 @@ static int ntfs_delete(struct ntfs_inode *ni, struct ntfs_inode *dir_ni, goto search; } + /* + * For directories, Drop VFS nlink only when mft record link count + * becomes zero. Because we fixes VFS nlink to 1 for directories. + */ + if (S_ISDIR(VFS_I(ni)->i_mode) && !le16_to_cpu(ni_mrec->link_count)) + drop_nlink(VFS_I(ni)); + /* * If hard link count is not equal to zero then we are done. In other * case there are no reference to this inode left, so we should free all @@ -1221,7 +1229,8 @@ static int __ntfs_link(struct ntfs_inode *ni, struct ntfs_inode *dir_ni, } /* Increment hard links count. */ ni_mrec->link_count = cpu_to_le16(le16_to_cpu(ni_mrec->link_count) + 1); - inc_nlink(VFS_I(ni)); + if (!S_ISDIR(vi->i_mode)) + inc_nlink(VFS_I(ni)); /* Done! 
*/ mark_mft_record_dirty(ni); diff --git a/fs/ntfs/reparse.c b/fs/ntfs/reparse.c index 8f60ec6f66c19e..74713716813f22 100644 --- a/fs/ntfs/reparse.c +++ b/fs/ntfs/reparse.c @@ -505,7 +505,6 @@ int ntfs_reparse_set_wsl_symlink(struct ntfs_inode *ni, struct reparse_point *reparse; struct wsl_link_reparse_data *data; - utarget = (char *)NULL; len = ntfs_ucstonls(ni->vol, target, target_len, &utarget, 0); if (len <= 0) return -EINVAL; @@ -514,7 +513,7 @@ int ntfs_reparse_set_wsl_symlink(struct ntfs_inode *ni, reparse = kvzalloc(reparse_len, GFP_NOFS); if (!reparse) { err = -ENOMEM; - kvfree(utarget); + kfree(utarget); } else { data = (struct wsl_link_reparse_data *)reparse->reparse_data; reparse->reparse_tag = IO_REPARSE_TAG_LX_SYMLINK; @@ -528,6 +527,8 @@ int ntfs_reparse_set_wsl_symlink(struct ntfs_inode *ni, kvfree(reparse); if (!err) ni->target = utarget; + else + kfree(utarget); } return err; } diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c index b213b4976d2b6b..da21dbeaaf6679 100644 --- a/fs/ntfs/runlist.c +++ b/fs/ntfs/runlist.c @@ -15,6 +15,8 @@ * Copyright (c) 2007-2022 Jean-Pierre Andre */ +#include + #include "ntfs.h" #include "attrib.h" @@ -739,6 +741,7 @@ struct runlist_element *ntfs_mapping_pairs_decompress(const struct ntfs_volume * int rlsize; /* Size of runlist buffer. */ u16 rlpos; /* Current runlist position in units of struct runlist_elements. */ u8 b; /* Current byte offset in buf. */ + u64 lowest_vcn; /* Raw on-disk lowest_vcn. */ #ifdef DEBUG /* Make sure attr exists and is non-resident. */ @@ -747,8 +750,14 @@ struct runlist_element *ntfs_mapping_pairs_decompress(const struct ntfs_volume * return ERR_PTR(-EINVAL); } #endif + lowest_vcn = le64_to_cpu(attr->data.non_resident.lowest_vcn); + /* Validate lowest_vcn from on-disk metadata to ensure it is sane. */ + if (overflows_type(lowest_vcn, vcn)) { + ntfs_error(vol->sb, "Invalid lowest_vcn in mapping pairs."); + return ERR_PTR(-EIO); + } /* Start at vcn = lowest_vcn and lcn 0. 
*/ - vcn = le64_to_cpu(attr->data.non_resident.lowest_vcn); + vcn = lowest_vcn; lcn = 0; /* Get start of the mapping pairs array. */ buf = (u8 *)attr + @@ -823,8 +832,17 @@ struct runlist_element *ntfs_mapping_pairs_decompress(const struct ntfs_volume * * element. */ rl[rlpos].length = deltaxcn; - /* Increment the current vcn by the current run length. */ - vcn += deltaxcn; + /* + * Increment the current vcn by the current run length. + * Guard against s64 overflow from a crafted mapping + * pairs array to preserve the monotonically-increasing + * vcn invariant. + */ + if (unlikely(check_add_overflow(vcn, deltaxcn, &vcn))) { + ntfs_error(vol->sb, "VCN overflow in mapping pairs array."); + goto err_out; + } + /* * There might be no lcn change at all, as is the case for * sparse clusters on NTFS 3.0+, in which case we set the lcn diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index 4a25afda9448a4..79d891f7df1a50 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -89,7 +89,6 @@ int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid); char *smb3_fs_context_fullpath(const struct smb3_fs_context *ctx, char dirsep); int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx); -int smb3_parse_opt(const char *options, const char *key, char **val); int cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs); bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs); int cifs_discard_remaining_data(struct TCP_Server_Info *server); diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index b9544eb0381b73..b63ec7ab6e5131 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -536,37 +536,6 @@ cifs_parse_smb_version(struct fs_context *fc, char *value, struct smb3_fs_contex return 0; } -int smb3_parse_opt(const char *options, const char *key, char **val) -{ - int rc = -ENOENT; - char *opts, *orig, *p; - - orig = opts = kstrdup(options, 
GFP_KERNEL); - if (!opts) - return -ENOMEM; - - while ((p = strsep(&opts, ","))) { - char *nval; - - if (!*p) - continue; - if (strncasecmp(p, key, strlen(key))) - continue; - nval = strchr(p, '='); - if (nval) { - if (nval == p) - continue; - *nval++ = 0; - *val = kstrdup(nval, GFP_KERNEL); - rc = !*val ? -ENOMEM : 0; - goto out; - } - } -out: - kfree(orig); - return rc; -} - /* * Remove duplicate path delimiters. Windows is supposed to do that * but there are some bugs that prevent rename from working if there are diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index c6dd282fc3a901..286912616c7339 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -230,7 +230,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, num_rqst = 0; server = cifs_pick_channel(ses); - vars = kzalloc_obj(*vars, GFP_ATOMIC); + vars = kzalloc_obj(*vars, GFP_KERNEL); if (vars == NULL) { rc = -ENOMEM; goto out; diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 7f346ee5028964..e6cb9b144530d2 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -111,10 +111,21 @@ smb2_add_credits(struct TCP_Server_Info *server, cifs_trace_rw_credits_zero_in_flight); } server->in_flight--; + + /* + * Rebalance credits when an op drains in_flight. For session setup, + * do this only when the total accumulated credits are high enough (>2) + * so that a newly established secondary channel can reserve credits for + * echoes and oplocks. We expect this to happen at the end of the final + * session setup response. + */ if (server->in_flight == 0 && ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) && ((optype & CIFS_OP_MASK) != CIFS_SESS_OP)) rc = change_conf(server); + else if (server->in_flight == 0 && + ((optype & CIFS_OP_MASK) == CIFS_SESS_OP) && *val > 2) + rc = change_conf(server); /* * Sometimes server returns 0 credits on oplock break ack - we need to * rebalance credits in this case. 
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c index fbbc0529743f8b..c5aac4946cbe52 100644 --- a/fs/smb/server/connection.c +++ b/fs/smb/server/connection.c @@ -540,24 +540,54 @@ int ksmbd_conn_transport_init(void) static void stop_sessions(void) { - struct ksmbd_conn *conn; + struct ksmbd_conn *conn, *target; struct ksmbd_transport *t; + bool any; int bkt; + /* + * Serialised via init_lock; no concurrent stop_sessions() can + * touch conn->stop_called, so writing it under the read lock is + * safe. + */ again: + target = NULL; + any = false; down_read(&conn_list_lock); hash_for_each(conn_list, bkt, conn, hlist) { - t = conn->transport; - ksmbd_conn_set_exiting(conn); - if (t->ops->shutdown) { - up_read(&conn_list_lock); + any = true; + if (conn->stop_called) + continue; + atomic_inc(&conn->refcnt); + conn->stop_called = true; + /* + * Mark the connection EXITING while still holding the + * read lock so the selection and the status transition + * happen together. Do not regress a connection that has + * already advanced to RELEASING on its own (e.g. the + * handler exited its receive loop for an unrelated + * reason). 
+ */ + if (READ_ONCE(conn->status) != KSMBD_SESS_RELEASING) + ksmbd_conn_set_exiting(conn); + target = conn; + break; + } + up_read(&conn_list_lock); + + if (target) { + t = target->transport; + if (t->ops->shutdown) t->ops->shutdown(t); - down_read(&conn_list_lock); + if (atomic_dec_and_test(&target->refcnt)) { + ida_destroy(&target->async_ida); + t->ops->free_transport(t); + kfree(target); } + goto again; } - up_read(&conn_list_lock); - if (!hash_empty(conn_list)) { + if (any) { msleep(100); goto again; } diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h index ae21a1bd4c70ef..de2d46941c930b 100644 --- a/fs/smb/server/connection.h +++ b/fs/smb/server/connection.h @@ -49,6 +49,7 @@ struct ksmbd_conn { struct mutex srv_mutex; int status; unsigned int cli_cap; + bool stop_called; union { __be32 inet_addr; #if IS_ENABLED(CONFIG_IPV6) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 21825a69c29a7e..47b7af631f7b35 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -3946,7 +3946,13 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level, goto free_conv_name; } - struct_sz = readdir_info_level_struct_sz(info_level) + conv_len; + struct_sz = readdir_info_level_struct_sz(info_level); + if (struct_sz == -EOPNOTSUPP) { + rc = -EINVAL; + goto free_conv_name; + } + + struct_sz += conv_len; next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT); d_info->last_entry_off_align = next_entry_offset - struct_sz; diff --git a/fs/udf/misc.c b/fs/udf/misc.c index 0788593b6a1d8b..6928e378fbbdcb 100644 --- a/fs/udf/misc.c +++ b/fs/udf/misc.c @@ -230,8 +230,12 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block, } /* Verify the descriptor CRC */ - if (le16_to_cpu(tag_p->descCRCLength) + sizeof(struct tag) > sb->s_blocksize || - le16_to_cpu(tag_p->descCRC) == crc_itu_t(0, + if (le16_to_cpu(tag_p->descCRCLength) + sizeof(struct tag) > sb->s_blocksize) { + udf_err(sb, "block 
%u: CRC length %u exceeds block size\n", + block, le16_to_cpu(tag_p->descCRCLength)); + goto error_out; + } + if (le16_to_cpu(tag_p->descCRC) == crc_itu_t(0, bh->b_data + sizeof(struct tag), le16_to_cpu(tag_p->descCRCLength))) return bh; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index b701b5f972cb4c..c41d9a7565cf11 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -17,6 +17,8 @@ #include #include +struct notifier_block; + struct acpi_handle_list { u32 count; acpi_handle *handles; diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 4e15583e0d254c..f72e00517eb3d1 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -1386,6 +1386,12 @@ enum acpi_einj_command_status { #define ACPI_EINJ_CXL_MEM_FATAL (1<<17) #define ACPI_EINJ_VENDOR_DEFINED (1<<31) +/* EINJV2 error types from EINJV2_GET_ERROR_TYPE (ACPI 6.6) */ + +#define ACPI_EINJV2_PROCESSOR (1) +#define ACPI_EINJV2_MEMORY (1<<1) +#define ACPI_EINJV2_PCIE (1<<2) + /******************************************************************************* * * ERST - Error Record Serialization Table (ACPI 4.0) diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index bf391903443d51..0c5e5ed7b5e740 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -273,6 +273,12 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper); bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper, const struct drm_gem_object *obj); +#else +static inline bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper, + const struct drm_gem_object *obj) +{ + return false; +} #endif #endif diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index b907e6c2307d85..260d7968cf7202 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -108,6 +108,7 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, extern unsigned 
int cdrom_check_events(struct cdrom_device_info *cdi, unsigned int clearing); +extern void cdrom_probe_write_features(struct cdrom_device_info *cdi); extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi); extern void unregister_cdrom(struct cdrom_device_info *cdi); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 166933b82e27a6..d1203da56fc5f7 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -322,13 +322,13 @@ struct dma_buf { * @vmapping_counter: * * Used internally to refcnt the vmaps returned by dma_buf_vmap(). - * Protected by @lock. + * Protected by @resv. */ unsigned vmapping_counter; /** * @vmap_ptr: - * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. + * The current vmap ptr if @vmapping_counter > 0. Protected by @resv. */ struct iosys_map vmap_ptr; diff --git a/include/linux/dpll.h b/include/linux/dpll.h index b7277a8b484d26..f8037f1ab20b60 100644 --- a/include/linux/dpll.h +++ b/include/linux/dpll.h @@ -286,6 +286,7 @@ int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin, int dpll_device_change_ntf(struct dpll_device *dpll); +int __dpll_pin_change_ntf(struct dpll_pin *pin); int dpll_pin_change_ntf(struct dpll_pin *pin); int register_dpll_notifier(struct notifier_block *nb); diff --git a/include/linux/firmware.h b/include/linux/firmware.h index aae1b85ffc10e2..0fa3b027f02f16 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -110,6 +110,9 @@ int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)); +void request_firmware_nowait_cancel(struct device *device, void *context, + void (*cont)(const struct firmware *fw, + void *context)); int request_firmware_direct(const struct firmware **fw, const char *name, struct device *device); int request_firmware_into_buf(const struct firmware **firmware_p, @@ -157,6 +160,13 @@ static 
inline int request_firmware_nowait( return -EINVAL; } +static inline void request_firmware_nowait_cancel(struct device *device, + void *context, + void (*cont)(const struct firmware *fw, + void *context)) +{ +} + static inline void release_firmware(const struct firmware *fw) { } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 95985400d3d8e2..e5cde39d6e85d6 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -915,6 +915,7 @@ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int obj_type); extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); +struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index c6eea9afb943d5..e5997120f45cfd 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h @@ -45,6 +45,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg); int mbox_flush(struct mbox_chan *chan, unsigned long timeout); void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ +unsigned int mbox_chan_tx_slots_available(struct mbox_chan *chan); /* atomic */ void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ #endif /* __MAILBOX_CLIENT_H */ diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h index 80a427c7ca299f..dc93287a2a01ae 100644 --- a/include/linux/mailbox_controller.h +++ b/include/linux/mailbox_controller.h @@ -3,6 +3,7 @@ #ifndef __MAILBOX_CONTROLLER_H #define __MAILBOX_CONTROLLER_H +#include #include #include #include @@ -11,6 +12,13 @@ struct mbox_chan; +/* Sentinel value distinguishing "no active 
request" from "NULL message data" */ +#define MBOX_NO_MSG ((void *)-1) + +#define MBOX_TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */ +#define MBOX_TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */ +#define MBOX_TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */ + /** * struct mbox_chan_ops - methods to control mailbox channels * @send_data: The API asks the MBOX controller driver, in atomic @@ -54,10 +62,10 @@ struct mbox_chan_ops { /** * struct mbox_controller - Controller of a class of communication channels - * @dev: Device backing this controller - * @ops: Operators that work on each communication chan - * @chans: Array of channels - * @num_chans: Number of channels in the 'chans' array. + * @dev: Device backing this controller. Required. + * @ops: Operators that work on each communication chan. Required. + * @chans: Array of channels. Required. + * @num_chans: Number of channels in the 'chans' array. Required. * @txdone_irq: Indicates if the controller can report to API when * the last transmitted data was read by the remote. * Eg, if it has some TX ACK irq. @@ -70,6 +78,7 @@ struct mbox_chan_ops { * @of_xlate: Controller driver specific mapping of channel via DT * @poll_hrt: API private. hrtimer used to poll for TXDONE on all * channels. + * @poll_hrt_lock: API private. Lock protecting access to poll_hrt. * @node: API private. To hook into list of controllers. */ struct mbox_controller { diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index 0c464eade1d660..4a5631906affb5 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -4,7 +4,7 @@ /* * Maple Tree - An RCU-safe adaptive tree for storing ranges * Copyright (c) 2018-2022 Oracle - * Authors: Liam R. Howlett + * Authors: Liam R. 
Howlett * Matthew Wilcox */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 0b776907152e89..af23453e9dbd0b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4391,7 +4391,7 @@ static inline void mmap_action_map_kernel_pages_full(struct vm_area_desc *desc, int mmap_action_prepare(struct vm_area_desc *desc); int mmap_action_complete(struct vm_area_struct *vma, - struct mmap_action *action); + struct mmap_action *action, bool is_compat); /* Look up the first VMA which exactly match the interval vm_start ... vm_end */ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index cf3374580f744d..5d75cc5b057eee 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -226,6 +226,7 @@ struct mr_table_ops { /** * struct mr_table - a multicast routing table + * @work: used for table destruction * @list: entry within a list of multicast routing tables * @net: net where this table belongs * @ops: protocol specific operations @@ -243,6 +244,7 @@ struct mr_table_ops { * @mroute_reg_vif_num: PIM-device vif index */ struct mr_table { + struct rcu_work work; struct list_head list; possible_net_t net; struct mr_table_ops ops; @@ -274,6 +276,7 @@ void vif_device_init(struct vif_device *v, unsigned short flags, unsigned short get_iflink_mask); +void mr_table_free(struct mr_table *mrt); struct mr_table * mr_table_alloc(struct net *net, u32 id, struct mr_table_ops *ops, diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 58abd306ebe394..782984ba3a209a 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -290,6 +290,12 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) +#define SPINAND_PAGE_READ_PACKED_8D_8D_0_OP(addr) \ + SPI_MEM_OP(SPI_MEM_DTR_OP_PACKED_CMD(0x13, addr >> 16, 8), \ + SPI_MEM_DTR_OP_ADDR(2, addr & 0xffff, 8), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + #define 
SPINAND_PAGE_READ_FROM_CACHE_8D_8D_8D_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x9d, 8), \ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \ @@ -483,6 +489,7 @@ struct spinand_ecc_info { #define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2) #define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3) #define SPINAND_NO_RAW_ACCESS BIT(4) +#define SPINAND_ODTR_PACKED_PAGE_READ BIT(5) /** * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h index 682f8104634597..d674d8ab26e6b3 100644 --- a/include/linux/nvme-auth.h +++ b/include/linux/nvme-auth.h @@ -49,9 +49,9 @@ int nvme_auth_augmented_challenge(u8 hmac_id, const u8 *skey, size_t skey_len, int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid); int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm, u8 *host_key, size_t host_key_len); -int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, - const u8 *ctrl_key, size_t ctrl_key_len, - u8 *sess_key, size_t sess_key_len); +int nvme_auth_gen_session_key(struct crypto_kpp *dh_tfm, + const u8 *public_key, size_t public_key_len, + u8 *sess_key, size_t sess_key_len, u8 hash_id); int nvme_auth_generate_psk(u8 hmac_id, const u8 *skey, size_t skey_len, const u8 *c1, const u8 *c2, size_t hash_len, u8 **ret_psk, size_t *ret_len); diff --git a/include/linux/platform_data/asoc-pxa.h b/include/linux/platform_data/asoc-pxa.h index 7b5b9e20fbf5e6..0d5eaf4b100ca3 100644 --- a/include/linux/platform_data/asoc-pxa.h +++ b/include/linux/platform_data/asoc-pxa.h @@ -6,27 +6,6 @@ #include #include -/* - * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95) - * a -1 value means no gpio will be used for reset - * @codec_pdata: AC97 codec platform_data - - * reset_gpio should only be specified for pxa27x CPUs where a silicon - * bug prevents correct operation of the reset line. 
If not specified, - * the default behaviour on these CPUs is to consider gpio 113 as the - * AC97 reset line, which is the default on most boards. - */ -typedef struct { - int (*startup)(struct snd_pcm_substream *, void *); - void (*shutdown)(struct snd_pcm_substream *, void *); - void (*suspend)(void *); - void (*resume)(void *); - void *priv; - int reset_gpio; - void *codec_pdata[AC97_BUS_MAX_DEVICES]; -} pxa2xx_audio_ops_t; - -extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops); -extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio); +extern void pxa27x_configure_ac97reset(struct gpio_desc *reset_gpio, bool to_gpio); #endif diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 2abba7552605c5..e3bc442256922b 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -261,6 +261,35 @@ static inline void list_replace_rcu(struct list_head *old, old->prev = LIST_POISON2; } +static inline void __list_splice_rcu(struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + last->next = next; + first->prev = prev; + next->prev = last; + rcu_assign_pointer(list_next_rcu(prev), first); +} + +/** + * list_splice_rcu - splice a non-RCU list into an RCU-protected list, + * designed for stacks. + * @list: the non RCU-protected list to splice + * @head: the place in the existing RCU-protected list to splice + * + * The list pointed to by @head can be RCU-read traversed concurrently with + * this function. + */ +static inline void list_splice_rcu(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice_rcu(list, head, head->next); +} + /** * __list_splice_init_rcu - join an RCU-protected list into an existing list. 
* @list: the RCU-protected list to splice diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h index 015c8298bebc45..fc2f596a6df1bf 100644 --- a/include/linux/rhashtable-types.h +++ b/include/linux/rhashtable-types.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -49,6 +50,7 @@ typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, * @head_offset: Offset of rhash_head in struct to be hashed * @max_size: Maximum size while expanding * @min_size: Minimum size while shrinking + * @insecure_elasticity: Set to true to disable chain length checks * @automatic_shrinking: Enable automatic shrinking of tables * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) * @obj_hashfn: Function to hash object @@ -61,6 +63,7 @@ struct rhashtable_params { u16 head_offset; unsigned int max_size; u16 min_size; + bool insecure_elasticity; bool automatic_shrinking; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; @@ -75,6 +78,7 @@ struct rhashtable_params { * @p: Configuration parameters * @rhlist: True if this is an rhltable * @run_work: Deferred worker to expand/shrink asynchronously + * @run_irq_work: Bounces the @run_work kick through hard IRQ context. 
* @mutex: Mutex to protect current/future table swapping * @lock: Spin lock to protect walker list * @nelems: Number of elements in table @@ -86,6 +90,7 @@ struct rhashtable { struct rhashtable_params p; bool rhlist; struct work_struct run_work; + struct irq_work run_irq_work; struct mutex mutex; spinlock_t lock; atomic_t nelems; diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 0480509a633933..ef5230cece364b 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -821,14 +822,15 @@ static __always_inline void *__rhashtable_insert_fast( goto out; } - if (elasticity <= 0) + if (elasticity <= 0 && !params.insecure_elasticity) goto slow_path; data = ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_max(ht, tbl))) goto out_unlock; - if (unlikely(rht_grow_above_100(ht, tbl))) + if (unlikely(rht_grow_above_100(ht, tbl)) && + !params.insecure_elasticity) goto slow_path; /* Inserting at head of list makes unlocking free. */ @@ -846,7 +848,7 @@ static __always_inline void *__rhashtable_insert_fast( rht_assign_unlock(tbl, bkt, obj, flags); if (rht_grow_above_75(ht, tbl)) - schedule_work(&ht->run_work); + irq_work_queue(&ht->run_irq_work); data = NULL; out: diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 578e520b6ee6cd..763eea4d80d87b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -202,7 +202,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define TP_CONDITION(args...) args /* - * Individual subsystem my have a separate configuration to + * Individual subsystem may have a separate configuration to * enable their tracepoints. By default, this file will create * the tracepoints if CONFIG_TRACEPOINTS is defined. 
If a subsystem * wants to be able to disable its tracepoints from being created diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index c92d4a976246d2..05572c19e14b7a 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -243,7 +243,7 @@ typedef struct port { churn_state_t sm_churn_actor_state; churn_state_t sm_churn_partner_state; struct slave *slave; /* pointer to the bond slave that this port belongs to */ - struct aggregator *aggregator; /* pointer to an aggregator that this port related to */ + struct aggregator __rcu *aggregator; /* pointer to an aggregator that this port related to */ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */ u32 transaction_id; /* continuous number for identification of Marker PDU's; */ struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 2c0173d9309c83..cff7b773e9721d 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1204,12 +1204,15 @@ struct nft_stats { struct u64_stats_sync syncp; }; +#define NFT_HOOK_REMOVE (1 << 0) + struct nft_hook { struct list_head list; struct list_head ops_list; struct rcu_head rcu; char ifname[IFNAMSIZ]; u8 ifnamelen; + u8 flags; }; struct nf_hook_ops *nft_hook_find_ops(const struct nft_hook *hook, @@ -1664,6 +1667,16 @@ struct nft_trans { u8 put_net:1; }; +/** + * struct nft_trans_hook - nf_tables hook update in transaction + * @list: used internally + * @hook: struct nft_hook with the device hook + */ +struct nft_trans_hook { + struct list_head list; + struct nft_hook *hook; +}; + /** * struct nft_trans_binding - nf_tables object with binding support in transaction * @nft_trans: base structure, MUST be first member diff --git a/include/net/netmem.h b/include/net/netmem.h index 507b74c9f52d72..78fe51e5756b10 100644 --- a/include/net/netmem.h +++ b/include/net/netmem.h @@ -127,6 +127,21 
@@ static inline unsigned int net_iov_idx(const struct net_iov *niov) return niov - net_iov_owner(niov)->niovs; } +/* Initialize a niov: stamp the owning area, the memory provider type, + * and the page_type "no type" sentinel expected by the page-type API + * (see PAGE_TYPE_OPS in ) so that + * page_pool_set_pp_info() can later call __SetPageNetpp() on a niov + * cast to struct page. + */ +static inline void net_iov_init(struct net_iov *niov, + struct net_iov_area *owner, + enum net_iov_type type) +{ + niov->owner = owner; + niov->type = type; + niov->page_type = UINT_MAX; +} + /* netmem */ /** diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h index 882b849b9255e2..69b404c354f501 100644 --- a/include/sound/ac97/codec.h +++ b/include/sound/ac97/codec.h @@ -108,6 +108,9 @@ static inline void ac97_set_drvdata(struct ac97_codec_device *adev, dev_set_drvdata(ac97_codec_dev2dev(adev), data); } -void *snd_ac97_codec_get_platdata(const struct ac97_codec_device *adev); +static inline void *snd_ac97_codec_get_platdata(const struct ac97_codec_device *adev) +{ + return NULL; +} #endif diff --git a/include/sound/ac97/controller.h b/include/sound/ac97/controller.h index 06b5afb7fa6b9f..d5895ea3922bde 100644 --- a/include/sound/ac97/controller.h +++ b/include/sound/ac97/controller.h @@ -62,14 +62,13 @@ struct ac97_controller_ops { #if IS_ENABLED(CONFIG_AC97_BUS_NEW) struct ac97_controller *snd_ac97_controller_register( const struct ac97_controller_ops *ops, struct device *dev, - unsigned short slots_available, void **codecs_pdata); + unsigned short slots_available); void snd_ac97_controller_unregister(struct ac97_controller *ac97_ctrl); #else static inline struct ac97_controller * snd_ac97_controller_register(const struct ac97_controller_ops *ops, struct device *dev, - unsigned short slots_available, - void **codecs_pdata) + unsigned short slots_available) { return ERR_PTR(-ENODEV); } diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h index 
0a6f8dabf8c4ca..2d86f62f94082a 100644 --- a/include/sound/pxa2xx-lib.h +++ b/include/sound/pxa2xx-lib.h @@ -2,55 +2,7 @@ #ifndef PXA2XX_LIB_H #define PXA2XX_LIB_H -#include -#include - -/* PCM */ -struct snd_pcm_substream; -struct snd_pcm_hw_params; -struct snd_soc_pcm_runtime; -struct snd_pcm; -struct snd_soc_component; - -extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params); -extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd); -extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream); -extern int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream); -extern int pxa2xx_pcm_open(struct snd_pcm_substream *substream); -extern int pxa2xx_pcm_close(struct snd_pcm_substream *substream); -extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm); -extern int pxa2xx_soc_pcm_new(struct snd_soc_component *component, - struct snd_soc_pcm_runtime *rtd); -extern int pxa2xx_soc_pcm_open(struct snd_soc_component *component, - struct snd_pcm_substream *substream); -extern int pxa2xx_soc_pcm_close(struct snd_soc_component *component, - struct snd_pcm_substream *substream); -extern int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params); -extern int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component, - struct snd_pcm_substream *substream); -extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component, - struct snd_pcm_substream *substream, int cmd); -extern snd_pcm_uframes_t -pxa2xx_soc_pcm_pointer(struct snd_soc_component *component, - struct snd_pcm_substream *substream); - -/* AC97 */ - -extern int pxa2xx_ac97_read(int slot, unsigned short reg); -extern int pxa2xx_ac97_write(int slot, unsigned short reg, unsigned short val); - -extern bool pxa2xx_ac97_try_warm_reset(void); -extern bool pxa2xx_ac97_try_cold_reset(void); -extern void pxa2xx_ac97_finish_reset(void); - 
-extern int pxa2xx_ac97_hw_suspend(void); -extern int pxa2xx_ac97_hw_resume(void); - -extern int pxa2xx_ac97_hw_probe(struct platform_device *dev); -extern void pxa2xx_ac97_hw_remove(struct platform_device *dev); +#include /* modem registers, used by touchscreen driver */ u32 pxa2xx_ac97_read_modr(void); diff --git a/include/sound/sdca.h b/include/sound/sdca.h index 67ff3c88705d5e..2bdf4e333e0449 100644 --- a/include/sound/sdca.h +++ b/include/sound/sdca.h @@ -26,6 +26,8 @@ struct sdca_dev; * @name: Human-readable string. * @type: Function topology type. * @adr: ACPI address (used for SDCA register access). + * @duplicate: Internal flag to indicate if other functions of the same type + * exist. */ struct sdca_function_desc { struct fwnode_handle *node; @@ -33,6 +35,8 @@ struct sdca_function_desc { const char *name; u32 type; u8 adr; + + bool duplicate; }; /** diff --git a/include/sound/sdca_asoc.h b/include/sound/sdca_asoc.h index 46a61a52decc5b..ca35d5a44370b9 100644 --- a/include/sound/sdca_asoc.h +++ b/include/sound/sdca_asoc.h @@ -13,6 +13,7 @@ struct device; struct regmap; struct sdca_function_data; +struct sdca_pde_delay; struct snd_ctl_elem_value; struct snd_kcontrol; struct snd_kcontrol_new; @@ -99,4 +100,9 @@ int sdca_asoc_q78_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); int sdca_asoc_q78_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); +int sdca_asoc_pde_poll_actual_ps(struct device *dev, struct regmap *regmap, + int function_id, int entity_id, + int from_ps, int to_ps, + const struct sdca_pde_delay *pde_delays, + int num_delays); #endif // __SDCA_ASOC_H__ diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h index 0e871c786513f6..b1489178b0ef89 100644 --- a/include/sound/sdca_function.h +++ b/include/sound/sdca_function.h @@ -1452,7 +1452,6 @@ static inline u32 sdca_range_search(struct sdca_control_range *range, } int sdca_parse_function(struct device *dev, struct 
sdw_slave *sdw, - struct sdca_function_desc *desc, struct sdca_function_data *function); const char *sdca_find_terminal_name(enum sdca_terminal_type type); diff --git a/include/sound/sdca_jack.h b/include/sound/sdca_jack.h index 3ec22046d3ebc2..181541f0f4d8c4 100644 --- a/include/sound/sdca_jack.h +++ b/include/sound/sdca_jack.h @@ -18,10 +18,13 @@ struct snd_soc_jack; * struct jack_state - Jack state structure to keep data between interrupts * @kctl: Pointer to the ALSA control attached to this jack * @jack: Pointer to the ASoC jack struct for this jack + * @mask: Possible reported jack status bits for this jack */ struct jack_state { struct snd_kcontrol *kctl; struct snd_soc_jack *jack; + + unsigned int mask; }; int sdca_jack_alloc_state(struct sdca_interrupt *interrupt); diff --git a/include/sound/snd_wavefront.h b/include/sound/snd_wavefront.h index 30f508a5676610..ac749bb2b836dd 100644 --- a/include/sound/snd_wavefront.h +++ b/include/sound/snd_wavefront.h @@ -12,6 +12,7 @@ struct _snd_wavefront_midi; struct _snd_wavefront_card; struct _snd_wavefront; +struct snd_wss; typedef struct _snd_wavefront_midi snd_wavefront_midi_t; typedef struct _snd_wavefront_card snd_wavefront_card_t; @@ -46,6 +47,8 @@ extern void snd_wavefront_midi_enable_virtual (snd_wavefront_card_t *); extern void snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *); extern void snd_wavefront_midi_interrupt (snd_wavefront_card_t *); extern int snd_wavefront_midi_start (snd_wavefront_card_t *); +void snd_wavefront_midi_suspend(snd_wavefront_card_t *card); +void snd_wavefront_midi_resume(snd_wavefront_card_t *card); struct _snd_wavefront { unsigned long irq; /* "you were one, one of the few ..." */ @@ -93,6 +96,7 @@ struct _snd_wavefront { int samples_used; /* how many */ char interrupts_are_midi; /* h/w MPU interrupts enabled ? 
*/ char rom_samples_rdonly; /* can we write on ROM samples */ + char midi_in_to_synth; /* route external MIDI to synth */ spinlock_t irq_lock; wait_queue_head_t interrupt_sleeper; snd_wavefront_midi_t midi; /* ICS2115 MIDI interface */ @@ -101,6 +105,7 @@ struct _snd_wavefront { struct _snd_wavefront_card { snd_wavefront_t wavefront; + struct snd_wss *chip; #ifdef CONFIG_PNP struct pnp_dev *wss; struct pnp_dev *ctrl; @@ -110,8 +115,10 @@ struct _snd_wavefront_card { }; extern void snd_wavefront_internal_interrupt (snd_wavefront_card_t *card); +void snd_wavefront_cache_firmware(snd_wavefront_t *dev); extern int snd_wavefront_start (snd_wavefront_t *dev); extern int snd_wavefront_detect (snd_wavefront_card_t *card); +int snd_wavefront_resume_synth(snd_wavefront_card_t *card); extern int snd_wavefront_cmd (snd_wavefront_t *, int, unsigned char *, unsigned char *); diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h index 48908318367348..d713ab2f66203f 100644 --- a/include/sound/soc_sdw_utils.h +++ b/include/sound/soc_sdw_utils.h @@ -272,7 +272,11 @@ int asoc_sdw_ti_amp_init(struct snd_soc_card *card, struct asoc_sdw_codec_info *info, bool playback); int asoc_sdw_ti_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); +int asoc_sdw_ti_tac5xx2_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, + struct snd_soc_dai *dai); int asoc_sdw_ti_amp_initial_settings(struct snd_soc_card *card, const char *name_prefix); +int asoc_sdw_ti_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); +int asoc_sdw_ti_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); #endif diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 17ac1b7854405f..909fb7aea638eb 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -905,7 +905,8 @@ struct io_uring_buf_reg { __u32 ring_entries; __u16 bgid; __u16 flags; - __u64 resv[3]; + __u32 min_left; + __u32 resv[5]; }; /* 
argument for IORING_REGISTER_PBUF_STATUS */ diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 8da2ff79817081..63061aa1cab945 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -47,7 +47,7 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len) this_len = min_t(u32, len, buf_len); buf_len -= this_len; /* Stop looping for invalid buffer length of 0 */ - if (buf_len || !this_len) { + if (buf_len > bl->min_left_sub_one || !this_len) { WRITE_ONCE(buf->addr, READ_ONCE(buf->addr) + this_len); WRITE_ONCE(buf->len, buf_len); return false; @@ -637,6 +637,10 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) if (reg.ring_entries >= 65536) return -EINVAL; + /* minimum left byte count is a property of incremental buffers */ + if (!(reg.flags & IOU_PBUF_RING_INC) && reg.min_left) + return -EINVAL; + bl = io_buffer_get_list(ctx, reg.bgid); if (bl) { /* if mapped buffer ring OR classic exists, don't allow */ @@ -680,10 +684,11 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) } #endif - bl->nr_entries = reg.ring_entries; bl->mask = reg.ring_entries - 1; bl->flags |= IOBL_BUF_RING; bl->buf_ring = br; + if (reg.min_left) + bl->min_left_sub_one = reg.min_left - 1; if (reg.flags & IOU_PBUF_RING_INC) bl->flags |= IOBL_INC; ret = io_buffer_add_list(ctx, bl, reg.bgid); diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h index bf15e26520d385..401773e1ef805e 100644 --- a/io_uring/kbuf.h +++ b/io_uring/kbuf.h @@ -27,12 +27,18 @@ struct io_buffer_list { __u16 bgid; /* below is for ring provided buffers */ - __u16 nr_entries; __u16 head; __u16 mask; __u16 flags; + /* + * minimum required amount to be left to reuse an incrementally + * consumed buffer. If less than this is left at consumption time, + * buffer is done and head is incremented to the next buffer. 
+ */ + __u32 min_left_sub_one; + struct io_mapped_region region; }; diff --git a/io_uring/napi.c b/io_uring/napi.c index 4a10de03e42693..8d68366a4b9039 100644 --- a/io_uring/napi.c +++ b/io_uring/napi.c @@ -276,6 +276,8 @@ static int io_napi_register_napi(struct io_ring_ctx *ctx, /* clean the napi list for new settings */ io_napi_free(ctx); WRITE_ONCE(ctx->napi_track_mode, napi->op_param); + /* cap NAPI at 10 msec of spin time */ + napi->busy_poll_to = min(10000, napi->busy_poll_to); WRITE_ONCE(ctx->napi_busy_poll_dt, napi->busy_poll_to * NSEC_PER_USEC); WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi->prefer_busy_poll); return 0; diff --git a/io_uring/tw.c b/io_uring/tw.c index fdff81eebc95cb..023d5e6bc491a6 100644 --- a/io_uring/tw.c +++ b/io_uring/tw.c @@ -273,8 +273,18 @@ void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags) void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx) { - struct llist_node *node = llist_del_all(&ctx->work_llist); + struct llist_node *node; + /* + * Running the work items may utilize ->retry_llist as a means + * for capping the number of task_work entries run at the same + * time. But that list can potentially race with moving the work + * from here, if the task is exiting. As any normal task_work + * running holds ->uring_lock already, just guard this slow path + * with ->uring_lock to avoid racing on ->retry_llist. 
+ */ + guard(mutex)(&ctx->uring_lock); + node = llist_del_all(&ctx->work_llist); __io_fallback_tw(node, false); node = llist_del_all(&ctx->retry_llist); __io_fallback_tw(node, false); diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index 7b93c87b8371b4..19837e0b5e9189 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -495,10 +495,9 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq, for (i = 0; i < nr_iovs; i++) { struct net_iov *niov = &area->nia.niovs[i]; - niov->owner = &area->nia; + net_iov_init(niov, &area->nia, NET_IOV_IOURING); area->freelist[i] = i; atomic_set(&area->user_refs[i], 0); - niov->type = NET_IOV_IOURING; } if (ifq->dev) { diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 43adc96c7f1afb..45c0b1ed687acb 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -3934,33 +3934,41 @@ static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v) static ssize_t pressure_write(struct kernfs_open_file *of, char *buf, size_t nbytes, enum psi_res res) { - struct cgroup_file_ctx *ctx = of->priv; + struct cgroup_file_ctx *ctx; struct psi_trigger *new; struct cgroup *cgrp; struct psi_group *psi; + ssize_t ret = 0; cgrp = cgroup_kn_lock_live(of->kn, false); if (!cgrp) return -ENODEV; - cgroup_get(cgrp); - cgroup_kn_unlock(of->kn); + ctx = of->priv; + if (!ctx) { + ret = -ENODEV; + goto out_unlock; + } /* Allow only one trigger per file descriptor */ if (ctx->psi.trigger) { - cgroup_put(cgrp); - return -EBUSY; + ret = -EBUSY; + goto out_unlock; } psi = cgroup_psi(cgrp); new = psi_trigger_create(psi, buf, res, of->file, of); if (IS_ERR(new)) { - cgroup_put(cgrp); - return PTR_ERR(new); + ret = PTR_ERR(new); + goto out_unlock; } smp_store_release(&ctx->psi.trigger, new); - cgroup_put(cgrp); + +out_unlock: + cgroup_kn_unlock(of->kn); + if (ret) + return ret; return nbytes; } @@ -5716,16 +5724,6 @@ static void offline_css(struct cgroup_subsys_state *css) RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL); 
wake_up_all(&css->cgroup->offline_waitq); - - css->cgroup->nr_dying_subsys[ss->id]++; - /* - * Parent css and cgroup cannot be freed until after the freeing - * of child css, see css_free_rwork_fn(). - */ - while ((css = css->parent)) { - css->nr_descendants--; - css->cgroup->nr_dying_subsys[ss->id]++; - } } /** @@ -6038,6 +6036,8 @@ static void css_killed_ref_fn(struct percpu_ref *ref) */ static void kill_css(struct cgroup_subsys_state *css) { + struct cgroup_subsys *ss = css->ss; + lockdep_assert_held(&cgroup_mutex); if (css->flags & CSS_DYING) @@ -6074,6 +6074,16 @@ static void kill_css(struct cgroup_subsys_state *css) * css is confirmed to be seen as killed on all CPUs. */ percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn); + + css->cgroup->nr_dying_subsys[ss->id]++; + /* + * Parent css and cgroup cannot be freed until after the freeing + * of child css, see css_free_rwork_fn(). + */ + while ((css = css->parent)) { + css->nr_descendants--; + css->cgroup->nr_dying_subsys[ss->id]++; + } } /** diff --git a/kernel/cgroup/cpuset-internal.h b/kernel/cgroup/cpuset-internal.h index fd7d19842ded7d..bb4e692bea300c 100644 --- a/kernel/cgroup/cpuset-internal.h +++ b/kernel/cgroup/cpuset-internal.h @@ -168,6 +168,11 @@ struct cpuset { int nr_deadline_tasks; int nr_migrate_dl_tasks; u64 sum_migrate_dl_bw; + /* + * CPU used for temporary DL bandwidth allocation during attach; + * -1 if no DL bandwidth was allocated in the current attach. 
+ */ + int dl_bw_cpu; /* Invalid partition error code, not lock protected */ enum prs_errcode prs_err; diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 1335e437098e80..e3a081a07c6d51 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -288,6 +288,7 @@ struct cpuset top_cpuset = { .flags = BIT(CS_CPU_EXCLUSIVE) | BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE), .partition_root_state = PRS_ROOT, + .dl_bw_cpu = -1, }; /** @@ -579,6 +580,8 @@ static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs) if (!trial) return NULL; + trial->dl_bw_cpu = -1; + /* Setup cpumask pointer array */ cpumask_var_t *pmask[4] = { &trial->cpus_allowed, @@ -2980,6 +2983,7 @@ static void reset_migrate_dl_data(struct cpuset *cs) { cs->nr_migrate_dl_tasks = 0; cs->sum_migrate_dl_bw = 0; + cs->dl_bw_cpu = -1; } /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ @@ -3056,6 +3060,8 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) reset_migrate_dl_data(cs); goto out_unlock; } + + cs->dl_bw_cpu = cpu; } out_success: @@ -3080,12 +3086,11 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset) mutex_lock(&cpuset_mutex); dec_attach_in_progress_locked(cs); - if (cs->nr_migrate_dl_tasks) { - int cpu = cpumask_any(cs->effective_cpus); + if (cs->dl_bw_cpu >= 0) + dl_bw_free(cs->dl_bw_cpu, cs->sum_migrate_dl_bw); - dl_bw_free(cpu, cs->sum_migrate_dl_bw); + if (cs->nr_migrate_dl_tasks) reset_migrate_dl_data(cs); - } mutex_unlock(&cpuset_mutex); } diff --git a/kernel/cgroup/rdma.c b/kernel/cgroup/rdma.c index 9967fb25c56343..4fdab4cf49e0f2 100644 --- a/kernel/cgroup/rdma.c +++ b/kernel/cgroup/rdma.c @@ -283,7 +283,7 @@ int rdmacg_try_charge(struct rdma_cgroup **rdmacg, ret = PTR_ERR(rpool); goto err; } else { - new = rpool->resources[index].usage + 1; + new = (s64)rpool->resources[index].usage + 1; if (new > rpool->resources[index].max) { ret = -EAGAIN; goto err; diff --git a/kernel/fork.c b/kernel/fork.c index 
f1ad69c6dc2d4e..5f3fdfdb14c7c7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1951,9 +1951,11 @@ static void rv_task_fork(struct task_struct *p) static bool need_futex_hash_allocate_default(u64 clone_flags) { - if ((clone_flags & (CLONE_THREAD | CLONE_VM)) != (CLONE_THREAD | CLONE_VM)) - return false; - return true; + /* + * Allocate a default futex hash for any sibling that will + * share the parent's mm, except vfork. + */ + return (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM; } /* @@ -2380,10 +2382,6 @@ __latent_entropy struct task_struct *copy_process( if (retval) goto bad_fork_cancel_cgroup; - /* - * Allocate a default futex hash for the user process once the first - * thread spawns. - */ if (need_futex_hash_allocate_default(clone_flags)) { retval = futex_hash_allocate_default(); if (retval) diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c index d818b4d47f1bad..b597cb3d17fc11 100644 --- a/kernel/futex/requeue.c +++ b/kernel/futex/requeue.c @@ -319,8 +319,11 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, return -EINVAL; /* Ensure that this does not race against an early wakeup */ - if (!futex_requeue_pi_prepare(top_waiter, NULL)) + if (!futex_requeue_pi_prepare(top_waiter, NULL)) { + plist_del(&top_waiter->list, &hb1->chain); + futex_hb_waiters_dec(hb1); return -EAGAIN; + } /* * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit @@ -722,10 +725,12 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, /* * We were woken prior to requeue by a timeout or a signal. - * Unqueue the futex_q and determine which it was. + * Conditionally unqueue the futex_q and determine which it was. 
*/ - plist_del(&q->list, &hb->chain); - futex_hb_waiters_dec(hb); + if (!plist_node_empty(&q->list)) { + plist_del(&q->list, &hb->chain); + futex_hb_waiters_dec(hb); + } /* Handle spurious wakeups gracefully */ ret = -EWOULDBLOCK; diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c index 94762de1fe5f03..18509d8082ea75 100644 --- a/kernel/liveupdate/kexec_handover.c +++ b/kernel/liveupdate/kexec_handover.c @@ -762,19 +762,24 @@ int kho_add_subtree(const char *name, void *blob, size_t size) goto out_pack; } - err = fdt_setprop(root_fdt, off, KHO_SUB_TREE_PROP_NAME, - &phys, sizeof(phys)); - if (err < 0) - goto out_pack; + fdt_err = fdt_setprop(root_fdt, off, KHO_SUB_TREE_PROP_NAME, + &phys, sizeof(phys)); + if (fdt_err < 0) + goto out_del_node; - err = fdt_setprop(root_fdt, off, KHO_SUB_TREE_SIZE_PROP_NAME, - &size_u64, sizeof(size_u64)); - if (err < 0) - goto out_pack; + fdt_err = fdt_setprop(root_fdt, off, KHO_SUB_TREE_SIZE_PROP_NAME, + &size_u64, sizeof(size_u64)); + if (fdt_err < 0) + goto out_del_node; WARN_ON_ONCE(kho_debugfs_blob_add(&kho_out.dbg, name, blob, size, false)); + err = 0; + goto out_pack; + +out_del_node: + fdt_del_node(root_fdt, off); out_pack: fdt_pack(root_fdt); diff --git a/kernel/liveupdate/luo_session.c b/kernel/liveupdate/luo_session.c index a3327a28fc1f72..7a42385dabe279 100644 --- a/kernel/liveupdate/luo_session.c +++ b/kernel/liveupdate/luo_session.c @@ -514,11 +514,12 @@ int luo_session_deserialize(void) { struct luo_session_header *sh = &luo_session_global.incoming; static bool is_deserialized; - static int err; + static int saved_err; + int err; /* If has been deserialized, always return the same error code */ if (is_deserialized) - return err; + return saved_err; is_deserialized = true; if (!sh->active) @@ -547,7 +548,8 @@ int luo_session_deserialize(void) pr_warn("Failed to allocate session [%.*s] during deserialization %pe\n", (int)sizeof(sh->ser[i].name), sh->ser[i].name, session); - return 
PTR_ERR(session); + err = PTR_ERR(session); + goto save_err; } err = luo_session_insert(sh, session); @@ -555,7 +557,7 @@ int luo_session_deserialize(void) pr_warn("Failed to insert session [%s] %pe\n", session->name, ERR_PTR(err)); luo_session_free(session); - return err; + goto save_err; } scoped_guard(mutex, &session->mutex) { @@ -565,7 +567,7 @@ int luo_session_deserialize(void) if (err) { pr_warn("Failed to deserialize files for session [%s] %pe\n", session->name, ERR_PTR(err)); - return err; + goto save_err; } } @@ -574,6 +576,9 @@ int luo_session_deserialize(void) sh->ser = NULL; return 0; +save_err: + saved_err = err; + return err; } int luo_session_serialize(void) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index da20fb6ea25ae1..b8871449d3c694 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4458,6 +4458,7 @@ static void __sched_fork(u64 clone_flags, struct task_struct *p) p->se.nr_migrations = 0; p->se.vruntime = 0; p->se.vlag = 0; + p->se.rel_deadline = 0; INIT_LIST_HEAD(&p->se.group_node); /* A delayed task cannot be in clone(). 
*/ diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index e426e27b679449..345aa11b84b28e 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -32,6 +32,7 @@ static const struct rhashtable_params scx_sched_hash_params = { .key_len = sizeof_field(struct scx_sched, ops.sub_cgroup_id), .key_offset = offsetof(struct scx_sched, ops.sub_cgroup_id), .head_offset = offsetof(struct scx_sched, hash_node), + .insecure_elasticity = true, /* inserted under scx_sched_lock */ }; static struct rhashtable scx_sched_hash; @@ -52,8 +53,6 @@ DEFINE_STATIC_KEY_FALSE(__scx_enabled); DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED); static DEFINE_RAW_SPINLOCK(scx_bypass_lock); -static cpumask_var_t scx_bypass_lb_donee_cpumask; -static cpumask_var_t scx_bypass_lb_resched_cpumask; static bool scx_init_task_enabled; static bool scx_switching_all; DEFINE_STATIC_KEY_FALSE(__scx_switched_all); @@ -469,24 +468,35 @@ static inline void update_locked_rq(struct rq *rq) __this_cpu_write(scx_locked_rq_state, rq); } -#define SCX_CALL_OP(sch, op, rq, args...) \ +/* + * SCX ops can recurse via scx_bpf_sub_dispatch() - the inner call must not + * clobber the outer's scx_locked_rq_state. Save it on entry, restore on exit. + */ +#define SCX_CALL_OP(sch, op, locked_rq, args...) \ do { \ - if (rq) \ - update_locked_rq(rq); \ + struct rq *__prev_locked_rq; \ + \ + if (locked_rq) { \ + __prev_locked_rq = scx_locked_rq(); \ + update_locked_rq(locked_rq); \ + } \ (sch)->ops.op(args); \ - if (rq) \ - update_locked_rq(NULL); \ + if (locked_rq) \ + update_locked_rq(__prev_locked_rq); \ } while (0) -#define SCX_CALL_OP_RET(sch, op, rq, args...) \ +#define SCX_CALL_OP_RET(sch, op, locked_rq, args...) 
\ ({ \ + struct rq *__prev_locked_rq; \ __typeof__((sch)->ops.op(args)) __ret; \ \ - if (rq) \ - update_locked_rq(rq); \ + if (locked_rq) { \ + __prev_locked_rq = scx_locked_rq(); \ + update_locked_rq(locked_rq); \ + } \ __ret = (sch)->ops.op(args); \ - if (rq) \ - update_locked_rq(NULL); \ + if (locked_rq) \ + update_locked_rq(__prev_locked_rq); \ __ret; \ }) @@ -498,39 +508,39 @@ do { \ * those subject tasks. * * Every SCX_CALL_OP_TASK*() call site invokes its op with @p's rq lock held - - * either via the @rq argument here, or (for ops.select_cpu()) via @p's pi_lock - * held by try_to_wake_up() with rq tracking via scx_rq.in_select_cpu. So if - * kf_tasks[] is set, @p's scheduler-protected fields are stable. + * either via the @locked_rq argument here, or (for ops.select_cpu()) via @p's + * pi_lock held by try_to_wake_up() with rq tracking via scx_rq.in_select_cpu. + * So if kf_tasks[] is set, @p's scheduler-protected fields are stable. * * kf_tasks[] can not stack, so task-based SCX ops must not nest. The * WARN_ON_ONCE() in each macro catches a re-entry of any of the three variants * while a previous one is still in progress. */ -#define SCX_CALL_OP_TASK(sch, op, rq, task, args...) \ +#define SCX_CALL_OP_TASK(sch, op, locked_rq, task, args...) \ do { \ WARN_ON_ONCE(current->scx.kf_tasks[0]); \ current->scx.kf_tasks[0] = task; \ - SCX_CALL_OP((sch), op, rq, task, ##args); \ + SCX_CALL_OP((sch), op, locked_rq, task, ##args); \ current->scx.kf_tasks[0] = NULL; \ } while (0) -#define SCX_CALL_OP_TASK_RET(sch, op, rq, task, args...) \ +#define SCX_CALL_OP_TASK_RET(sch, op, locked_rq, task, args...) \ ({ \ __typeof__((sch)->ops.op(task, ##args)) __ret; \ WARN_ON_ONCE(current->scx.kf_tasks[0]); \ current->scx.kf_tasks[0] = task; \ - __ret = SCX_CALL_OP_RET((sch), op, rq, task, ##args); \ + __ret = SCX_CALL_OP_RET((sch), op, locked_rq, task, ##args); \ current->scx.kf_tasks[0] = NULL; \ __ret; \ }) -#define SCX_CALL_OP_2TASKS_RET(sch, op, rq, task0, task1, args...) 
\ +#define SCX_CALL_OP_2TASKS_RET(sch, op, locked_rq, task0, task1, args...) \ ({ \ __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \ WARN_ON_ONCE(current->scx.kf_tasks[0]); \ current->scx.kf_tasks[0] = task0; \ current->scx.kf_tasks[1] = task1; \ - __ret = SCX_CALL_OP_RET((sch), op, rq, task0, task1, ##args); \ + __ret = SCX_CALL_OP_RET((sch), op, locked_rq, task0, task1, ##args); \ current->scx.kf_tasks[0] = NULL; \ current->scx.kf_tasks[1] = NULL; \ __ret; \ @@ -1388,18 +1398,55 @@ static void call_task_dequeue(struct scx_sched *sch, struct rq *rq, p->scx.flags &= ~SCX_TASK_IN_CUSTODY; } -static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p, - u64 enq_flags) +static void local_dsq_post_enq(struct scx_sched *sch, struct scx_dispatch_q *dsq, + struct task_struct *p, u64 enq_flags) { struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); - bool preempt = false; - call_task_dequeue(scx_root, rq, p, 0); + call_task_dequeue(sch, rq, p, 0); + + /* + * Note that @rq's lock may be dropped between this enqueue and @p + * actually getting on CPU. This gives higher-class tasks (e.g. RT) + * an opportunity to wake up on @rq and prevent @p from running. + * Here are some concrete examples: + * + * Example 1: + * + * We dispatch two tasks from a single ops.dispatch(): + * - First, a local task to this CPU's local DSQ; + * - Second, a local/remote task to a remote CPU's local DSQ. + * We must drop the local rq lock in order to finish the second + * dispatch. In that time, an RT task can wake up on the local rq. + * + * Example 2: + * + * We dispatch a local/remote task to a remote CPU's local DSQ. + * We must drop the remote rq lock before the dispatched task can run, + * which gives an RT task an opportunity to wake up on the remote rq. + * + * Both examples work the same if we replace dispatching with moving + * the tasks from a user-created DSQ. 
+ * + * We must detect these wakeups so that we can re-enqueue IMMED tasks + * from @rq's local DSQ. scx_wakeup_preempt() serves exactly this + * purpose, but for it to be invoked, we must ensure that we bump + * @rq->next_class to &ext_sched_class if it's currently idle. + * + * wakeup_preempt() does the bumping, and since we only invoke it if + * @rq->next_class is below &ext_sched_class, it will also + * resched_curr(rq). + */ + if (sched_class_above(p->sched_class, rq->next_class)) + wakeup_preempt(rq, p, 0); /* * If @rq is in balance, the CPU is already vacant and looking for the * next task to run. No need to preempt or trigger resched after moving * @p into its local DSQ. + * Note that the wakeup_preempt() above may have already triggered + * a resched if @rq->next_class was idle. It's harmless, since + * need_resched is cleared immediately after task pick. */ if (rq->scx.flags & SCX_RQ_IN_BALANCE) return; @@ -1407,11 +1454,8 @@ static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && rq->curr->sched_class == &ext_sched_class) { rq->curr->scx.slice = 0; - preempt = true; - } - - if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class)) resched_curr(rq); + } } static void dispatch_enqueue(struct scx_sched *sch, struct rq *rq, @@ -1494,11 +1538,13 @@ static void dispatch_enqueue(struct scx_sched *sch, struct rq *rq, if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN)) rcu_assign_pointer(dsq->first_task, p); } else { - bool was_empty; - - was_empty = list_empty(&dsq->list); + /* + * dsq->list can contain parked BPF iterator cursors, so + * list_empty() here isn't a reliable proxy for "no real + * task in the DSQ". Test dsq->first_task directly. 
+ */ list_add_tail(&p->scx.dsq_list.node, &dsq->list); - if (was_empty && !(dsq->id & SCX_DSQ_FLAG_BUILTIN)) + if (!dsq->first_task && !(dsq->id & SCX_DSQ_FLAG_BUILTIN)) rcu_assign_pointer(dsq->first_task, p); } } @@ -1518,7 +1564,7 @@ static void dispatch_enqueue(struct scx_sched *sch, struct rq *rq, * concurrently in a non-atomic way. */ if (is_local) { - local_dsq_post_enq(dsq, p, enq_flags); + local_dsq_post_enq(sch, dsq, p, enq_flags); } else { /* * Task on global/bypass DSQ: leave custody, task on @@ -2129,7 +2175,8 @@ static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_fl schedule_reenq_local(rq, 0); } -static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags, +static void move_local_task_to_local_dsq(struct scx_sched *sch, + struct task_struct *p, u64 enq_flags, struct scx_dispatch_q *src_dsq, struct rq *dst_rq) { @@ -2149,7 +2196,7 @@ static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags, dsq_inc_nr(dst_dsq, p, enq_flags); p->scx.dsq = dst_dsq; - local_dsq_post_enq(dst_dsq, p, enq_flags); + local_dsq_post_enq(sch, dst_dsq, p, enq_flags); } /** @@ -2370,7 +2417,7 @@ static struct rq *move_task_between_dsqs(struct scx_sched *sch, /* @p is going from a non-local DSQ to a local DSQ */ if (src_rq == dst_rq) { task_unlink_from_dsq(p, src_dsq); - move_local_task_to_local_dsq(p, enq_flags, + move_local_task_to_local_dsq(sch, p, enq_flags, src_dsq, dst_rq); raw_spin_unlock(&src_dsq->lock); } else { @@ -2423,7 +2470,7 @@ static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq, if (rq == task_rq) { task_unlink_from_dsq(p, dsq); - move_local_task_to_local_dsq(p, enq_flags, dsq, rq); + move_local_task_to_local_dsq(sch, p, enq_flags, dsq, rq); raw_spin_unlock(&dsq->lock); return true; } @@ -3183,7 +3230,7 @@ bool scx_prio_less(const struct task_struct *a, const struct task_struct *b, if (sch_a == sch_b && SCX_HAS_OP(sch_a, core_sched_before) && !scx_bypassing(sch_a, task_cpu(a))) return 
SCX_CALL_OP_2TASKS_RET(sch_a, core_sched_before, - NULL, + task_rq(a), (struct task_struct *)a, (struct task_struct *)b); else @@ -3631,6 +3678,22 @@ static void __scx_disable_and_exit_task(struct scx_sched *sch, SCX_CALL_OP_TASK(sch, exit_task, task_rq(p), p, &args); } +/* + * Undo a completed __scx_init_task(sch, p, false) when scx_enable_task() never + * ran. The task state has not been transitioned, so this mirrors the + * SCX_TASK_INIT branch in __scx_disable_and_exit_task(). + */ +static void scx_sub_init_cancel_task(struct scx_sched *sch, struct task_struct *p) +{ + struct scx_exit_task_args args = { .cancelled = true }; + + lockdep_assert_held(&p->pi_lock); + lockdep_assert_rq_held(task_rq(p)); + + if (SCX_HAS_OP(sch, exit_task)) + SCX_CALL_OP_TASK(sch, exit_task, task_rq(p), p, &args); +} + static void scx_disable_and_exit_task(struct scx_sched *sch, struct task_struct *p) { @@ -3639,11 +3702,12 @@ static void scx_disable_and_exit_task(struct scx_sched *sch, /* * If set, @p exited between __scx_init_task() and scx_enable_task() in * scx_sub_enable() and is initialized for both the associated sched and - * its parent. Disable and exit for the child too. + * its parent. Exit for the child too - scx_enable_task() never ran for + * it, so undo only init_task. 
*/ - if ((p->scx.flags & SCX_TASK_SUB_INIT) && - !WARN_ON_ONCE(!scx_enabling_sub_sched)) { - __scx_disable_and_exit_task(scx_enabling_sub_sched, p); + if (p->scx.flags & SCX_TASK_SUB_INIT) { + if (!WARN_ON_ONCE(!scx_enabling_sub_sched)) + scx_sub_init_cancel_task(scx_enabling_sub_sched, p); p->scx.flags &= ~SCX_TASK_SUB_INIT; } @@ -4324,9 +4388,10 @@ void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) void scx_group_set_weight(struct task_group *tg, unsigned long weight) { - struct scx_sched *sch = scx_root; + struct scx_sched *sch; percpu_down_read(&scx_cgroup_ops_rwsem); + sch = scx_root; if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) && tg->scx.weight != weight) @@ -4339,9 +4404,10 @@ void scx_group_set_weight(struct task_group *tg, unsigned long weight) void scx_group_set_idle(struct task_group *tg, bool idle) { - struct scx_sched *sch = scx_root; + struct scx_sched *sch; percpu_down_read(&scx_cgroup_ops_rwsem); + sch = scx_root; if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle)) SCX_CALL_OP(sch, cgroup_set_idle, NULL, tg_cgrp(tg), idle); @@ -4355,9 +4421,10 @@ void scx_group_set_idle(struct task_group *tg, bool idle) void scx_group_set_bandwidth(struct task_group *tg, u64 period_us, u64 quota_us, u64 burst_us) { - struct scx_sched *sch = scx_root; + struct scx_sched *sch; percpu_down_read(&scx_cgroup_ops_rwsem); + sch = scx_root; if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) && (tg->scx.bw_period_us != period_us || @@ -4380,21 +4447,6 @@ static struct cgroup *root_cgroup(void) return &cgrp_dfl_root.cgrp; } -static struct cgroup *sch_cgroup(struct scx_sched *sch) -{ - return sch->cgrp; -} - -/* for each descendant of @cgrp including self, set ->scx_sched to @sch */ -static void set_cgroup_sched(struct cgroup *cgrp, struct scx_sched *sch) -{ - struct cgroup *pos; - struct cgroup_subsys_state *css; - - cgroup_for_each_live_descendant_pre(pos, css, cgrp) - rcu_assign_pointer(pos->scx_sched, sch); -} - static void 
scx_cgroup_lock(void) { #ifdef CONFIG_EXT_GROUP_SCHED @@ -4412,12 +4464,30 @@ static void scx_cgroup_unlock(void) } #else /* CONFIG_EXT_GROUP_SCHED || CONFIG_EXT_SUB_SCHED */ static struct cgroup *root_cgroup(void) { return NULL; } -static struct cgroup *sch_cgroup(struct scx_sched *sch) { return NULL; } -static void set_cgroup_sched(struct cgroup *cgrp, struct scx_sched *sch) {} static void scx_cgroup_lock(void) {} static void scx_cgroup_unlock(void) {} #endif /* CONFIG_EXT_GROUP_SCHED || CONFIG_EXT_SUB_SCHED */ +#ifdef CONFIG_EXT_SUB_SCHED +static struct cgroup *sch_cgroup(struct scx_sched *sch) +{ + return sch->cgrp; +} + +/* for each descendant of @cgrp including self, set ->scx_sched to @sch */ +static void set_cgroup_sched(struct cgroup *cgrp, struct scx_sched *sch) +{ + struct cgroup *pos; + struct cgroup_subsys_state *css; + + cgroup_for_each_live_descendant_pre(pos, css, cgrp) + rcu_assign_pointer(pos->scx_sched, sch); +} +#else /* CONFIG_EXT_SUB_SCHED */ +static struct cgroup *sch_cgroup(struct scx_sched *sch) { return NULL; } +static void set_cgroup_sched(struct cgroup *cgrp, struct scx_sched *sch) {} +#endif /* CONFIG_EXT_SUB_SCHED */ + /* * Omitted operations: * @@ -4712,6 +4782,8 @@ static void scx_sched_free_rcu_work(struct work_struct *work) irq_work_sync(&sch->disable_irq_work); kthread_destroy_worker(sch->helper); timer_shutdown_sync(&sch->bypass_lb_timer); + free_cpumask_var(sch->bypass_lb_donee_cpumask); + free_cpumask_var(sch->bypass_lb_resched_cpumask); #ifdef CONFIG_EXT_SUB_SCHED kfree(sch->cgrp_path); @@ -4938,6 +5010,25 @@ void scx_softlockup(u32 dur_s) smp_processor_id(), dur_s); } +/* + * scx_hardlockup() runs from NMI and eventually calls scx_claim_exit(), + * which takes scx_sched_lock. scx_sched_lock isn't NMI-safe and grabbing + * it from NMI context can lead to deadlocks. Defer via irq_work; the + * disable path runs off irq_work anyway. 
+ */ +static atomic_t scx_hardlockup_cpu = ATOMIC_INIT(-1); + +static void scx_hardlockup_irq_workfn(struct irq_work *work) +{ + int cpu = atomic_xchg(&scx_hardlockup_cpu, -1); + + if (cpu >= 0 && handle_lockup("hard lockup - CPU %d", cpu)) + printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n", + cpu); +} + +static DEFINE_IRQ_WORK(scx_hardlockup_irq_work, scx_hardlockup_irq_workfn); + /** * scx_hardlockup - sched_ext hardlockup handler * @@ -4946,17 +5037,19 @@ void scx_softlockup(u32 dur_s) * Try kicking out the current scheduler in an attempt to recover the system to * a good state before taking more drastic actions. * - * Returns %true if sched_ext is enabled and abort was initiated, which may - * resolve the reported hardlockup. %false if sched_ext is not enabled or - * someone else already initiated abort. + * Queues an irq_work; the handle_lockup() call happens in IRQ context (see + * scx_hardlockup_irq_workfn). + * + * Returns %true if sched_ext is enabled and the work was queued, %false + * otherwise. */ bool scx_hardlockup(int cpu) { - if (!handle_lockup("hard lockup - CPU %d", cpu)) + if (!rcu_access_pointer(scx_root)) return false; - printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n", - cpu); + atomic_cmpxchg(&scx_hardlockup_cpu, -1, cpu); + irq_work_queue(&scx_hardlockup_irq_work); return true; } @@ -5000,6 +5093,15 @@ static u32 bypass_lb_cpu(struct scx_sched *sch, s32 donor, if (cpumask_empty(donee_mask)) break; + /* + * If an earlier pass placed @p on @donor_dsq from a different + * CPU and the donee hasn't consumed it yet, @p is still on the + * previous CPU and task_rq(@p) != @donor_rq. @p can't be moved + * without its rq locked. Skip. 
+ */
+ if (task_rq(p) != donor_rq)
+ continue;
+
 donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr); if (donee >= nr_cpu_ids) continue; @@ -5058,8 +5160,8 @@ static u32 bypass_lb_cpu(struct scx_sched *sch, s32 donor, static void bypass_lb_node(struct scx_sched *sch, int node) { const struct cpumask *node_mask = cpumask_of_node(node); - struct cpumask *donee_mask = scx_bypass_lb_donee_cpumask; - struct cpumask *resched_mask = scx_bypass_lb_resched_cpumask; + struct cpumask *donee_mask = sch->bypass_lb_donee_cpumask; + struct cpumask *resched_mask = sch->bypass_lb_resched_cpumask; u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0; u32 nr_target, nr_donor_target; u32 before_min = U32_MAX, before_max = 0; @@ -5698,6 +5800,8 @@ static void scx_sub_disable(struct scx_sched *sch) if (sch->ops.exit) SCX_CALL_OP(sch, exit, NULL, sch->exit_info); + if (sch->sub_kset) + kset_unregister(sch->sub_kset); kobject_del(&sch->kobj); } #else /* CONFIG_EXT_SUB_SCHED */ @@ -5829,6 +5933,10 @@ static void scx_root_disable(struct scx_sched *sch) * could observe an object of the same name still in the hierarchy when * the next scheduler is loaded. */ +#ifdef CONFIG_EXT_SUB_SCHED + if (sch->sub_kset) + kset_unregister(sch->sub_kset); +#endif kobject_del(&sch->kobj); free_kick_syncs(); @@ -5921,6 +6029,25 @@ static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind) irq_work_queue(&sch->disable_irq_work); } +/** + * scx_flush_disable_work - flush the disable work and wait for it to finish + * @sch: the scheduler + * + * sch->disable_work might not be queued yet, causing kthread_flush_work() + * to be a no-op. Syncing the irq_work first is required to guarantee the + * kthread work has been queued before waiting for it. 
+ */ +static void scx_flush_disable_work(struct scx_sched *sch) +{ + int kind; + + do { + irq_work_sync(&sch->disable_irq_work); + kthread_flush_work(&sch->disable_work); + kind = atomic_read(&sch->exit_kind); + } while (kind != SCX_EXIT_NONE && kind != SCX_EXIT_DONE); +} + static void dump_newline(struct seq_buf *s) { trace_sched_ext_dump(""); @@ -6032,9 +6159,8 @@ static void ops_dump_exit(void) scx_dump_data.cpu = -1; } -static void scx_dump_task(struct scx_sched *sch, - struct seq_buf *s, struct scx_dump_ctx *dctx, - struct task_struct *p, char marker) +static void scx_dump_task(struct scx_sched *sch, struct seq_buf *s, struct scx_dump_ctx *dctx, + struct rq *rq, struct task_struct *p, char marker) { static unsigned long bt[SCX_EXIT_BT_LEN]; struct scx_sched *task_sch = scx_task_sched(p); @@ -6075,7 +6201,7 @@ static void scx_dump_task(struct scx_sched *sch, if (SCX_HAS_OP(sch, dump_task)) { ops_dump_init(s, " "); - SCX_CALL_OP(sch, dump_task, NULL, dctx, p); + SCX_CALL_OP(sch, dump_task, rq, dctx, p); ops_dump_exit(); } @@ -6199,8 +6325,7 @@ static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei, used = seq_buf_used(&ns); if (SCX_HAS_OP(sch, dump_cpu)) { ops_dump_init(&ns, " "); - SCX_CALL_OP(sch, dump_cpu, NULL, - &dctx, cpu, idle); + SCX_CALL_OP(sch, dump_cpu, rq, &dctx, cpu, idle); ops_dump_exit(); } @@ -6223,11 +6348,11 @@ static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei, if (rq->curr->sched_class == &ext_sched_class && (dump_all_tasks || scx_task_on_sched(sch, rq->curr))) - scx_dump_task(sch, &s, &dctx, rq->curr, '*'); + scx_dump_task(sch, &s, &dctx, rq, rq->curr, '*'); list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) if (dump_all_tasks || scx_task_on_sched(sch, p)) - scx_dump_task(sch, &s, &dctx, p, ' '); + scx_dump_task(sch, &s, &dctx, rq, p, ' '); next: rq_unlock_irqrestore(rq, &rf); } @@ -6437,6 +6562,15 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops, 
init_irq_work(&sch->disable_irq_work, scx_disable_irq_workfn); kthread_init_work(&sch->disable_work, scx_disable_workfn); timer_setup(&sch->bypass_lb_timer, scx_bypass_lb_timerfn, 0); + + if (!alloc_cpumask_var(&sch->bypass_lb_donee_cpumask, GFP_KERNEL)) { + ret = -ENOMEM; + goto err_stop_helper; + } + if (!alloc_cpumask_var(&sch->bypass_lb_resched_cpumask, GFP_KERNEL)) { + ret = -ENOMEM; + goto err_free_lb_cpumask; + } sch->ops = *ops; rcu_assign_pointer(ops->priv, sch); @@ -6446,14 +6580,14 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops, char *buf = kzalloc(PATH_MAX, GFP_KERNEL); if (!buf) { ret = -ENOMEM; - goto err_stop_helper; + goto err_free_lb_resched; } cgroup_path(cgrp, buf, PATH_MAX); sch->cgrp_path = kstrdup(buf, GFP_KERNEL); kfree(buf); if (!sch->cgrp_path) { ret = -ENOMEM; - goto err_stop_helper; + goto err_free_lb_resched; } sch->cgrp = cgrp; @@ -6488,10 +6622,12 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops, #endif /* CONFIG_EXT_SUB_SCHED */ return sch; -#ifdef CONFIG_EXT_SUB_SCHED +err_free_lb_resched: + free_cpumask_var(sch->bypass_lb_resched_cpumask); +err_free_lb_cpumask: + free_cpumask_var(sch->bypass_lb_donee_cpumask); err_stop_helper: kthread_destroy_worker(sch->helper); -#endif err_free_pcpu: for_each_possible_cpu(cpu) { if (cpu == bypass_fail_cpu) @@ -6510,7 +6646,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops, err_free_sch: kfree(sch); err_put_cgrp: -#if defined(CONFIG_EXT_GROUP_SCHED) || defined(CONFIG_EXT_SUB_SCHED) +#ifdef CONFIG_EXT_SUB_SCHED cgroup_put(cgrp); #endif return ERR_PTR(ret); @@ -6601,7 +6737,7 @@ static void scx_root_enable_workfn(struct kthread_work *work) if (ret) goto err_unlock; -#if defined(CONFIG_EXT_GROUP_SCHED) || defined(CONFIG_EXT_SUB_SCHED) +#ifdef CONFIG_EXT_SUB_SCHED cgroup_get(cgrp); #endif sch = scx_alloc_and_add_sched(ops, cgrp, NULL); @@ -6639,8 +6775,10 @@ static void scx_root_enable_workfn(struct kthread_work 
*work) rcu_assign_pointer(scx_root, sch); ret = scx_link_sched(sch); - if (ret) + if (ret) { + cpus_read_unlock(); goto err_disable; + } scx_idle_enable(ops); @@ -6821,7 +6959,7 @@ static void scx_root_enable_workfn(struct kthread_work *work) * completion. sch's base reference will be put by bpf_scx_unreg(). */ scx_error(sch, "scx_root_enable() failed (%d)", ret); - kthread_flush_work(&sch->disable_work); + scx_flush_disable_work(sch); cmd->ret = 0; } @@ -7072,23 +7210,30 @@ static void scx_sub_enable_workfn(struct kthread_work *work) abort: put_task_struct(p); scx_task_iter_stop(&sti); - scx_enabling_sub_sched = NULL; + /* + * Undo __scx_init_task() for tasks we marked. scx_enable_task() never + * ran for @sch on them, so calling scx_disable_task() here would invoke + * ops.disable() without a matching ops.enable(). scx_enabling_sub_sched + * must stay set until SUB_INIT is cleared from every marked task - + * scx_disable_and_exit_task() reads it when a task exits concurrently. + */ scx_task_iter_start(&sti, sch->cgrp); while ((p = scx_task_iter_next_locked(&sti))) { if (p->scx.flags & SCX_TASK_SUB_INIT) { - __scx_disable_and_exit_task(sch, p); + scx_sub_init_cancel_task(sch, p); p->scx.flags &= ~SCX_TASK_SUB_INIT; } } scx_task_iter_stop(&sti); + scx_enabling_sub_sched = NULL; err_unlock_and_disable: /* we'll soon enter disable path, keep bypass on */ scx_cgroup_unlock(); percpu_up_write(&scx_fork_rwsem); err_disable: mutex_unlock(&scx_enable_mutex); - kthread_flush_work(&sch->disable_work); + scx_flush_disable_work(sch); cmd->ret = 0; } @@ -7349,7 +7494,7 @@ static void bpf_scx_unreg(void *kdata, struct bpf_link *link) struct scx_sched *sch = rcu_dereference_protected(ops->priv, true); scx_disable(sch, SCX_EXIT_UNREG); - kthread_flush_work(&sch->disable_work); + scx_flush_disable_work(sch); RCU_INIT_POINTER(ops->priv, NULL); kobject_put(&sch->kobj); } @@ -8033,12 +8178,22 @@ static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit, struct task_struct *p, u64 
dsq_id, u64 enq_flags) { struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq; - struct scx_sched *sch = src_dsq->sched; + struct scx_sched *sch; struct rq *this_rq, *src_rq, *locked_rq; bool dispatched = false; bool in_balance; unsigned long flags; + /* + * The verifier considers an iterator slot initialized on any + * KF_ITER_NEW return, so a BPF program may legally reach here after + * bpf_iter_scx_dsq_new() failed and left @kit->dsq NULL. + */ + if (unlikely(!src_dsq)) + return false; + + sch = src_dsq->sched; + if (!scx_vet_enq_flags(sch, dsq_id, &enq_flags)) return false; @@ -8526,7 +8681,7 @@ __bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice, guard(rcu)(); sch = scx_prog_sched(aux); - if (unlikely(!scx_task_on_sched(sch, p))) + if (unlikely(!sch || !scx_task_on_sched(sch, p))) return false; p->scx.slice = slice; @@ -8549,7 +8704,7 @@ __bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime, guard(rcu)(); sch = scx_prog_sched(aux); - if (unlikely(!scx_task_on_sched(sch, p))) + if (unlikely(!sch || !scx_task_on_sched(sch, p))) return false; p->scx.dsq_vtime = vtime; @@ -8633,11 +8788,12 @@ __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags, const struct bpf_prog_aux /** * scx_bpf_dsq_nr_queued - Return the number of queued tasks * @dsq_id: id of the DSQ + * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs * * Return the number of tasks in the DSQ matching @dsq_id. If not found, * -%ENOENT is returned. 
*/ -__bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) +__bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id, const struct bpf_prog_aux *aux) { struct scx_sched *sch; struct scx_dispatch_q *dsq; @@ -8645,7 +8801,7 @@ __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) preempt_disable(); - sch = rcu_dereference_sched(scx_root); + sch = scx_prog_sched(aux); if (unlikely(!sch)) { ret = -ENODEV; goto out; @@ -8677,21 +8833,21 @@ __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) /** * scx_bpf_destroy_dsq - Destroy a custom DSQ * @dsq_id: DSQ to destroy + * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs * * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is * empty and no further tasks are dispatched to it. Ignored if called on a DSQ * which doesn't exist. Can be called from any online scx_ops operations. */ -__bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id) +__bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id, const struct bpf_prog_aux *aux) { struct scx_sched *sch; - rcu_read_lock(); - sch = rcu_dereference(scx_root); + guard(rcu)(); + sch = scx_prog_sched(aux); if (sch) destroy_dsq(sch, dsq_id); - rcu_read_unlock(); } /** @@ -9445,8 +9601,8 @@ BTF_KFUNCS_START(scx_kfunc_ids_any) BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_IMPLICIT_ARGS | KF_RCU); BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_IMPLICIT_ARGS | KF_RCU); BTF_ID_FLAGS(func, scx_bpf_kick_cpu, KF_IMPLICIT_ARGS) -BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) -BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) +BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued, KF_IMPLICIT_ARGS) +BTF_ID_FLAGS(func, scx_bpf_destroy_dsq, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_IMPLICIT_ARGS | KF_RCU_PROTECTED | KF_RET_NULL) BTF_ID_FLAGS(func, scx_bpf_dsq_reenq, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2, KF_IMPLICIT_ARGS) @@ -9479,6 +9635,7 @@ BTF_KFUNCS_END(scx_kfunc_ids_any) static const struct 
btf_kfunc_id_set scx_kfunc_set_any = { .owner = THIS_MODULE, .set = &scx_kfunc_ids_any, + .filter = scx_kfunc_context_filter, }; /* @@ -9526,13 +9683,12 @@ static const u32 scx_kf_allow_flags[] = { }; /* - * Verifier-time filter for context-sensitive SCX kfuncs. Registered via the - * .filter field on each per-group btf_kfunc_id_set. The BPF core invokes this - * for every kfunc call in the registered hook (BPF_PROG_TYPE_STRUCT_OPS or + * Verifier-time filter for SCX kfuncs. Registered via the .filter field on + * each per-group btf_kfunc_id_set. The BPF core invokes this for every kfunc + * call in the registered hook (BPF_PROG_TYPE_STRUCT_OPS or * BPF_PROG_TYPE_SYSCALL), regardless of which set originally introduced the - * kfunc - so the filter must short-circuit on kfuncs it doesn't govern (e.g. - * scx_kfunc_ids_any) by falling through to "allow" when none of the - * context-sensitive sets contain the kfunc. + * kfunc - so the filter must short-circuit on kfuncs it doesn't govern by + * falling through to "allow" when none of the SCX sets contain the kfunc. */ int scx_kfunc_context_filter(const struct bpf_prog *prog, u32 kfunc_id) { @@ -9541,18 +9697,21 @@ int scx_kfunc_context_filter(const struct bpf_prog *prog, u32 kfunc_id) bool in_enqueue = btf_id_set8_contains(&scx_kfunc_ids_enqueue_dispatch, kfunc_id); bool in_dispatch = btf_id_set8_contains(&scx_kfunc_ids_dispatch, kfunc_id); bool in_cpu_release = btf_id_set8_contains(&scx_kfunc_ids_cpu_release, kfunc_id); + bool in_idle = btf_id_set8_contains(&scx_kfunc_ids_idle, kfunc_id); + bool in_any = btf_id_set8_contains(&scx_kfunc_ids_any, kfunc_id); u32 moff, flags; - /* Not a context-sensitive kfunc (e.g. from scx_kfunc_ids_any) - allow. */ - if (!(in_unlocked || in_select_cpu || in_enqueue || in_dispatch || in_cpu_release)) + /* Not an SCX kfunc - allow. */ + if (!(in_unlocked || in_select_cpu || in_enqueue || in_dispatch || + in_cpu_release || in_idle || in_any)) return 0; /* SYSCALL progs (e.g. 
BPF test_run()) may call unlocked and select_cpu kfuncs. */ if (prog->type == BPF_PROG_TYPE_SYSCALL) - return (in_unlocked || in_select_cpu) ? 0 : -EACCES; + return (in_unlocked || in_select_cpu || in_idle || in_any) ? 0 : -EACCES; if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) - return -EACCES; + return (in_any || in_idle) ? 0 : -EACCES; /* * add_subprog_and_kfunc() collects all kfunc calls, including dead code @@ -9565,14 +9724,15 @@ int scx_kfunc_context_filter(const struct bpf_prog *prog, u32 kfunc_id) return 0; /* - * Non-SCX struct_ops: only unlocked kfuncs are safe. The other - * context-sensitive kfuncs assume the rq lock is held by the SCX - * dispatch path, which doesn't apply to other struct_ops users. + * Non-SCX struct_ops: SCX kfuncs are not permitted. */ if (prog->aux->st_ops != &bpf_sched_ext_ops) - return in_unlocked ? 0 : -EACCES; + return -EACCES; /* SCX struct_ops: check the per-op allow list. */ + if (in_any || in_idle) + return 0; + moff = prog->aux->attach_st_ops_member_off; flags = scx_kf_allow_flags[SCX_MOFF_IDX(moff)]; @@ -9656,12 +9816,6 @@ static int __init scx_init(void) return ret; } - if (!alloc_cpumask_var(&scx_bypass_lb_donee_cpumask, GFP_KERNEL) || - !alloc_cpumask_var(&scx_bypass_lb_resched_cpumask, GFP_KERNEL)) { - pr_err("sched_ext: Failed to allocate cpumasks\n"); - return -ENOMEM; - } - return 0; } __initcall(scx_init); diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c index 443d12a3df67c6..7468560a6d8041 100644 --- a/kernel/sched/ext_idle.c +++ b/kernel/sched/ext_idle.c @@ -927,14 +927,24 @@ static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p, * Accessing p->cpus_ptr / p->nr_cpus_allowed needs either @p's rq * lock or @p's pi_lock. Three cases: * - * - inside ops.select_cpu(): try_to_wake_up() holds @p's pi_lock. + * - inside ops.select_cpu(): try_to_wake_up() holds the wake-up + * task's pi_lock; the wake-up task is recorded in kf_tasks[0] + * by SCX_CALL_OP_TASK_RET(). 
* - other rq-locked SCX op: scx_locked_rq() points at the held rq. * - truly unlocked (UNLOCKED ops, SYSCALL, non-SCX struct_ops): * nothing held, take pi_lock ourselves. + * + * In the first two cases, BPF schedulers may pass an arbitrary task + * that the held lock doesn't cover. Refuse those. */ if (this_rq()->scx.in_select_cpu) { + if (!scx_kf_arg_task_ok(sch, p)) + return -EINVAL; lockdep_assert_held(&p->pi_lock); - } else if (!scx_locked_rq()) { + } else if (scx_locked_rq()) { + if (task_rq(p) != scx_locked_rq()) + goto cross_task; + } else { raw_spin_lock_irqsave(&p->pi_lock, irq_flags); we_locked = true; } @@ -960,6 +970,11 @@ static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p, raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); return cpu; + +cross_task: + scx_error(sch, "select_cpu kfunc called cross-task on %s[%d]", + p->comm, p->pid); + return -EINVAL; } /** @@ -1467,6 +1482,7 @@ BTF_KFUNCS_END(scx_kfunc_ids_idle) static const struct btf_kfunc_id_set scx_kfunc_set_idle = { .owner = THIS_MODULE, .set = &scx_kfunc_ids_idle, + .filter = scx_kfunc_context_filter, }; /* diff --git a/kernel/sched/ext_idle.h b/kernel/sched/ext_idle.h index dc35f850481e1d..8d169d3bbdf98d 100644 --- a/kernel/sched/ext_idle.h +++ b/kernel/sched/ext_idle.h @@ -12,6 +12,7 @@ struct sched_ext_ops; +extern struct btf_id_set8 scx_kfunc_ids_idle; extern struct btf_id_set8 scx_kfunc_ids_select_cpu; void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops); diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h index 62ce4eaf6a3f56..a075732d4430d8 100644 --- a/kernel/sched/ext_internal.h +++ b/kernel/sched/ext_internal.h @@ -1075,6 +1075,8 @@ struct scx_sched { struct irq_work disable_irq_work; struct kthread_work disable_work; struct timer_list bypass_lb_timer; + cpumask_var_t bypass_lb_donee_cpumask; + cpumask_var_t bypass_lb_resched_cpumask; struct rcu_work rcu_work; /* all ancestors including self */ diff --git a/kernel/sched/fair.c 
b/kernel/sched/fair.c index 69361c63353ad5..728965851842e1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -847,13 +847,19 @@ static s64 entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 avrunt * Similarly, check that the entity didn't gain positive lag when DELAY_ZERO * is set. * - * Return true if the lag has been adjusted. + * Return true if the vlag has been modified. Specifically: + * + * se->vlag != avg_vruntime() - se->vruntime + * + * This can be due to clamping in entity_lag() or clamping due to + * sched_delayed. Either way, when vlag is modified and the entity is + * retained, the tree needs to be adjusted. */ static __always_inline bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) { - s64 vlag = entity_lag(cfs_rq, se, avg_vruntime(cfs_rq)); - bool ret; + u64 avruntime = avg_vruntime(cfs_rq); + s64 vlag = entity_lag(cfs_rq, se, avruntime); WARN_ON_ONCE(!se->on_rq); @@ -863,10 +869,9 @@ bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) if (sched_feat(DELAY_ZERO)) vlag = min(vlag, 0); } - ret = (vlag == se->vlag); se->vlag = vlag; - return ret; + return avruntime - vlag != se->vruntime; } /* @@ -1099,7 +1104,7 @@ static inline void cancel_protect_slice(struct sched_entity *se) * * Which allows tree pruning through eligibility. 
*/ -static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq, bool protect) +static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq, bool protect) { struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node; struct sched_entity *se = __pick_first_entity(cfs_rq); @@ -1170,11 +1175,6 @@ static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq, bool protect) return best; } -static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq) -{ - return __pick_eevdf(cfs_rq, true); -} - struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) { struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); @@ -5749,11 +5749,11 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags); * 4) do not run the "skip" process, if something else is available */ static struct sched_entity * -pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq) +pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq, bool protect) { struct sched_entity *se; - se = pick_eevdf(cfs_rq); + se = pick_eevdf(cfs_rq, protect); if (se->sched_delayed) { dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); /* @@ -9027,7 +9027,7 @@ static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_f { enum preempt_wakeup_action preempt_action = PREEMPT_WAKEUP_PICK; struct task_struct *donor = rq->donor; - struct sched_entity *se = &donor->se, *pse = &p->se; + struct sched_entity *nse, *se = &donor->se, *pse = &p->se; struct cfs_rq *cfs_rq = task_cfs_rq(donor); int cse_is_idle, pse_is_idle; @@ -9138,11 +9138,17 @@ static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_f } pick: + nse = pick_next_entity(rq, cfs_rq, preempt_action != PREEMPT_WAKEUP_SHORT); + /* If @p has become the most eligible task, force preemption */ + if (nse == pse) + goto preempt; + /* - * If @p has become the most eligible task, force preemption. + * Because p is enqueued, nse being null can only mean that we + * dequeued a delayed task. 
*/ - if (__pick_eevdf(cfs_rq, preempt_action != PREEMPT_WAKEUP_SHORT) == pse) - goto preempt; + if (!nse) + goto pick; if (sched_feat(RUN_TO_PARITY)) update_protect_slice(cfs_rq, se); @@ -9179,7 +9185,7 @@ static struct task_struct *pick_task_fair(struct rq *rq, struct rq_flags *rf) throttled |= check_cfs_rq_runtime(cfs_rq); - se = pick_next_entity(rq, cfs_rq); + se = pick_next_entity(rq, cfs_rq, true); if (!se) goto again; cfs_rq = group_cfs_rq(se); diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 6809b370e991d8..d1564db95a8f5a 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -373,10 +373,10 @@ __init static int init_annotated_branch_stats(void) int ret; ret = register_stat_tracer(&annotated_branch_stats); - if (!ret) { + if (ret) { printk(KERN_WARNING "Warning: could not register " "annotated branches stats\n"); - return 1; + return ret; } return 0; } @@ -438,10 +438,10 @@ __init static int all_annotated_branch_stats(void) int ret; ret = register_stat_tracer(&all_branch_stats); - if (!ret) { + if (ret) { printk(KERN_WARNING "Warning: could not register " "all branches stats\n"); - return 1; + return ret; } return 0; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index e1c73065dae51b..e0d3a0da26af51 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -1523,6 +1523,12 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, parg->offset = *size; *size += parg->type->size * (parg->count ?: 1); + if (*size > MAX_PROBE_EVENT_SIZE) { + ret = -E2BIG; + trace_probe_log_err(ctx->offset, EVENT_TOO_BIG); + goto fail; + } + if (parg->count) { len = strlen(parg->type->fmttype) + 6; parg->fmt = kmalloc(len, GFP_KERNEL); diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 9fc56c93713045..262d8707a3df79 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -38,6 +38,7 @@ #define MAX_BTF_ARGS_LEN 128 #define 
MAX_DENTRY_ARGS_LEN 256 #define MAX_STRING_SIZE PATH_MAX +#define MAX_PROBE_EVENT_SIZE 3072 /* Reserved field names */ #define FIELD_STRING_IP "__probe_ip" @@ -561,7 +562,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, C(BAD_TYPE4STR, "This type does not fit for string."),\ C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"),\ C(TOO_MANY_ARGS, "Too many arguments are specified"), \ - C(TOO_MANY_EARGS, "Too many entry arguments specified"), + C(TOO_MANY_EARGS, "Too many entry arguments specified"), \ + C(EVENT_TOO_BIG, "Event too big (too many fields?)"), #undef C #define C(a, b) TP_ERR_##a diff --git a/lib/maple_tree.c b/lib/maple_tree.c index d18d7ed9ab67a8..60ae5e6fc1ee69 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -2,7 +2,7 @@ /* * Maple Tree implementation * Copyright (c) 2018-2022 Oracle Corporation - * Authors: Liam R. Howlett + * Authors: Liam R. Howlett * Matthew Wilcox * Copyright (c) 2023 ByteDance * Author: Peng Zhang diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6074ed5f66f3fd..7a67ef5b67b666 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -441,10 +441,33 @@ static void rht_deferred_worker(struct work_struct *work) mutex_unlock(&ht->mutex); + /* + * Re-arm via @run_work, not @run_irq_work. + * rhashtable_free_and_destroy() drains async work as irq_work_sync() + * followed by cancel_work_sync(). If this site queued irq_work while + * cancel_work_sync() was waiting for us, irq_work_sync() would already + * have returned and the stale irq_work could fire post-teardown. + * cancel_work_sync() natively handles self-requeue on @run_work. + */ if (err) schedule_work(&ht->run_work); } +/* + * Insert-path callers can run under a raw spinlock (e.g. an insecure_elasticity + * user). Calling schedule_work() under that lock records caller_lock -> + * pool->lock -> pi_lock -> rq->__lock, closing a locking cycle if any of + * these is acquired in the reverse direction elsewhere. 
Bounce through + * irq_work so the schedule_work() runs with the caller's lock no longer held. + */ +static void rht_deferred_irq_work(struct irq_work *irq_work) +{ + struct rhashtable *ht = container_of(irq_work, struct rhashtable, + run_irq_work); + + schedule_work(&ht->run_work); +} + static int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl) { @@ -477,7 +500,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht, if (err == -EEXIST) err = 0; } else - schedule_work(&ht->run_work); + irq_work_queue(&ht->run_irq_work); return err; @@ -488,7 +511,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht, /* Schedule async rehash to retry allocation in process context. */ if (err == -ENOMEM) - schedule_work(&ht->run_work); + irq_work_queue(&ht->run_irq_work); return err; } @@ -538,7 +561,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, return NULL; } - if (elasticity <= 0) + if (elasticity <= 0 && !ht->p.insecure_elasticity) return ERR_PTR(-EAGAIN); return ERR_PTR(-ENOENT); @@ -568,7 +591,8 @@ static struct bucket_table *rhashtable_insert_one( if (unlikely(rht_grow_above_max(ht, tbl))) return ERR_PTR(-E2BIG); - if (unlikely(rht_grow_above_100(ht, tbl))) + if (unlikely(rht_grow_above_100(ht, tbl)) && + !ht->p.insecure_elasticity) return ERR_PTR(-EAGAIN); head = rht_ptr(bkt, tbl, hash); @@ -629,7 +653,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, rht_unlock(tbl, bkt, flags); if (inserted && rht_grow_above_75(ht, tbl)) - schedule_work(&ht->run_work); + irq_work_queue(&ht->run_irq_work); } } while (!IS_ERR_OR_NULL(new_tbl)); @@ -1084,6 +1108,7 @@ int rhashtable_init_noprof(struct rhashtable *ht, RCU_INIT_POINTER(ht->tbl, tbl); INIT_WORK(&ht->run_work, rht_deferred_worker); + init_irq_work(&ht->run_irq_work, rht_deferred_irq_work); return 0; } @@ -1149,6 +1174,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, struct bucket_table *tbl, *next_tbl; unsigned int i; + 
irq_work_sync(&ht->run_irq_work); cancel_work_sync(&ht->run_work); mutex_lock(&ht->mutex); diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 434d8a2fdd99c5..b9367c61e8b50d 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -2,7 +2,7 @@ /* * test_maple_tree.c: Test the maple tree API * Copyright (c) 2018-2022 Oracle Corporation - * Author: Liam R. Howlett + * Author: Liam R. Howlett * * Any tests that only require the interface of the tree. */ @@ -4021,6 +4021,6 @@ static void __exit maple_tree_harvest(void) module_init(maple_tree_seed); module_exit(maple_tree_harvest); -MODULE_AUTHOR("Liam R. Howlett "); +MODULE_AUTHOR("Liam R. Howlett "); MODULE_DESCRIPTION("maple tree API test module"); MODULE_LICENSE("GPL"); diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c index 554559d7297600..8494040b1ee48f 100644 --- a/mm/damon/lru_sort.c +++ b/mm/damon/lru_sort.c @@ -161,15 +161,6 @@ module_param(monitor_region_end, ulong, 0600); */ static unsigned long addr_unit __read_mostly = 1; -/* - * PID of the DAMON thread - * - * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread. - * Else, -1. 
- */ -static int kdamond_pid __read_mostly = -1; -module_param(kdamond_pid, int, 0400); - static struct damos_stat damon_lru_sort_hot_stat; DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_hot_stat, lru_sort_tried_hot_regions, lru_sorted_hot_regions, @@ -386,12 +377,8 @@ static int damon_lru_sort_turn(bool on) { int err; - if (!on) { - err = damon_stop(&ctx, 1); - if (!err) - kdamond_pid = -1; - return err; - } + if (!on) + return damon_stop(&ctx, 1); err = damon_lru_sort_apply_parameters(); if (err) @@ -400,9 +387,6 @@ static int damon_lru_sort_turn(bool on) err = damon_start(&ctx, 1, true); if (err) return err; - kdamond_pid = damon_kdamond_pid(ctx); - if (kdamond_pid < 0) - return kdamond_pid; return damon_call(ctx, &call_control); } @@ -430,42 +414,83 @@ module_param_cb(addr_unit, &addr_unit_param_ops, &addr_unit, 0600); MODULE_PARM_DESC(addr_unit, "Scale factor for DAMON_LRU_SORT to ops address conversion (default: 1)"); +static bool damon_lru_sort_enabled(void) +{ + if (!ctx) + return false; + return damon_is_running(ctx); +} + static int damon_lru_sort_enabled_store(const char *val, const struct kernel_param *kp) { - bool is_enabled = enabled; - bool enable; int err; - err = kstrtobool(val, &enable); + err = kstrtobool(val, &enabled); if (err) return err; - if (is_enabled == enable) + if (damon_lru_sort_enabled() == enabled) return 0; /* Called before init function. The function will handle this. */ if (!damon_initialized()) - goto set_param_out; + return 0; - err = damon_lru_sort_turn(enable); - if (err) - return err; + return damon_lru_sort_turn(enabled); +} -set_param_out: - enabled = enable; - return err; +static int damon_lru_sort_enabled_load(char *buffer, + const struct kernel_param *kp) +{ + return sprintf(buffer, "%c\n", damon_lru_sort_enabled() ? 
'Y' : 'N'); } static const struct kernel_param_ops enabled_param_ops = { .set = damon_lru_sort_enabled_store, - .get = param_get_bool, + .get = damon_lru_sort_enabled_load, }; module_param_cb(enabled, &enabled_param_ops, &enabled, 0600); MODULE_PARM_DESC(enabled, "Enable or disable DAMON_LRU_SORT (default: disabled)"); +static int damon_lru_sort_kdamond_pid_store(const char *val, + const struct kernel_param *kp) +{ + /* + * kdamond_pid is read-only, but kernel command line could write it. + * Do nothing here. + */ + return 0; +} + +static int damon_lru_sort_kdamond_pid_load(char *buffer, + const struct kernel_param *kp) +{ + int kdamond_pid = -1; + + if (ctx) { + kdamond_pid = damon_kdamond_pid(ctx); + if (kdamond_pid < 0) + kdamond_pid = -1; + } + return sprintf(buffer, "%d\n", kdamond_pid); +} + +static const struct kernel_param_ops kdamond_pid_param_ops = { + .set = damon_lru_sort_kdamond_pid_store, + .get = damon_lru_sort_kdamond_pid_load, +}; + +/* + * PID of the DAMON thread + * + * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread. + * Else, -1. + */ +module_param_cb(kdamond_pid, &kdamond_pid_param_ops, NULL, 0400); + static int __init damon_lru_sort_init(void) { int err; diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c index 86da147786583f..fe7fce26cf6ce3 100644 --- a/mm/damon/reclaim.c +++ b/mm/damon/reclaim.c @@ -144,15 +144,6 @@ static unsigned long addr_unit __read_mostly = 1; static bool skip_anon __read_mostly; module_param(skip_anon, bool, 0600); -/* - * PID of the DAMON thread - * - * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread. - * Else, -1. 
- */ -static int kdamond_pid __read_mostly = -1; -module_param(kdamond_pid, int, 0400); - static struct damos_stat damon_reclaim_stat; DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_reclaim_stat, reclaim_tried_regions, reclaimed_regions, quota_exceeds); @@ -288,12 +279,8 @@ static int damon_reclaim_turn(bool on) { int err; - if (!on) { - err = damon_stop(&ctx, 1); - if (!err) - kdamond_pid = -1; - return err; - } + if (!on) + return damon_stop(&ctx, 1); err = damon_reclaim_apply_parameters(); if (err) @@ -302,9 +289,6 @@ static int damon_reclaim_turn(bool on) err = damon_start(&ctx, 1, true); if (err) return err; - kdamond_pid = damon_kdamond_pid(ctx); - if (kdamond_pid < 0) - return kdamond_pid; return damon_call(ctx, &call_control); } @@ -332,42 +316,83 @@ module_param_cb(addr_unit, &addr_unit_param_ops, &addr_unit, 0600); MODULE_PARM_DESC(addr_unit, "Scale factor for DAMON_RECLAIM to ops address conversion (default: 1)"); +static bool damon_reclaim_enabled(void) +{ + if (!ctx) + return false; + return damon_is_running(ctx); +} + static int damon_reclaim_enabled_store(const char *val, const struct kernel_param *kp) { - bool is_enabled = enabled; - bool enable; int err; - err = kstrtobool(val, &enable); + err = kstrtobool(val, &enabled); if (err) return err; - if (is_enabled == enable) + if (damon_reclaim_enabled() == enabled) return 0; /* Called before init function. The function will handle this. */ if (!damon_initialized()) - goto set_param_out; + return 0; - err = damon_reclaim_turn(enable); - if (err) - return err; + return damon_reclaim_turn(enabled); +} -set_param_out: - enabled = enable; - return err; +static int damon_reclaim_enabled_load(char *buffer, + const struct kernel_param *kp) +{ + return sprintf(buffer, "%c\n", damon_reclaim_enabled() ? 
'Y' : 'N'); } static const struct kernel_param_ops enabled_param_ops = { .set = damon_reclaim_enabled_store, - .get = param_get_bool, + .get = damon_reclaim_enabled_load, }; module_param_cb(enabled, &enabled_param_ops, &enabled, 0600); MODULE_PARM_DESC(enabled, "Enable or disable DAMON_RECLAIM (default: disabled)"); +static int damon_reclaim_kdamond_pid_store(const char *val, + const struct kernel_param *kp) +{ + /* + * kdamond_pid is read-only, but kernel command line could write it. + * Do nothing here. + */ + return 0; +} + +static int damon_reclaim_kdamond_pid_load(char *buffer, + const struct kernel_param *kp) +{ + int kdamond_pid = -1; + + if (ctx) { + kdamond_pid = damon_kdamond_pid(ctx); + if (kdamond_pid < 0) + kdamond_pid = -1; + } + return sprintf(buffer, "%d\n", kdamond_pid); +} + +static const struct kernel_param_ops kdamond_pid_param_ops = { + .set = damon_reclaim_kdamond_pid_store, + .get = damon_reclaim_kdamond_pid_load, +}; + +/* + * PID of the DAMON thread + * + * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread. + * Else, -1. 
+ */ +module_param_cb(kdamond_pid, &kdamond_pid_param_ops, NULL, 0400); + static int __init damon_reclaim_init(void) { int err; diff --git a/mm/damon/stat.c b/mm/damon/stat.c index 99ba346f9e3257..3951b762cbddf6 100644 --- a/mm/damon/stat.c +++ b/mm/damon/stat.c @@ -19,14 +19,17 @@ static int damon_stat_enabled_store( const char *val, const struct kernel_param *kp); +static int damon_stat_enabled_load(char *buffer, + const struct kernel_param *kp); + static const struct kernel_param_ops enabled_param_ops = { .set = damon_stat_enabled_store, - .get = param_get_bool, + .get = damon_stat_enabled_load, }; static bool enabled __read_mostly = IS_ENABLED( CONFIG_DAMON_STAT_ENABLED_DEFAULT); -module_param_cb(enabled, &enabled_param_ops, &enabled, 0600); +module_param_cb(enabled, &enabled_param_ops, NULL, 0600); MODULE_PARM_DESC(enabled, "Enable of disable DAMON_STAT"); static unsigned long estimated_memory_bandwidth __read_mostly; @@ -273,17 +276,23 @@ static void damon_stat_stop(void) damon_stat_context = NULL; } +static bool damon_stat_enabled(void) +{ + if (!damon_stat_context) + return false; + return damon_is_running(damon_stat_context); +} + static int damon_stat_enabled_store( const char *val, const struct kernel_param *kp) { - bool is_enabled = enabled; int err; err = kstrtobool(val, &enabled); if (err) return err; - if (is_enabled == enabled) + if (damon_stat_enabled() == enabled) return 0; if (!damon_initialized()) @@ -293,16 +302,17 @@ static int damon_stat_enabled_store( */ return 0; - if (enabled) { - err = damon_stat_start(); - if (err) - enabled = false; - return err; - } + if (enabled) + return damon_stat_start(); damon_stat_stop(); return 0; } +static int damon_stat_enabled_load(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%c\n", damon_stat_enabled() ? 
'Y' : 'N'); +} + static int __init damon_stat_init(void) { int err = 0; diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 5186966dafb350..245d63808411a2 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -533,9 +533,14 @@ static ssize_t memcg_path_show(struct kobject *kobj, { struct damon_sysfs_scheme_filter *filter = container_of(kobj, struct damon_sysfs_scheme_filter, kobj); + int len; - return sysfs_emit(buf, "%s\n", + if (!mutex_trylock(&damon_sysfs_lock)) + return -EBUSY; + len = sysfs_emit(buf, "%s\n", filter->memcg_path ? filter->memcg_path : ""); + mutex_unlock(&damon_sysfs_lock); + return len; } static ssize_t memcg_path_store(struct kobject *kobj, @@ -550,8 +555,13 @@ static ssize_t memcg_path_store(struct kobject *kobj, return -ENOMEM; strscpy(path, buf, count + 1); + if (!mutex_trylock(&damon_sysfs_lock)) { + kfree(path); + return -EBUSY; + } kfree(filter->memcg_path); filter->memcg_path = path; + mutex_unlock(&damon_sysfs_lock); return count; } @@ -1187,8 +1197,13 @@ static ssize_t path_show(struct kobject *kobj, { struct damos_sysfs_quota_goal *goal = container_of(kobj, struct damos_sysfs_quota_goal, kobj); + int len; - return sysfs_emit(buf, "%s\n", goal->path ? goal->path : ""); + if (!mutex_trylock(&damon_sysfs_lock)) + return -EBUSY; + len = sysfs_emit(buf, "%s\n", goal->path ? 
goal->path : ""); + mutex_unlock(&damon_sysfs_lock); + return len; } static ssize_t path_store(struct kobject *kobj, @@ -1203,8 +1218,13 @@ static ssize_t path_store(struct kobject *kobj, return -ENOMEM; strscpy(path, buf, count + 1); + if (!mutex_trylock(&damon_sysfs_lock)) { + kfree(path); + return -EBUSY; + } kfree(goal->path); goal->path = path; + mutex_unlock(&damon_sysfs_lock); return count; } diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c index f83ae4998990f6..7693ccefd0c64f 100644 --- a/mm/hugetlb_cma.c +++ b/mm/hugetlb_cma.c @@ -204,6 +204,7 @@ void __init hugetlb_cma_reserve(void) */ per_node = DIV_ROUND_UP(hugetlb_cma_size, nodes_weight(hugetlb_bootmem_nodes)); + per_node = round_up(per_node, PAGE_SIZE << order); pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", hugetlb_cma_size / SZ_1M, per_node / SZ_1M); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c3d98ab41f1f1b..c03d4787d46680 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -805,12 +805,17 @@ static long memcg_state_val_in_pages(int idx, long val) * Used in mod_memcg_state() and mod_memcg_lruvec_state() to avoid race with * reparenting of non-hierarchical state_locals. 
*/ -static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg) +static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg, + bool *rcu_locked) { - if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + /* Rebinding can cause this value to be changed at runtime */ + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) { + *rcu_locked = false; return memcg; + } rcu_read_lock(); + *rcu_locked = true; while (memcg_is_dying(memcg)) memcg = parent_mem_cgroup(memcg); @@ -818,20 +823,21 @@ static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *me return memcg; } -static inline void get_non_dying_memcg_end(void) +static inline void get_non_dying_memcg_end(bool rcu_locked) { - if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + if (!rcu_locked) return; rcu_read_unlock(); } #else -static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg) +static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg, + bool *rcu_locked) { return memcg; } -static inline void get_non_dying_memcg_end(void) +static inline void get_non_dying_memcg_end(bool rcu_locked) { } #endif @@ -865,12 +871,14 @@ static void __mod_memcg_state(struct mem_cgroup *memcg, void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, int val) { + bool rcu_locked = false; + if (mem_cgroup_disabled()) return; - memcg = get_non_dying_memcg_start(memcg); + memcg = get_non_dying_memcg_start(memcg, &rcu_locked); __mod_memcg_state(memcg, idx, val); - get_non_dying_memcg_end(); + get_non_dying_memcg_end(rcu_locked); } #ifdef CONFIG_MEMCG_V1 @@ -933,14 +941,15 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec, struct pglist_data *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup_per_node *pn; struct mem_cgroup *memcg; + bool rcu_locked = false; pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - memcg = get_non_dying_memcg_start(pn->memcg); + memcg = get_non_dying_memcg_start(pn->memcg, 
&rcu_locked); pn = memcg->nodeinfo[pgdat->node_id]; __mod_memcg_lruvec_state(pn, idx, val); - get_non_dying_memcg_end(); + get_non_dying_memcg_end(rcu_locked); } /** diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 88cd53d4ba0929..833f743f309f9b 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1835,7 +1835,9 @@ static int balance_dirty_pages(struct bdi_writeback *wb, balance_domain_limits(mdtc, strictlimit); } - if (nr_dirty > gdtc->bg_thresh && !writeback_in_progress(wb)) + if (!writeback_in_progress(wb) && + (nr_dirty > gdtc->bg_thresh || + (strictlimit && gdtc->wb_dirty > gdtc->wb_bg_thresh))) wb_start_background_writeback(wb); /* @@ -1862,15 +1864,9 @@ static int balance_dirty_pages(struct bdi_writeback *wb, * Unconditionally start background writeback if it's not * already in progress. We need to do this because the global * dirty threshold check above (nr_dirty > gdtc->bg_thresh) - * doesn't account for these cases: - * - * a) strictlimit BDIs: throttling is calculated using per-wb - * thresholds. The per-wb threshold can be exceeded even when - * nr_dirty < gdtc->bg_thresh - * - * b) memcg-based throttling: memcg uses its own dirty count and - * thresholds and can trigger throttling even when global - * nr_dirty < gdtc->bg_thresh + * doesn't account for the memcg-based throttling case. 
memcg + * uses its own dirty count and thresholds and can trigger + * throttling even when global nr_dirty < gdtc->bg_thresh * * Writeback needs to be started else the writer stalls in the * throttle loop waiting for dirty pages to be written back diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 65e205111553ea..227d58dc3de6a7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7737,6 +7737,11 @@ struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned */ if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq())) return NULL; + + /* On UP, spin_trylock() always succeeds even when it is locked */ + if (!IS_ENABLED(CONFIG_SMP) && in_nmi()) + return NULL; + if (!pcp_allowed_order(order)) return NULL; diff --git a/mm/slub.c b/mm/slub.c index 161079ac5ba128..0baa906f39ab84 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5339,6 +5339,10 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node) if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq())) return NULL; + /* On UP, spin_trylock() always succeeds even when it is locked */ + if (!IS_ENABLED(CONFIG_SMP) && in_nmi()) + return NULL; + retry: if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) return NULL; diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 885da1e5646655..180bad42fc79d3 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -443,8 +443,10 @@ static int mfill_copy_folio_locked(struct folio *folio, unsigned long src_addr) return ret; } -static int mfill_copy_folio_retry(struct mfill_state *state, struct folio *folio) +static int mfill_copy_folio_retry(struct mfill_state *state, + struct folio *folio) { + const struct vm_uffd_ops *orig_ops = vma_uffd_ops(state->vma); unsigned long src_addr = state->src_addr; void *kaddr; int err; @@ -465,6 +467,14 @@ static int mfill_copy_folio_retry(struct mfill_state *state, struct folio *folio if (err) return err; + /* + * The VMA type may have changed while the lock was dropped + * (e.g. 
replaced with a hugetlb mapping), making the caller's + * ops pointer stale. + */ + if (vma_uffd_ops(state->vma) != orig_ops) + return -EAGAIN; + err = mfill_establish_pmd(state); if (err) return err; diff --git a/mm/util.c b/mm/util.c index 232c3930a662cf..3cc949a0b7ed4f 100644 --- a/mm/util.c +++ b/mm/util.c @@ -1232,7 +1232,7 @@ int __compat_vma_mmap(struct vm_area_desc *desc, /* Update the VMA from the descriptor. */ compat_set_vma_from_desc(vma, desc); /* Complete any specified mmap actions. */ - return mmap_action_complete(vma, &desc->action); + return mmap_action_complete(vma, &desc->action, /*is_compat=*/true); } EXPORT_SYMBOL(__compat_vma_mmap); @@ -1389,7 +1389,8 @@ static int call_vma_mapped(struct vm_area_struct *vma) } static int mmap_action_finish(struct vm_area_struct *vma, - struct mmap_action *action, int err) + struct mmap_action *action, int err, + bool is_compat) { size_t len; @@ -1400,8 +1401,12 @@ static int mmap_action_finish(struct vm_area_struct *vma, /* do_munmap() might take rmap lock, so release if held. */ maybe_rmap_unlock_action(vma, action); - if (!err) - return 0; + /* + * If this is invoked from the compatibility layer, post-mmap() hook + * logic will handle cleanup for us. + */ + if (!err || is_compat) + return err; /* * If an error occurs, unmap the VMA altogether and return an error. We @@ -1451,13 +1456,15 @@ EXPORT_SYMBOL(mmap_action_prepare); * mmap_action_complete - Execute VMA descriptor action. * @vma: The VMA to perform the action upon. * @action: The action to perform. + * @is_compat: Is this being invoked from the compatibility layer? * * Similar to mmap_action_prepare(). * - * Return: 0 on success, or error, at which point the VMA will be unmapped. + * Return: 0 on success, or error, at which point the VMA will be unmapped if + * !@is_compat. 
*/ int mmap_action_complete(struct vm_area_struct *vma, - struct mmap_action *action) + struct mmap_action *action, bool is_compat) { int err = 0; @@ -1478,7 +1485,7 @@ int mmap_action_complete(struct vm_area_struct *vma, break; } - return mmap_action_finish(vma, action, err); + return mmap_action_finish(vma, action, err, is_compat); } EXPORT_SYMBOL(mmap_action_complete); #else @@ -1500,7 +1507,8 @@ int mmap_action_prepare(struct vm_area_desc *desc) EXPORT_SYMBOL(mmap_action_prepare); int mmap_action_complete(struct vm_area_struct *vma, - struct mmap_action *action) + struct mmap_action *action, + bool is_compat) { int err = 0; @@ -1517,7 +1525,7 @@ int mmap_action_complete(struct vm_area_struct *vma, break; } - return mmap_action_finish(vma, action, err); + return mmap_action_finish(vma, action, err, is_compat); } EXPORT_SYMBOL(mmap_action_complete); #endif diff --git a/mm/vma.c b/mm/vma.c index 377321b4873484..d90791b00a7b81 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -2780,7 +2780,8 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr, __mmap_complete(&map, vma); if (have_mmap_prepare && allocated_new) { - error = mmap_action_complete(vma, &desc.action); + error = mmap_action_complete(vma, &desc.action, + /*is_compat=*/false); if (error) return error; } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index aa08651ec0df6d..c31a8615a8328d 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -4361,7 +4361,7 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align return NULL; if (p) { - memcpy(n, p, old_size); + memcpy(n, p, min(size, old_size)); vfree(p); } diff --git a/net/core/devmem.c b/net/core/devmem.c index cde4c89bc146ae..468344739db29e 100644 --- a/net/core/devmem.c +++ b/net/core/devmem.c @@ -297,8 +297,7 @@ net_devmem_bind_dmabuf(struct net_device *dev, for (i = 0; i < owner->area.num_niovs; i++) { niov = &owner->area.niovs[i]; - niov->type = NET_IOV_DMABUF; - niov->owner = &owner->area; + net_iov_init(niov, 
&owner->area, NET_IOV_DMABUF); page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), net_devmem_get_dma_addr(niov)); if (direction == DMA_TO_DEVICE) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 9e12524b67fadb..5d92160165071f 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3210,8 +3210,10 @@ int neigh_xmit(int index, struct net_device *dev, rcu_read_lock(); tbl = rcu_dereference(neigh_tables[index]); - if (!tbl) - goto out_unlock; + if (!tbl) { + rcu_read_unlock(); + goto out_kfree_skb; + } if (index == NEIGH_ARP_TABLE) { u32 key = *((u32 *)addr); @@ -3227,7 +3229,6 @@ int neigh_xmit(int index, struct net_device *dev, goto out_kfree_skb; } err = READ_ONCE(neigh->output)(neigh, skb); -out_unlock: rcu_read_unlock(); } else if (index == NEIGH_LINK_TABLE) { @@ -3237,11 +3238,10 @@ int neigh_xmit(int index, struct net_device *dev, goto out_kfree_skb; err = dev_queue_xmit(skb); } -out: return err; out_kfree_skb: kfree_skb(skb); - goto out; + return err; } EXPORT_SYMBOL(neigh_xmit); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index cd74beffd209ca..4381e0fc25bf47 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -704,6 +704,23 @@ static int netpoll_take_ipv4(struct netpoll *np, struct net_device *ndev) return 0; } +/* + * Test whether the caller left np->local_ip unset, so that + * netpoll_setup() should auto-populate it from the egress device. + * + * np->local_ip is a union of __be32 (IPv4) and struct in6_addr (IPv6), + * so an IPv6 address whose first 4 bytes are zero (e.g. ::1, ::2, + * IPv4-mapped ::ffff:a.b.c.d) must not be tested via the IPv4 arm — + * doing so would misclassify a caller-supplied address as unset and + * silently overwrite it with whatever address the device exposes. 
+ */ +static bool netpoll_local_ip_unset(const struct netpoll *np) +{ + if (np->ipv6) + return ipv6_addr_any(&np->local_ip.in6); + return !np->local_ip.ip; +} + int netpoll_setup(struct netpoll *np) { struct net *net = current->nsproxy->net_ns; @@ -747,7 +764,7 @@ int netpoll_setup(struct netpoll *np) rtnl_lock(); } - if (!np->local_ip.ip) { + if (netpoll_local_ip_unset(np)) { if (!np->ipv6) { err = netpoll_take_ipv4(np, ndev); if (err) diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 877bbf7a19389c..6e576dec80db42 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -327,6 +327,11 @@ static void page_pool_uninit(struct page_pool *pool) if (!pool->system) free_percpu(pool->recycle_stats); #endif + + if (pool->mp_ops) { + pool->mp_ops->destroy(pool); + static_branch_dec(&page_pool_mem_providers); + } } /** @@ -1146,11 +1151,6 @@ static void __page_pool_destroy(struct page_pool *pool) page_pool_unlist(pool); page_pool_uninit(pool); - if (pool->mp_ops) { - pool->mp_ops->destroy(pool); - static_branch_dec(&page_pool_mem_providers); - } - kfree(pool); } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 8a08d09b4c309b..2058ca860294b0 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -151,16 +151,6 @@ static struct mr_table *__ipmr_get_table(struct net *net, u32 id) return NULL; } -static struct mr_table *ipmr_get_table(struct net *net, u32 id) -{ - struct mr_table *mrt; - - rcu_read_lock(); - mrt = __ipmr_get_table(net, id); - rcu_read_unlock(); - return mrt; -} - static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { @@ -293,7 +283,7 @@ static void __net_exit ipmr_rules_exit_rtnl(struct net *net, struct mr_table *mrt, *next; list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { - list_del(&mrt->list); + list_del_rcu(&mrt->list); ipmr_free_table(mrt, dev_kill_list); } } @@ -315,28 +305,30 @@ bool ipmr_rule_default(const struct fib_rule *rule) } EXPORT_SYMBOL(ipmr_rule_default); #else -#define 
ipmr_for_each_table(mrt, net) \ - for (mrt = net->ipv4.mrt; mrt; mrt = NULL) - static struct mr_table *ipmr_mr_table_iter(struct net *net, struct mr_table *mrt) { if (!mrt) - return net->ipv4.mrt; + return rcu_dereference(net->ipv4.mrt); return NULL; } -static struct mr_table *ipmr_get_table(struct net *net, u32 id) +static struct mr_table *__ipmr_get_table(struct net *net, u32 id) { - return net->ipv4.mrt; + return rcu_dereference_check(net->ipv4.mrt, + lockdep_rtnl_is_held() || + !rcu_access_pointer(net->ipv4.mrt)); } -#define __ipmr_get_table ipmr_get_table +#define ipmr_for_each_table(mrt, net) \ + for (mrt = __ipmr_get_table(net, 0); mrt; mrt = NULL) static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { - *mrt = net->ipv4.mrt; + *mrt = rcu_dereference(net->ipv4.mrt); + if (!*mrt) + return -EAGAIN; return 0; } @@ -347,7 +339,8 @@ static int __net_init ipmr_rules_init(struct net *net) mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); if (IS_ERR(mrt)) return PTR_ERR(mrt); - net->ipv4.mrt = mrt; + + rcu_assign_pointer(net->ipv4.mrt, mrt); return 0; } @@ -358,9 +351,10 @@ static void __net_exit ipmr_rules_exit(struct net *net) static void __net_exit ipmr_rules_exit_rtnl(struct net *net, struct list_head *dev_kill_list) { - ipmr_free_table(net->ipv4.mrt, dev_kill_list); + struct mr_table *mrt = rcu_dereference_protected(net->ipv4.mrt, 1); - net->ipv4.mrt = NULL; + RCU_INIT_POINTER(net->ipv4.mrt, NULL); + ipmr_free_table(mrt, dev_kill_list); } static int ipmr_rules_dump(struct net *net, struct notifier_block *nb, @@ -381,6 +375,17 @@ bool ipmr_rule_default(const struct fib_rule *rule) EXPORT_SYMBOL(ipmr_rule_default); #endif +static struct mr_table *ipmr_get_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + + rcu_read_lock(); + mrt = __ipmr_get_table(net, id); + rcu_read_unlock(); + + return mrt; +} + static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg, const void *ptr) { @@ -441,12 +446,11 @@ static void 
ipmr_free_table(struct mr_table *mrt, struct list_head *dev_kill_lis WARN_ON_ONCE(!mr_can_free_table(net)); - timer_shutdown_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC | MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC, &ipmr_dev_kill_list); - rhltable_destroy(&mrt->mfc_hash); - kfree(mrt); + timer_shutdown_sync(&mrt->ipmr_expire_timer); + mr_table_free(mrt); WARN_ON_ONCE(!net_initialized(net) && !list_empty(&ipmr_dev_kill_list)); list_splice(&ipmr_dev_kill_list, dev_kill_list); @@ -1135,12 +1139,19 @@ static int ipmr_cache_report(const struct mr_table *mrt, static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb, struct net_device *dev) { + struct net *net = read_pnet(&mrt->net); const struct iphdr *iph = ip_hdr(skb); - struct mfc_cache *c; + struct mfc_cache *c = NULL; bool found = false; int err; spin_lock_bh(&mfc_unres_lock); + + if (!check_net(net)) { + err = -EINVAL; + goto err; + } + list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) { if (c->mfc_mcastgrp == iph->daddr && c->mfc_origin == iph->saddr) { @@ -1153,10 +1164,8 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, /* Create a new entry if allowable */ c = ipmr_cache_alloc_unres(); if (!c) { - spin_unlock_bh(&mfc_unres_lock); - - kfree_skb(skb); - return -ENOBUFS; + err = -ENOBUFS; + goto err; } /* Fill in the new cache entry */ @@ -1166,17 +1175,8 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, /* Reflect first query at mrouted. 
*/ err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); - - if (err < 0) { - /* If the report failed throw the cache entry - out - Brad Parker - */ - spin_unlock_bh(&mfc_unres_lock); - - ipmr_cache_free(c); - kfree_skb(skb); - return err; - } + if (err < 0) + goto err; atomic_inc(&mrt->cache_resolve_queue_len); list_add(&c->_c.list, &mrt->mfc_unres_queue); @@ -1189,18 +1189,26 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, /* See if we can append the packet */ if (c->_c.mfc_un.unres.unresolved.qlen > 3) { - kfree_skb(skb); + c = NULL; err = -ENOBUFS; - } else { - if (dev) { - skb->dev = dev; - skb->skb_iif = dev->ifindex; - } - skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb); - err = 0; + goto err; + } + + if (dev) { + skb->dev = dev; + skb->skb_iif = dev->ifindex; } + skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb); + spin_unlock_bh(&mfc_unres_lock); + return 0; + +err: + spin_unlock_bh(&mfc_unres_lock); + if (c) + ipmr_cache_free(c); + kfree_skb(skb); return err; } @@ -1346,7 +1354,7 @@ static void mroute_clean_tables(struct mr_table *mrt, int flags, } if (flags & MRT_FLUSH_MFC) { - if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { + if (atomic_read(&mrt->cache_resolve_queue_len) != 0 || !check_net(net)) { spin_lock_bh(&mfc_unres_lock); list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { list_del(&c->list); diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c index 37a3c144276c75..3930d612c3deec 100644 --- a/net/ipv4/ipmr_base.c +++ b/net/ipv4/ipmr_base.c @@ -28,6 +28,20 @@ void vif_device_init(struct vif_device *v, v->link = dev->ifindex; } +static void __mr_free_table(struct work_struct *work) +{ + struct mr_table *mrt = container_of(to_rcu_work(work), + struct mr_table, work); + + rhltable_destroy(&mrt->mfc_hash); + kfree(mrt); +} + +void mr_table_free(struct mr_table *mrt) +{ + queue_rcu_work(system_unbound_wq, &mrt->work); +} + struct mr_table * mr_table_alloc(struct net *net, u32 id, struct 
mr_table_ops *ops, @@ -50,6 +64,8 @@ mr_table_alloc(struct net *net, u32 id, kfree(mrt); return ERR_PTR(err); } + + INIT_RCU_WORK(&mrt->work, __mr_free_table); INIT_LIST_HEAD(&mrt->mfc_cache_list); INIT_LIST_HEAD(&mrt->mfc_unres_queue); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 1cdd9c28ab2da4..97ead883e4a13b 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -110,13 +110,25 @@ static inline int arp_packet_match(const struct arphdr *arphdr, arpptr += dev->addr_len; memcpy(&src_ipaddr, arpptr, sizeof(u32)); arpptr += sizeof(u32); - tgt_devaddr = arpptr; - arpptr += dev->addr_len; + + if (IS_ENABLED(CONFIG_FIREWIRE_NET) && dev->type == ARPHRD_IEEE1394) { + if (unlikely(memchr_inv(arpinfo->tgt_devaddr.mask, 0, + sizeof(arpinfo->tgt_devaddr.mask)))) + return 0; + + tgt_devaddr = NULL; + } else { + tgt_devaddr = arpptr; + arpptr += dev->addr_len; + } memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); if (NF_INVF(arpinfo, ARPT_INV_SRCDEVADDR, arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, - dev->addr_len)) || + dev->addr_len))) + return 0; + + if (tgt_devaddr && NF_INVF(arpinfo, ARPT_INV_TGTDEVADDR, arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len))) diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index a4e07e5e9c118d..f65dd339208e8a 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c @@ -40,6 +40,10 @@ target(struct sk_buff *skb, const struct xt_action_param *par) } arpptr += pln; if (mangle->flags & ARPT_MANGLE_TDEV) { + if (unlikely(IS_ENABLED(CONFIG_FIREWIRE_NET) && + skb->dev->type == ARPHRD_IEEE1394)) + return NF_DROP; + if (ARPT_DEV_ADDR_LEN_MAX < hln || (arpptr + hln > skb_tail_pointer(skb))) return NF_DROP; @@ -47,6 +51,10 @@ target(struct sk_buff *skb, const struct xt_action_param *par) } arpptr += hln; if (mangle->flags & ARPT_MANGLE_TIP) { + if (unlikely(IS_ENABLED(CONFIG_FIREWIRE_NET) && + 
skb->dev->type == ARPHRD_IEEE1394)) + return NF_DROP; + if (ARPT_MANGLE_ADDR_LEN_MAX < pln || (arpptr + pln > skb_tail_pointer(skb))) return NF_DROP; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 8d791a954cd6ce..322db13333c706 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -50,7 +50,8 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) { const struct inet_connection_sock *icsk = inet_csk(sk); - u32 remaining, user_timeout; + u32 user_timeout; + s32 remaining; s32 elapsed; user_timeout = READ_ONCE(icsk->icsk_user_timeout); @@ -61,7 +62,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) if (unlikely(elapsed < 0)) elapsed = 0; remaining = msecs_to_jiffies(user_timeout) - elapsed; - remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN); + remaining = max_t(int, remaining, TCP_TIMEOUT_MIN); return min_t(u32, remaining, when); } diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 95558fd6f447e3..03cbce842c1a7a 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -491,6 +491,7 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb) struct net *net = dev_net(skb->dev); struct inet6_dev *idev; struct ipv6hdr *oldhdr; + unsigned int chdr_len; unsigned char *buf; int accept_rpl_seg; int i, err; @@ -592,8 +593,10 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb) skb_pull(skb, ((hdr->hdrlen + 1) << 3)); skb_postpull_rcsum(skb, oldhdr, sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3)); - if (unlikely(!hdr->segments_left)) { - if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0, + chdr_len = sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3); + if (unlikely(!hdr->segments_left || + skb_headroom(skb) < chdr_len + skb->mac_len)) { + if (pskb_expand_head(skb, chdr_len + skb->mac_len, 0, GFP_ATOMIC)) { __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); @@ -603,7 
+606,7 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb) oldhdr = ipv6_hdr(skb); } - skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr)); + skb_push(skb, chdr_len); skb_reset_network_header(skb); skb_mac_header_rebuild(skb); skb_set_transport_header(skb, sizeof(struct ipv6hdr)); diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index c7942cf655671c..4e10adcd70e89d 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -287,7 +287,16 @@ static int rpl_input(struct sk_buff *skb) if (!dst) { ip6_route_input(skb); + + /* ip6_route_input() sets a NOREF dst; force a refcount on it + * before caching or further use. + */ + skb_dst_force(skb); dst = skb_dst(skb); + if (unlikely(!dst)) { + err = -ENETUNREACH; + goto drop; + } /* cache only if we don't create a dst reference loop */ if (!dst->error && lwtst != dst->lwtstate) { diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 9b64343ebad686..4c45c0a77d75d0 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -515,7 +515,16 @@ static int seg6_input_core(struct net *net, struct sock *sk, if (!dst) { ip6_route_input(skb); + + /* ip6_route_input() sets a NOREF dst; force a refcount on it + * before caching or further use. 
+ */ + skb_dst_force(skb); dst = skb_dst(skb); + if (unlikely(!dst)) { + err = -ENETUNREACH; + goto drop; + } /* cache only if we don't create a dst reference loop */ if (!dst->error && lwtst != dst->lwtstate) { diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c index 0ebf43be993993..c9f1e5af3cd3ec 100644 --- a/net/mptcp/pm_kernel.c +++ b/net/mptcp/pm_kernel.c @@ -1278,6 +1278,7 @@ static void __reset_counters(struct pm_nl_pernet *pernet) WRITE_ONCE(pernet->endp_signal_max, 0); WRITE_ONCE(pernet->endp_subflow_max, 0); WRITE_ONCE(pernet->endp_laminar_max, 0); + WRITE_ONCE(pernet->endp_fullmesh_max, 0); pernet->endpoints = 0; } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 718e910ff23f8f..4546a8b09884a3 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3302,7 +3302,8 @@ bool __mptcp_close(struct sock *sk, long timeout) goto cleanup; } - if (mptcp_data_avail(msk) || timeout < 0) { + if (mptcp_data_avail(msk) || timeout < 0 || + (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { /* If the msk has read data, or the caller explicitly ask it, * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose */ diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index de90a2897d2d8f..0efe40be2fde07 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -159,10 +159,10 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam lock_sock(sk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - bool slow = lock_sock_fast(ssk); - sock_set_timestamp(sk, optname, !!val); - unlock_sock_fast(ssk, slow); + lock_sock(ssk); + sock_set_timestamp(ssk, optname, !!val); + release_sock(ssk); } release_sock(sk); @@ -235,10 +235,10 @@ static int mptcp_setsockopt_sol_socket_timestamping(struct mptcp_sock *msk, mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - bool slow = lock_sock_fast(ssk); - sock_set_timestamping(sk, optname, 
timestamping); - unlock_sock_fast(ssk, slow); + lock_sock(ssk); + sock_set_timestamping(ssk, optname, timestamping); + release_sock(ssk); } release_sock(sk); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 645d2c43ebf7af..7e10fa65cbdd31 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -466,9 +466,13 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, if (!ih) goto out_unlock; - if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir]) - ct->proto.sctp.init[!dir] = 0; - ct->proto.sctp.init[dir] = 1; + /* Do not record INIT matching peer vtag (stale or retransmitted INIT). */ + if (old_state == SCTP_CONNTRACK_NONE || + ct->proto.sctp.vtag[!dir] != ih->init_tag) { + if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir]) + ct->proto.sctp.init[!dir] = 0; + ct->proto.sctp.init[dir] = 1; + } pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir); ct->proto.sctp.vtag[!dir] = ih->init_tag; diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 182cfb11944874..1eb55907d470d6 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -181,6 +181,57 @@ static int sip_parse_addr(const struct nf_conn *ct, const char *cp, return 1; } +/* Parse optional port number after IP address. + * Returns false on malformed input, true otherwise. + * If port is non-NULL, stores parsed port in network byte order. + * If no port is present, sets *port to default SIP port. 
+ */ +static bool sip_parse_port(const char *dptr, const char **endp, + const char *limit, __be16 *port) +{ + unsigned int p = 0; + int len = 0; + + if (dptr >= limit) + return false; + + if (*dptr != ':') { + if (port) + *port = htons(SIP_PORT); + if (endp) + *endp = dptr; + return true; + } + + dptr++; /* skip ':' */ + + while (dptr < limit && isdigit(*dptr)) { + p = p * 10 + (*dptr - '0'); + dptr++; + len++; + if (len > 5) /* max "65535" */ + return false; + } + + if (len == 0) + return false; + + /* reached limit while parsing port */ + if (dptr >= limit) + return false; + + if (p < 1024 || p > 65535) + return false; + + if (port) + *port = htons(p); + + if (endp) + *endp = dptr; + + return true; +} + /* skip ip address. returns its length. */ static int epaddr_len(const struct nf_conn *ct, const char *dptr, const char *limit, int *shift) @@ -193,11 +244,8 @@ static int epaddr_len(const struct nf_conn *ct, const char *dptr, return 0; } - /* Port number */ - if (*dptr == ':') { - dptr++; - dptr += digits_len(ct, dptr, limit, shift); - } + if (!sip_parse_port(dptr, &dptr, limit, NULL)) + return 0; return dptr - aux; } @@ -228,6 +276,51 @@ static int skp_epaddr_len(const struct nf_conn *ct, const char *dptr, return epaddr_len(ct, dptr, limit, shift); } +/* simple_strtoul stops after first non-number character. + * But as we're not dealing with c-strings, we can't rely on + * hitting \r,\n,\0 etc. before moving past end of buffer. + * + * This is a variant of simple_strtoul, but doesn't require + * a c-string. + * + * If value exceeds UINT_MAX, 0 is returned. 
+ */ +static unsigned int sip_strtouint(const char *cp, unsigned int len, char **endp) +{ + const unsigned int max = sizeof("4294967295"); + unsigned int olen = len; + const char *s = cp; + u64 result = 0; + + if (len > max) + len = max; + + while (olen > 0 && isdigit(*s)) { + unsigned int value; + + if (len == 0) + goto err; + + value = *s - '0'; + result = result * 10 + value; + + if (result > UINT_MAX) + goto err; + s++; + len--; + olen--; + } + + if (endp) + *endp = (char *)s; + + return result; +err: + if (endp) + *endp = (char *)cp; + return 0; +} + /* Parse a SIP request line of the form: * * Request-Line = Method SP Request-URI SP SIP-Version CRLF @@ -241,7 +334,6 @@ int ct_sip_parse_request(const struct nf_conn *ct, { const char *start = dptr, *limit = dptr + datalen, *end; unsigned int mlen; - unsigned int p; int shift = 0; /* Skip method and following whitespace */ @@ -267,14 +359,8 @@ int ct_sip_parse_request(const struct nf_conn *ct, if (!sip_parse_addr(ct, dptr, &end, addr, limit, true)) return -1; - if (end < limit && *end == ':') { - end++; - p = simple_strtoul(end, (char **)&end, 10); - if (p < 1024 || p > 65535) - return -1; - *port = htons(p); - } else - *port = htons(SIP_PORT); + if (!sip_parse_port(end, &end, limit, port)) + return -1; if (end == dptr) return 0; @@ -509,7 +595,6 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, union nf_inet_addr *addr, __be16 *port) { const char *c, *limit = dptr + datalen; - unsigned int p; int ret; ret = ct_sip_walk_headers(ct, dptr, dataoff ? 
*dataoff : 0, datalen, @@ -520,14 +605,8 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true)) return -1; - if (*c == ':') { - c++; - p = simple_strtoul(c, (char **)&c, 10); - if (p < 1024 || p > 65535) - return -1; - *port = htons(p); - } else - *port = htons(SIP_PORT); + if (!sip_parse_port(c, &c, limit, port)) + return -1; if (dataoff) *dataoff = c - dptr; @@ -609,7 +688,7 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, return 0; start += strlen(name); - *val = simple_strtoul(start, &end, 0); + *val = sip_strtouint(start, limit - start, (char **)&end); if (start == end) return -1; if (matchoff && matchlen) { @@ -1064,6 +1143,8 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff, mediaoff = sdpoff; for (i = 0; i < ARRAY_SIZE(sdp_media_types); ) { + char *end; + if (ct_sip_get_sdp_header(ct, *dptr, mediaoff, *datalen, SDP_HDR_MEDIA, SDP_HDR_UNSPEC, &mediaoff, &medialen) <= 0) @@ -1079,8 +1160,8 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff, mediaoff += t->len; medialen -= t->len; - port = simple_strtoul(*dptr + mediaoff, NULL, 10); - if (port == 0) + port = sip_strtouint(*dptr + mediaoff, *datalen - mediaoff, (char **)&end); + if (port == 0 || *dptr + mediaoff == end) continue; if (port < 1024 || port > 65535) { nf_ct_helper_log(skb, ct, "wrong port %u", port); @@ -1254,7 +1335,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff, */ if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES, &matchoff, &matchlen) > 0) - expires = simple_strtoul(*dptr + matchoff, NULL, 10); + expires = sip_strtouint(*dptr + matchoff, *datalen - matchoff, NULL); ret = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, SIP_HDR_CONTACT, NULL, @@ -1358,7 +1439,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int protoff, if (ct_sip_get_header(ct, *dptr, 0, *datalen, 
SIP_HDR_EXPIRES, &matchoff, &matchlen) > 0) - expires = simple_strtoul(*dptr + matchoff, NULL, 10); + expires = sip_strtouint(*dptr + matchoff, *datalen - matchoff, NULL); while (1) { unsigned int c_expires = expires; @@ -1418,10 +1499,12 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchoff, matchlen, matchend; unsigned int code, cseq, i; + char *end; if (*datalen < strlen("SIP/2.0 200")) return NF_ACCEPT; - code = simple_strtoul(*dptr + strlen("SIP/2.0 "), NULL, 10); + code = sip_strtouint(*dptr + strlen("SIP/2.0 "), + *datalen - strlen("SIP/2.0 "), NULL); if (!code) { nf_ct_helper_log(skb, ct, "cannot get code"); return NF_DROP; @@ -1432,8 +1515,8 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff, nf_ct_helper_log(skb, ct, "cannot parse cseq"); return NF_DROP; } - cseq = simple_strtoul(*dptr + matchoff, NULL, 10); - if (!cseq && *(*dptr + matchoff) != '0') { + cseq = sip_strtouint(*dptr + matchoff, *datalen - matchoff, (char **)&end); + if (*dptr + matchoff == end) { nf_ct_helper_log(skb, ct, "cannot get cseq"); return NF_DROP; } @@ -1482,6 +1565,7 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { const struct sip_handler *handler; + char *end; handler = &sip_handlers[i]; if (handler->request == NULL) @@ -1498,8 +1582,8 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, nf_ct_helper_log(skb, ct, "cannot parse cseq"); return NF_DROP; } - cseq = simple_strtoul(*dptr + matchoff, NULL, 10); - if (!cseq && *(*dptr + matchoff) != '0') { + cseq = sip_strtouint(*dptr + matchoff, *datalen - matchoff, (char **)&end); + if (*dptr + matchoff == end) { nf_ct_helper_log(skb, ct, "cannot get cseq"); return NF_DROP; } @@ -1575,7 +1659,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, &matchoff, &matchlen) <= 0) break; - clen = 
simple_strtoul(dptr + matchoff, (char **)&end, 10); + clen = sip_strtouint(dptr + matchoff, datalen - matchoff, (char **)&end); if (dptr + matchoff == end) break; diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c index c845b6d1a2bdf4..9fbfc6bff0c221 100644 --- a/net/netfilter/nf_nat_sip.c +++ b/net/netfilter/nf_nat_sip.c @@ -246,6 +246,7 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff, if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen, "rport=", &poff, &plen, &n) > 0 && + n >= 1024 && n <= 65535 && htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port && htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 8537b94653d374..d20ce5c36d3187 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -374,6 +374,38 @@ static void nft_netdev_hook_free_rcu(struct nft_hook *hook) call_rcu(&hook->rcu, __nft_netdev_hook_free_rcu); } +static void nft_netdev_hook_unlink_free_rcu(struct nft_hook *hook) +{ + list_del_rcu(&hook->list); + nft_netdev_hook_free_rcu(hook); +} + +static void nft_trans_hook_destroy(struct nft_trans_hook *trans_hook) +{ + list_del(&trans_hook->list); + kfree(trans_hook); +} + +static void nft_netdev_unregister_trans_hook(struct net *net, + const struct nft_table *table, + struct list_head *hook_list) +{ + struct nft_trans_hook *trans_hook, *next; + struct nf_hook_ops *ops; + struct nft_hook *hook; + + list_for_each_entry_safe(trans_hook, next, hook_list, list) { + hook = trans_hook->hook; + + if (!(table->flags & NFT_TABLE_F_DORMANT)) { + list_for_each_entry(ops, &hook->ops_list, list) + nf_unregister_net_hook(net, ops); + } + nft_netdev_hook_unlink_free_rcu(hook); + nft_trans_hook_destroy(trans_hook); + } +} + static void nft_netdev_unregister_hooks(struct net *net, struct list_head *hook_list, bool release_netdev) @@ -384,10 +416,8 @@ 
static void nft_netdev_unregister_hooks(struct net *net, list_for_each_entry_safe(hook, next, hook_list, list) { list_for_each_entry(ops, &hook->ops_list, list) nf_unregister_net_hook(net, ops); - if (release_netdev) { - list_del(&hook->list); - nft_netdev_hook_free_rcu(hook); - } + if (release_netdev) + nft_netdev_hook_unlink_free_rcu(hook); } } @@ -1942,15 +1972,69 @@ static int nft_nla_put_hook_dev(struct sk_buff *skb, struct nft_hook *hook) return nla_put_string(skb, attr, hook->ifname); } +struct nft_hook_dump_ctx { + struct nft_hook *first; + int n; +}; + +static int nft_dump_basechain_hook_one(struct sk_buff *skb, + struct nft_hook *hook, + struct nft_hook_dump_ctx *dump_ctx) +{ + if (!dump_ctx->first) + dump_ctx->first = hook; + + if (nft_nla_put_hook_dev(skb, hook)) + return -1; + + dump_ctx->n++; + + return 0; +} + +static int nft_dump_basechain_hook_list(struct sk_buff *skb, + const struct net *net, + const struct list_head *hook_list, + struct nft_hook_dump_ctx *dump_ctx) +{ + struct nft_hook *hook; + int err; + + list_for_each_entry_rcu(hook, hook_list, list, + lockdep_commit_lock_is_held(net)) { + err = nft_dump_basechain_hook_one(skb, hook, dump_ctx); + if (err < 0) + return err; + } + + return 0; +} + +static int nft_dump_basechain_trans_hook_list(struct sk_buff *skb, + const struct list_head *trans_hook_list, + struct nft_hook_dump_ctx *dump_ctx) +{ + struct nft_trans_hook *trans_hook; + int err; + + list_for_each_entry(trans_hook, trans_hook_list, list) { + err = nft_dump_basechain_hook_one(skb, trans_hook->hook, dump_ctx); + if (err < 0) + return err; + } + + return 0; +} + static int nft_dump_basechain_hook(struct sk_buff *skb, const struct net *net, int family, const struct nft_base_chain *basechain, - const struct list_head *hook_list) + const struct list_head *hook_list, + const struct list_head *trans_hook_list) { const struct nf_hook_ops *ops = &basechain->ops; - struct nft_hook *hook, *first = NULL; + struct nft_hook_dump_ctx dump_hook_ctx 
= {}; struct nlattr *nest, *nest_devs; - int n = 0; nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK); if (nest == NULL) @@ -1965,23 +2049,23 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, if (!nest_devs) goto nla_put_failure; - if (!hook_list) + if (!hook_list && !trans_hook_list) hook_list = &basechain->hook_list; - list_for_each_entry_rcu(hook, hook_list, list, - lockdep_commit_lock_is_held(net)) { - if (!first) - first = hook; - - if (nft_nla_put_hook_dev(skb, hook)) - goto nla_put_failure; - n++; + if (hook_list && + nft_dump_basechain_hook_list(skb, net, hook_list, &dump_hook_ctx)) { + goto nla_put_failure; + } else if (trans_hook_list && + nft_dump_basechain_trans_hook_list(skb, trans_hook_list, + &dump_hook_ctx)) { + goto nla_put_failure; } + nla_nest_end(skb, nest_devs); - if (n == 1 && - !hook_is_prefix(first) && - nla_put_string(skb, NFTA_HOOK_DEV, first->ifname)) + if (dump_hook_ctx.n == 1 && + !hook_is_prefix(dump_hook_ctx.first) && + nla_put_string(skb, NFTA_HOOK_DEV, dump_hook_ctx.first->ifname)) goto nla_put_failure; } nla_nest_end(skb, nest); @@ -1995,7 +2079,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, u32 portid, u32 seq, int event, u32 flags, int family, const struct nft_table *table, const struct nft_chain *chain, - const struct list_head *hook_list) + const struct list_head *hook_list, + const struct list_head *trans_hook_list) { struct nlmsghdr *nlh; @@ -2011,7 +2096,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, NFTA_CHAIN_PAD)) goto nla_put_failure; - if (!hook_list && + if (!hook_list && !trans_hook_list && (event == NFT_MSG_DELCHAIN || event == NFT_MSG_DESTROYCHAIN)) { nlmsg_end(skb, nlh); @@ -2022,7 +2107,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, const struct nft_base_chain *basechain = nft_base_chain(chain); struct nft_stats __percpu *stats; - if (nft_dump_basechain_hook(skb, net, family, basechain, hook_list)) + if 
(nft_dump_basechain_hook(skb, net, family, basechain, + hook_list, trans_hook_list)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_CHAIN_POLICY, @@ -2058,7 +2144,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, } static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event, - const struct list_head *hook_list) + const struct list_head *hook_list, + const struct list_head *trans_hook_list) { struct nftables_pernet *nft_net; struct sk_buff *skb; @@ -2078,7 +2165,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event, err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq, event, flags, ctx->family, ctx->table, - ctx->chain, hook_list); + ctx->chain, hook_list, trans_hook_list); if (err < 0) { kfree_skb(skb); goto err; @@ -2124,7 +2211,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb, NFT_MSG_NEWCHAIN, NLM_F_MULTI, table->family, table, - chain, NULL) < 0) + chain, NULL, NULL) < 0) goto done; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); @@ -2178,7 +2265,7 @@ static int nf_tables_getchain(struct sk_buff *skb, const struct nfnl_info *info, err = nf_tables_fill_chain_info(skb2, net, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, - 0, family, table, chain, NULL); + 0, family, table, chain, NULL, NULL); if (err < 0) goto err_fill_chain_info; @@ -2271,10 +2358,8 @@ void nf_tables_chain_destroy(struct nft_chain *chain) if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) { list_for_each_entry_safe(hook, next, - &basechain->hook_list, list) { - list_del_rcu(&hook->list); - nft_netdev_hook_free_rcu(hook); - } + &basechain->hook_list, list) + nft_netdev_hook_unlink_free_rcu(hook); } module_put(basechain->type->owner); if (rcu_access_pointer(basechain->stats)) { @@ -2343,8 +2428,12 @@ static struct nft_hook *nft_hook_list_find(struct list_head *hook_list, list_for_each_entry(hook, hook_list, list) { if (!strncmp(hook->ifname, this->ifname, - 
min(hook->ifnamelen, this->ifnamelen))) + min(hook->ifnamelen, this->ifnamelen))) { + if (hook->flags & NFT_HOOK_REMOVE) + continue; + return hook; + } } return NULL; @@ -2974,6 +3063,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, list_for_each_entry(ops, &h->ops_list, list) nf_unregister_net_hook(ctx->net, ops); } + /* hook.list is on stack, no need for list_del_rcu() */ list_del(&h->list); nft_netdev_hook_free_rcu(h); } @@ -3102,6 +3192,32 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info, return nf_tables_addchain(&ctx, family, policy, flags, extack); } +static int nft_trans_delhook(struct nft_hook *hook, + struct list_head *del_list) +{ + struct nft_trans_hook *trans_hook; + + trans_hook = kmalloc_obj(*trans_hook, GFP_KERNEL); + if (!trans_hook) + return -ENOMEM; + + trans_hook->hook = hook; + list_add_tail(&trans_hook->list, del_list); + hook->flags |= NFT_HOOK_REMOVE; + + return 0; +} + +static void nft_trans_delhook_abort(struct list_head *del_list) +{ + struct nft_trans_hook *trans_hook, *next; + + list_for_each_entry_safe(trans_hook, next, del_list, list) { + trans_hook->hook->flags &= ~NFT_HOOK_REMOVE; + nft_trans_hook_destroy(trans_hook); + } +} + static int nft_delchain_hook(struct nft_ctx *ctx, struct nft_base_chain *basechain, struct netlink_ext_ack *extack) @@ -3128,7 +3244,10 @@ static int nft_delchain_hook(struct nft_ctx *ctx, err = -ENOENT; goto err_chain_del_hook; } - list_move(&hook->list, &chain_del_list); + if (nft_trans_delhook(hook, &chain_del_list) < 0) { + err = -ENOMEM; + goto err_chain_del_hook; + } } trans = nft_trans_alloc_chain(ctx, NFT_MSG_DELCHAIN); @@ -3148,7 +3267,7 @@ static int nft_delchain_hook(struct nft_ctx *ctx, return 0; err_chain_del_hook: - list_splice(&chain_del_list, &basechain->hook_list); + nft_trans_delhook_abort(&chain_del_list); nft_chain_release_hook(&chain_hook); return err; @@ -8852,10 +8971,8 @@ static void 
__nft_unregister_flowtable_net_hooks(struct net *net, list_for_each_entry_safe(hook, next, hook_list, list) { list_for_each_entry(ops, &hook->ops_list, list) nft_unregister_flowtable_ops(net, flowtable, ops); - if (release_netdev) { - list_del(&hook->list); - nft_netdev_hook_free_rcu(hook); - } + if (release_netdev) + nft_netdev_hook_unlink_free_rcu(hook); } } @@ -8926,8 +9043,7 @@ static int nft_register_flowtable_net_hooks(struct net *net, nft_unregister_flowtable_ops(net, flowtable, ops); } - list_del_rcu(&hook->list); - nft_netdev_hook_free_rcu(hook); + nft_netdev_hook_unlink_free_rcu(hook); } return err; @@ -8937,9 +9053,25 @@ static void nft_hooks_destroy(struct list_head *hook_list) { struct nft_hook *hook, *next; - list_for_each_entry_safe(hook, next, hook_list, list) { - list_del_rcu(&hook->list); - nft_netdev_hook_free_rcu(hook); + list_for_each_entry_safe(hook, next, hook_list, list) + nft_netdev_hook_unlink_free_rcu(hook); +} + +static void nft_flowtable_unregister_trans_hook(struct net *net, + struct nft_flowtable *flowtable, + struct list_head *hook_list) +{ + struct nft_trans_hook *trans_hook, *next; + struct nf_hook_ops *ops; + struct nft_hook *hook; + + list_for_each_entry_safe(trans_hook, next, hook_list, list) { + hook = trans_hook->hook; + list_for_each_entry(ops, &hook->ops_list, list) + nft_unregister_flowtable_ops(net, flowtable, ops); + + nft_netdev_hook_unlink_free_rcu(hook); + nft_trans_hook_destroy(trans_hook); } } @@ -9028,8 +9160,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, nft_unregister_flowtable_ops(ctx->net, flowtable, ops); } - list_del_rcu(&hook->list); - nft_netdev_hook_free_rcu(hook); + nft_netdev_hook_unlink_free_rcu(hook); } return err; @@ -9202,7 +9333,10 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, err = -ENOENT; goto err_flowtable_del_hook; } - list_move(&hook->list, &flowtable_del_list); + if (nft_trans_delhook(hook, &flowtable_del_list) < 0) { + err = -ENOMEM; + goto 
err_flowtable_del_hook; + } } trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE, @@ -9223,7 +9357,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, return 0; err_flowtable_del_hook: - list_splice(&flowtable_del_list, &flowtable->hook_list); + nft_trans_delhook_abort(&flowtable_del_list); nft_flowtable_hook_release(&flowtable_hook); return err; @@ -9288,8 +9422,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, u32 portid, u32 seq, int event, u32 flags, int family, struct nft_flowtable *flowtable, - struct list_head *hook_list) + struct list_head *hook_list, + struct list_head *trans_hook_list) { + struct nft_trans_hook *trans_hook; struct nlattr *nest, *nest_devs; struct nft_hook *hook; struct nlmsghdr *nlh; @@ -9306,7 +9442,7 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, NFTA_FLOWTABLE_PAD)) goto nla_put_failure; - if (!hook_list && + if (!hook_list && !trans_hook_list && (event == NFT_MSG_DELFLOWTABLE || event == NFT_MSG_DESTROYFLOWTABLE)) { nlmsg_end(skb, nlh); @@ -9328,13 +9464,20 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, if (!nest_devs) goto nla_put_failure; - if (!hook_list) + if (!hook_list && !trans_hook_list) hook_list = &flowtable->hook_list; - list_for_each_entry_rcu(hook, hook_list, list, - lockdep_commit_lock_is_held(net)) { - if (nft_nla_put_hook_dev(skb, hook)) - goto nla_put_failure; + if (hook_list) { + list_for_each_entry_rcu(hook, hook_list, list, + lockdep_commit_lock_is_held(net)) { + if (nft_nla_put_hook_dev(skb, hook)) + goto nla_put_failure; + } + } else if (trans_hook_list) { + list_for_each_entry(trans_hook, trans_hook_list, list) { + if (nft_nla_put_hook_dev(skb, trans_hook->hook)) + goto nla_put_failure; + } } nla_nest_end(skb, nest_devs); nla_nest_end(skb, nest); @@ -9388,7 +9531,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb, NFT_MSG_NEWFLOWTABLE, NLM_F_MULTI | NLM_F_APPEND, table->family, - flowtable, 
NULL) < 0) + flowtable, NULL, NULL) < 0) goto done; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); @@ -9488,7 +9631,7 @@ static int nf_tables_getflowtable(struct sk_buff *skb, err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, NFT_MSG_NEWFLOWTABLE, 0, family, - flowtable, NULL); + flowtable, NULL, NULL); if (err < 0) goto err_fill_flowtable_info; @@ -9501,7 +9644,9 @@ static int nf_tables_getflowtable(struct sk_buff *skb, static void nf_tables_flowtable_notify(struct nft_ctx *ctx, struct nft_flowtable *flowtable, - struct list_head *hook_list, int event) + struct list_head *hook_list, + struct list_head *trans_hook_list, + int event) { struct nftables_pernet *nft_net = nft_pernet(ctx->net); struct sk_buff *skb; @@ -9521,7 +9666,8 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid, ctx->seq, event, flags, - ctx->family, flowtable, hook_list); + ctx->family, flowtable, + hook_list, trans_hook_list); if (err < 0) { kfree_skb(skb); goto err; @@ -9535,13 +9681,8 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) { - struct nft_hook *hook, *next; - flowtable->data.type->free(&flowtable->data); - list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { - list_del_rcu(&hook->list); - nft_netdev_hook_free_rcu(hook); - } + nft_hooks_destroy(&flowtable->hook_list); kfree(flowtable->name); module_put(flowtable->data.type->owner); kfree(flowtable); @@ -10060,9 +10201,7 @@ static void nft_commit_release(struct nft_trans *trans) break; case NFT_MSG_DELCHAIN: case NFT_MSG_DESTROYCHAIN: - if (nft_trans_chain_update(trans)) - nft_hooks_destroy(&nft_trans_chain_hooks(trans)); - else + if (!nft_trans_chain_update(trans)) nf_tables_chain_destroy(nft_trans_chain(trans)); break; case NFT_MSG_DELRULE: @@ -10083,9 +10222,7 @@ static void nft_commit_release(struct nft_trans *trans) 
break; case NFT_MSG_DELFLOWTABLE: case NFT_MSG_DESTROYFLOWTABLE: - if (nft_trans_flowtable_update(trans)) - nft_hooks_destroy(&nft_trans_flowtable_hooks(trans)); - else + if (!nft_trans_flowtable_update(trans)) nf_tables_flowtable_destroy(nft_trans_flowtable(trans)); break; } @@ -10845,31 +10982,28 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) if (nft_trans_chain_update(trans)) { nft_chain_commit_update(nft_trans_container_chain(trans)); nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN, - &nft_trans_chain_hooks(trans)); - list_splice(&nft_trans_chain_hooks(trans), - &nft_trans_basechain(trans)->hook_list); + &nft_trans_chain_hooks(trans), NULL); + list_splice_rcu(&nft_trans_chain_hooks(trans), + &nft_trans_basechain(trans)->hook_list); /* trans destroyed after rcu grace period */ } else { nft_chain_commit_drop_policy(nft_trans_container_chain(trans)); nft_clear(net, nft_trans_chain(trans)); - nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN, NULL); + nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN, NULL, NULL); nft_trans_destroy(trans); } break; case NFT_MSG_DELCHAIN: case NFT_MSG_DESTROYCHAIN: if (nft_trans_chain_update(trans)) { - nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN, + nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN, NULL, &nft_trans_chain_hooks(trans)); - if (!(table->flags & NFT_TABLE_F_DORMANT)) { - nft_netdev_unregister_hooks(net, - &nft_trans_chain_hooks(trans), - true); - } + nft_netdev_unregister_trans_hook(net, table, + &nft_trans_chain_hooks(trans)); } else { nft_chain_del(nft_trans_chain(trans)); nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN, - NULL); + NULL, NULL); nf_tables_unregister_hook(ctx.net, ctx.table, nft_trans_chain(trans)); } @@ -10975,14 +11109,16 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_flowtable_notify(&ctx, nft_trans_flowtable(trans), &nft_trans_flowtable_hooks(trans), + NULL, NFT_MSG_NEWFLOWTABLE); - list_splice(&nft_trans_flowtable_hooks(trans), - 
&nft_trans_flowtable(trans)->hook_list); + list_splice_rcu(&nft_trans_flowtable_hooks(trans), + &nft_trans_flowtable(trans)->hook_list); } else { nft_clear(net, nft_trans_flowtable(trans)); nf_tables_flowtable_notify(&ctx, nft_trans_flowtable(trans), NULL, + NULL, NFT_MSG_NEWFLOWTABLE); } nft_trans_destroy(trans); @@ -10992,16 +11128,18 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) if (nft_trans_flowtable_update(trans)) { nf_tables_flowtable_notify(&ctx, nft_trans_flowtable(trans), + NULL, &nft_trans_flowtable_hooks(trans), trans->msg_type); - nft_unregister_flowtable_net_hooks(net, - nft_trans_flowtable(trans), - &nft_trans_flowtable_hooks(trans)); + nft_flowtable_unregister_trans_hook(net, + nft_trans_flowtable(trans), + &nft_trans_flowtable_hooks(trans)); } else { list_del_rcu(&nft_trans_flowtable(trans)->list); nf_tables_flowtable_notify(&ctx, nft_trans_flowtable(trans), NULL, + NULL, trans->msg_type); nft_unregister_flowtable_net_hooks(net, nft_trans_flowtable(trans), @@ -11165,8 +11303,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) case NFT_MSG_DELCHAIN: case NFT_MSG_DESTROYCHAIN: if (nft_trans_chain_update(trans)) { - list_splice(&nft_trans_chain_hooks(trans), - &nft_trans_basechain(trans)->hook_list); + nft_trans_delhook_abort(&nft_trans_chain_hooks(trans)); } else { nft_use_inc_restore(&table->use); nft_clear(trans->net, nft_trans_chain(trans)); @@ -11280,8 +11417,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) case NFT_MSG_DELFLOWTABLE: case NFT_MSG_DESTROYFLOWTABLE: if (nft_trans_flowtable_update(trans)) { - list_splice(&nft_trans_flowtable_hooks(trans), - &nft_trans_flowtable(trans)->hook_list); + nft_trans_delhook_abort(&nft_trans_flowtable_hooks(trans)); } else { nft_use_inc_restore(&table->use); nft_clear(trans->net, nft_trans_flowtable(trans)); diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index 13808e9cd99933..94dccdcfa06bba 100644 
--- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c @@ -196,7 +196,8 @@ static int nft_bitwise_init_shift(struct nft_bitwise *priv, if (err < 0) return err; - if (priv->data.data[0] >= BITS_PER_TYPE(u32)) { + if (!priv->data.data[0] || + priv->data.data[0] >= BITS_PER_TYPE(u32)) { nft_data_release(&priv->data, desc.type); return -EINVAL; } diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c index cb6e8279010a4b..b5fa65558318f5 100644 --- a/net/netfilter/xt_policy.c +++ b/net/netfilter/xt_policy.c @@ -63,7 +63,7 @@ match_policy_in(const struct sk_buff *skb, const struct xt_policy_info *info, return 0; for (i = sp->len - 1; i >= 0; i--) { - pos = strict ? i - sp->len + 1 : 0; + pos = strict ? sp->len - i - 1 : 0; if (pos >= info->len) return 0; e = &info->pol[pos]; diff --git a/net/phonet/socket.c b/net/phonet/socket.c index c4af26357144eb..631a99cdbd006b 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -208,9 +208,15 @@ static int pn_socket_autobind(struct socket *sock) sa.spn_family = AF_PHONET; err = pn_socket_bind(sock, (struct sockaddr_unsized *)&sa, sizeof(struct sockaddr_pn)); - if (err != -EINVAL) + /* + * pn_socket_bind() also returns -EINVAL when sk_state != TCP_CLOSE + * without a prior bind, so -EINVAL alone is not sufficient to infer + * that the socket was already bound. Only treat it as "already + * bound" when the port is non-zero; otherwise propagate the error + * instead of crashing the kernel. 
+ */ + if (err != -EINVAL || unlikely(!pn_port(pn_sk(sock->sk)->sobject))) return err; - BUG_ON(!pn_port(pn_sk(sock->sk)->sobject)); return 0; /* socket was already bound */ } diff --git a/net/psp/psp-nl-gen.c b/net/psp/psp-nl-gen.c index 22a48d0fa378c9..953309952cef75 100644 --- a/net/psp/psp-nl-gen.c +++ b/net/psp/psp-nl-gen.c @@ -76,7 +76,7 @@ static const struct genl_split_ops psp_nl_ops[] = { .post_doit = psp_device_unlock, .policy = psp_dev_set_nl_policy, .maxattr = PSP_A_DEV_PSP_VERSIONS_ENA, - .flags = GENL_CMD_CAP_DO, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, }, { .cmd = PSP_CMD_KEY_ROTATE, @@ -85,7 +85,7 @@ static const struct genl_split_ops psp_nl_ops[] = { .post_doit = psp_device_unlock, .policy = psp_key_rotate_nl_policy, .maxattr = PSP_A_DEV_ID, - .flags = GENL_CMD_CAP_DO, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, }, { .cmd = PSP_CMD_RX_ASSOC, diff --git a/net/psp/psp_nl.c b/net/psp/psp_nl.c index 6afd7707ec12ea..0cc744a6e1c9bc 100644 --- a/net/psp/psp_nl.c +++ b/net/psp/psp_nl.c @@ -305,8 +305,13 @@ int psp_assoc_device_get_locked(const struct genl_split_ops *ops, psd = psp_dev_get_for_sock(socket->sk); if (psd) { - err = psp_dev_check_access(psd, genl_info_net(info)); - if (err) { + /* Extra care needed here, psp_dev_get_for_sock() only gives + * us access to struct psp_dev's memory, which is quite weak. 
+ */ + mutex_lock(&psd->lock); + if (!psp_dev_is_registered(psd) || + psp_dev_check_access(psd, genl_info_net(info))) { + mutex_unlock(&psd->lock); psp_dev_put(psd); psd = NULL; } @@ -319,7 +324,6 @@ int psp_assoc_device_get_locked(const struct genl_split_ops *ops, id = info->attrs[PSP_A_ASSOC_DEV_ID]; if (psd) { - mutex_lock(&psd->lock); if (id && psd->id != nla_get_u32(id)) { mutex_unlock(&psd->lock); NL_SET_ERR_MSG_ATTR(info->extack, id, diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 02e1fa4577ae60..13c6d1869a1447 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -813,7 +813,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, i++, k = (k + 1) % CAKE_SET_WAYS) { if (q->tags[outer_hash + k] == flow_hash) { if (i) - q->way_hits++; + WRITE_ONCE(q->way_hits, q->way_hits + 1); if (!q->flows[outer_hash + k].set) { /* need to increment host refcnts */ @@ -831,7 +831,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, for (i = 0; i < CAKE_SET_WAYS; i++, k = (k + 1) % CAKE_SET_WAYS) { if (!q->flows[outer_hash + k].set) { - q->way_misses++; + WRITE_ONCE(q->way_misses, q->way_misses + 1); allocate_src = cake_dsrc(flow_mode); allocate_dst = cake_ddst(flow_mode); goto found; @@ -841,7 +841,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, /* With no empty queues, default to the original * queue, accept the collision, update the host tags. 
*/ - q->way_collisions++; + WRITE_ONCE(q->way_collisions, q->way_collisions + 1); allocate_src = cake_dsrc(flow_mode); allocate_dst = cake_ddst(flow_mode); @@ -1379,9 +1379,9 @@ static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off) len -= off; if (qd->max_netlen < len) - qd->max_netlen = len; + WRITE_ONCE(qd->max_netlen, len); if (qd->min_netlen > len) - qd->min_netlen = len; + WRITE_ONCE(qd->min_netlen, len); len += q->rate_overhead; @@ -1401,9 +1401,9 @@ static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off) } if (qd->max_adjlen < len) - qd->max_adjlen = len; + WRITE_ONCE(qd->max_adjlen, len); if (qd->min_adjlen > len) - qd->min_adjlen = len; + WRITE_ONCE(qd->min_adjlen, len); return len; } @@ -1416,7 +1416,7 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb) u16 segs = qdisc_pkt_segs(skb); u32 len = qdisc_pkt_len(skb); - q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8); + WRITE_ONCE(q->avg_netoff, cake_ewma(q->avg_netoff, off << 16, 8)); if (segs == 1) return cake_calc_overhead(q, len, off); @@ -1590,16 +1590,17 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) } if (cobalt_queue_full(&flow->cvars, &b->cparams, now)) - b->unresponsive_flow_count++; + WRITE_ONCE(b->unresponsive_flow_count, + b->unresponsive_flow_count + 1); len = qdisc_pkt_len(skb); q->buffer_used -= skb->truesize; b->backlogs[idx] -= len; - b->tin_backlog -= len; + WRITE_ONCE(b->tin_backlog, b->tin_backlog - len); sch->qstats.backlog -= len; flow->dropped++; - b->tin_dropped++; + WRITE_ONCE(b->tin_dropped, b->tin_dropped + 1); if (q->config->rate_flags & CAKE_FLAG_INGRESS) cake_advance_shaper(q, b, skb, now, true); @@ -1795,7 +1796,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, } if (unlikely(len > b->max_skblen)) - b->max_skblen = len; + WRITE_ONCE(b->max_skblen, len); if (qdisc_pkt_segs(skb) > 1 && q->config->rate_flags & CAKE_FLAG_SPLIT_GSO) { struct sk_buff *segs, 
*nskb; @@ -1819,15 +1820,15 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, numsegs++; slen += segs->len; q->buffer_used += segs->truesize; - b->packets++; + WRITE_ONCE(b->packets, b->packets + 1); } /* stats */ - b->bytes += slen; b->backlogs[idx] += slen; - b->tin_backlog += slen; sch->qstats.backlog += slen; q->avg_window_bytes += slen; + WRITE_ONCE(b->bytes, b->bytes + slen); + WRITE_ONCE(b->tin_backlog, b->tin_backlog + slen); qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); consume_skb(skb); @@ -1843,10 +1844,10 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, ack = cake_ack_filter(q, flow); if (ack) { - b->ack_drops++; + WRITE_ONCE(b->ack_drops, b->ack_drops + 1); sch->qstats.drops++; ack_pkt_len = qdisc_pkt_len(ack); - b->bytes += ack_pkt_len; + WRITE_ONCE(b->bytes, b->bytes + ack_pkt_len); q->buffer_used += skb->truesize - ack->truesize; if (q->config->rate_flags & CAKE_FLAG_INGRESS) cake_advance_shaper(q, b, ack, now, true); @@ -1859,12 +1860,12 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, } /* stats */ - b->packets++; - b->bytes += len - ack_pkt_len; + WRITE_ONCE(b->packets, b->packets + 1); b->backlogs[idx] += len - ack_pkt_len; - b->tin_backlog += len - ack_pkt_len; sch->qstats.backlog += len - ack_pkt_len; q->avg_window_bytes += len - ack_pkt_len; + WRITE_ONCE(b->bytes, b->bytes + len - ack_pkt_len); + WRITE_ONCE(b->tin_backlog, b->tin_backlog + len - ack_pkt_len); } if (q->overflow_timeout) @@ -1894,9 +1895,9 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; b = div64_u64(b, window_interval); - q->avg_peak_bandwidth = - cake_ewma(q->avg_peak_bandwidth, b, - b > q->avg_peak_bandwidth ? 2 : 8); + WRITE_ONCE(q->avg_peak_bandwidth, + cake_ewma(q->avg_peak_bandwidth, b, + b > q->avg_peak_bandwidth ? 
2 : 8)); q->avg_window_bytes = 0; q->avg_window_begin = now; @@ -1917,11 +1918,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (!flow->set) { list_add_tail(&flow->flowchain, &b->new_flows); } else { - b->decaying_flow_count--; + WRITE_ONCE(b->decaying_flow_count, b->decaying_flow_count - 1); list_move_tail(&flow->flowchain, &b->new_flows); } flow->set = CAKE_SET_SPARSE; - b->sparse_flow_count++; + WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count + 1); flow->deficit = cake_get_flow_quantum(b, flow, q->config->flow_mode); } else if (flow->set == CAKE_SET_SPARSE_WAIT) { @@ -1929,15 +1930,15 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, * in the bulk rotation. */ flow->set = CAKE_SET_BULK; - b->sparse_flow_count--; - b->bulk_flow_count++; + WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1); + WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count + 1); cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode); cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode); } if (q->buffer_used > q->buffer_max_used) - q->buffer_max_used = q->buffer_used; + WRITE_ONCE(q->buffer_max_used, q->buffer_used); if (q->buffer_used <= q->buffer_limit) return NET_XMIT_SUCCESS; @@ -1977,7 +1978,7 @@ static struct sk_buff *cake_dequeue_one(struct Qdisc *sch) skb = dequeue_head(flow); len = qdisc_pkt_len(skb); b->backlogs[q->cur_flow] -= len; - b->tin_backlog -= len; + WRITE_ONCE(b->tin_backlog, b->tin_backlog - len); sch->qstats.backlog -= len; q->buffer_used -= skb->truesize; sch->q.qlen--; @@ -2042,7 +2043,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch) cake_configure_rates(sch, new_rate, true); q->last_checked_active = now; - q->active_queues = num_active_qs; + WRITE_ONCE(q->active_queues, num_active_qs); } begin: @@ -2149,8 +2150,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch) */ if (flow->set == CAKE_SET_SPARSE) { if (flow->head) { - b->sparse_flow_count--; - b->bulk_flow_count++; + 
WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1); + WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count + 1); cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode); cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode); @@ -2177,7 +2178,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch) if (!skb) { /* this queue was actually empty */ if (cobalt_queue_empty(&flow->cvars, &b->cparams, now)) - b->unresponsive_flow_count--; + WRITE_ONCE(b->unresponsive_flow_count, + b->unresponsive_flow_count - 1); if (flow->cvars.p_drop || flow->cvars.count || ktime_before(now, flow->cvars.drop_next)) { @@ -2187,32 +2189,32 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch) list_move_tail(&flow->flowchain, &b->decaying_flows); if (flow->set == CAKE_SET_BULK) { - b->bulk_flow_count--; + WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count - 1); cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode); cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode); - b->decaying_flow_count++; + WRITE_ONCE(b->decaying_flow_count, b->decaying_flow_count + 1); } else if (flow->set == CAKE_SET_SPARSE || flow->set == CAKE_SET_SPARSE_WAIT) { - b->sparse_flow_count--; - b->decaying_flow_count++; + WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1); + WRITE_ONCE(b->decaying_flow_count, b->decaying_flow_count + 1); } flow->set = CAKE_SET_DECAYING; } else { /* remove empty queue from the flowchain */ list_del_init(&flow->flowchain); if (flow->set == CAKE_SET_SPARSE || - flow->set == CAKE_SET_SPARSE_WAIT) - b->sparse_flow_count--; - else if (flow->set == CAKE_SET_BULK) { - b->bulk_flow_count--; + flow->set == CAKE_SET_SPARSE_WAIT) { + WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1); + } else if (flow->set == CAKE_SET_BULK) { + WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count - 1); cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode); cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode); - } else - 
b->decaying_flow_count--; - + } else { + WRITE_ONCE(b->decaying_flow_count, b->decaying_flow_count - 1); + } flow->set = CAKE_SET_NONE; } goto begin; @@ -2234,7 +2236,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch) b->tin_deficit -= len; } flow->dropped++; - b->tin_dropped++; + WRITE_ONCE(b->tin_dropped, b->tin_dropped + 1); qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); qdisc_qstats_drop(sch); qdisc_dequeue_drop(sch, skb, reason); @@ -2242,17 +2244,19 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch) goto retry; } - b->tin_ecn_mark += !!flow->cvars.ecn_marked; + WRITE_ONCE(b->tin_ecn_mark, b->tin_ecn_mark + !!flow->cvars.ecn_marked); qdisc_bstats_update(sch, skb); WRITE_ONCE(q->last_active, now); /* collect delay stats */ delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb))); - b->avge_delay = cake_ewma(b->avge_delay, delay, 8); - b->peak_delay = cake_ewma(b->peak_delay, delay, - delay > b->peak_delay ? 2 : 8); - b->base_delay = cake_ewma(b->base_delay, delay, - delay < b->base_delay ? 2 : 8); + WRITE_ONCE(b->avge_delay, cake_ewma(b->avge_delay, delay, 8)); + WRITE_ONCE(b->peak_delay, + cake_ewma(b->peak_delay, delay, + delay > b->peak_delay ? 2 : 8)); + WRITE_ONCE(b->base_delay, + cake_ewma(b->base_delay, delay, + delay < b->base_delay ? 2 : 8)); len = cake_advance_shaper(q, b, skb, now, false); flow->deficit -= len; @@ -2329,9 +2333,9 @@ static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, u8 rate_shft = 0; u64 rate_ns = 0; - b->flow_quantum = 1514; if (rate) { - b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL); + WRITE_ONCE(b->flow_quantum, + max(min(rate >> 12, 1514ULL), 300ULL)); rate_shft = 34; rate_ns = ((u64)NSEC_PER_SEC) << rate_shft; rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate)); @@ -2339,9 +2343,11 @@ static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, rate_ns >>= 1; rate_shft--; } - } /* else unlimited, ie. 
zero delay */ - - b->tin_rate_bps = rate; + } else { + /* else unlimited, ie. zero delay */ + WRITE_ONCE(b->flow_quantum, 1514); + } + WRITE_ONCE(b->tin_rate_bps, rate); b->tin_rate_ns = rate_ns; b->tin_rate_shft = rate_shft; @@ -2350,10 +2356,11 @@ static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, byte_target_ns = (byte_target * rate_ns) >> rate_shft; - b->cparams.target = max((byte_target_ns * 3) / 2, target_ns); - b->cparams.interval = max(rtt_est_ns + - b->cparams.target - target_ns, - b->cparams.target * 2); + WRITE_ONCE(b->cparams.target, + max((byte_target_ns * 3) / 2, target_ns)); + WRITE_ONCE(b->cparams.interval, + max(rtt_est_ns + b->cparams.target - target_ns, + b->cparams.target * 2)); b->cparams.mtu_time = byte_target_ns; b->cparams.p_inc = 1 << 24; /* 1/256 */ b->cparams.p_dec = 1 << 20; /* 1/4096 */ @@ -2611,25 +2618,27 @@ static void cake_reconfigure(struct Qdisc *sch) { struct cake_sched_data *qd = qdisc_priv(sch); struct cake_sched_config *q = qd->config; + u32 buffer_limit; cake_configure_rates(sch, qd->config->rate_bps, false); if (q->buffer_config_limit) { - qd->buffer_limit = q->buffer_config_limit; + buffer_limit = q->buffer_config_limit; } else if (q->rate_bps) { u64 t = q->rate_bps * q->interval; do_div(t, USEC_PER_SEC / 4); - qd->buffer_limit = max_t(u32, t, 4U << 20); + buffer_limit = max_t(u32, t, 4U << 20); } else { - qd->buffer_limit = ~0; + buffer_limit = ~0; } sch->flags &= ~TCQ_F_CAN_BYPASS; - qd->buffer_limit = min(qd->buffer_limit, - max(sch->limit * psched_mtu(qdisc_dev(sch)), - q->buffer_config_limit)); + WRITE_ONCE(qd->buffer_limit, + min(buffer_limit, + max(sch->limit * psched_mtu(qdisc_dev(sch)), + q->buffer_config_limit))); } static int cake_config_change(struct cake_sched_config *q, struct nlattr *opt, @@ -2774,10 +2783,10 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, return ret; if (overhead_changed) { - qd->max_netlen = 0; - qd->max_adjlen = 0; - qd->min_netlen = ~0; - qd->min_adjlen 
= ~0; + WRITE_ONCE(qd->max_netlen, 0); + WRITE_ONCE(qd->max_adjlen, 0); + WRITE_ONCE(qd->min_netlen, ~0); + WRITE_ONCE(qd->min_adjlen, ~0); } if (qd->tins) { @@ -2995,15 +3004,15 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d) goto nla_put_failure; \ } while (0) - PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth); - PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit); - PUT_STAT_U32(MEMORY_USED, q->buffer_max_used); - PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16)); - PUT_STAT_U32(MAX_NETLEN, q->max_netlen); - PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen); - PUT_STAT_U32(MIN_NETLEN, q->min_netlen); - PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen); - PUT_STAT_U32(ACTIVE_QUEUES, q->active_queues); + PUT_STAT_U64(CAPACITY_ESTIMATE64, READ_ONCE(q->avg_peak_bandwidth)); + PUT_STAT_U32(MEMORY_LIMIT, READ_ONCE(q->buffer_limit)); + PUT_STAT_U32(MEMORY_USED, READ_ONCE(q->buffer_max_used)); + PUT_STAT_U32(AVG_NETOFF, ((READ_ONCE(q->avg_netoff) + 0x8000) >> 16)); + PUT_STAT_U32(MAX_NETLEN, READ_ONCE(q->max_netlen)); + PUT_STAT_U32(MAX_ADJLEN, READ_ONCE(q->max_adjlen)); + PUT_STAT_U32(MIN_NETLEN, READ_ONCE(q->min_netlen)); + PUT_STAT_U32(MIN_ADJLEN, READ_ONCE(q->min_adjlen)); + PUT_STAT_U32(ACTIVE_QUEUES, READ_ONCE(q->active_queues)); #undef PUT_STAT_U32 #undef PUT_STAT_U64 @@ -3029,38 +3038,38 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d) if (!ts) goto nla_put_failure; - PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps); - PUT_TSTAT_U64(SENT_BYTES64, b->bytes); - PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog); + PUT_TSTAT_U64(THRESHOLD_RATE64, READ_ONCE(b->tin_rate_bps)); + PUT_TSTAT_U64(SENT_BYTES64, READ_ONCE(b->bytes)); + PUT_TSTAT_U32(BACKLOG_BYTES, READ_ONCE(b->tin_backlog)); PUT_TSTAT_U32(TARGET_US, - ktime_to_us(ns_to_ktime(b->cparams.target))); + ktime_to_us(ns_to_ktime(READ_ONCE(b->cparams.target)))); PUT_TSTAT_U32(INTERVAL_US, - ktime_to_us(ns_to_ktime(b->cparams.interval))); + 
ktime_to_us(ns_to_ktime(READ_ONCE(b->cparams.interval)))); - PUT_TSTAT_U32(SENT_PACKETS, b->packets); - PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped); - PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark); - PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops); + PUT_TSTAT_U32(SENT_PACKETS, READ_ONCE(b->packets)); + PUT_TSTAT_U32(DROPPED_PACKETS, READ_ONCE(b->tin_dropped)); + PUT_TSTAT_U32(ECN_MARKED_PACKETS, READ_ONCE(b->tin_ecn_mark)); + PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, READ_ONCE(b->ack_drops)); PUT_TSTAT_U32(PEAK_DELAY_US, - ktime_to_us(ns_to_ktime(b->peak_delay))); + ktime_to_us(ns_to_ktime(READ_ONCE(b->peak_delay)))); PUT_TSTAT_U32(AVG_DELAY_US, - ktime_to_us(ns_to_ktime(b->avge_delay))); + ktime_to_us(ns_to_ktime(READ_ONCE(b->avge_delay)))); PUT_TSTAT_U32(BASE_DELAY_US, - ktime_to_us(ns_to_ktime(b->base_delay))); + ktime_to_us(ns_to_ktime(READ_ONCE(b->base_delay)))); - PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits); - PUT_TSTAT_U32(WAY_MISSES, b->way_misses); - PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions); + PUT_TSTAT_U32(WAY_INDIRECT_HITS, READ_ONCE(b->way_hits)); + PUT_TSTAT_U32(WAY_MISSES, READ_ONCE(b->way_misses)); + PUT_TSTAT_U32(WAY_COLLISIONS, READ_ONCE(b->way_collisions)); - PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count + - b->decaying_flow_count); - PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count); - PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count); - PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen); + PUT_TSTAT_U32(SPARSE_FLOWS, READ_ONCE(b->sparse_flow_count) + + READ_ONCE(b->decaying_flow_count)); + PUT_TSTAT_U32(BULK_FLOWS, READ_ONCE(b->bulk_flow_count)); + PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, READ_ONCE(b->unresponsive_flow_count)); + PUT_TSTAT_U32(MAX_SKBLEN, READ_ONCE(b->max_skblen)); - PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum); + PUT_TSTAT_U32(FLOW_QUANTUM, READ_ONCE(b->flow_quantum)); nla_nest_end(d->skb, ts); } @@ -3298,10 +3307,10 @@ static int cake_mq_change(struct Qdisc *sch, struct nlattr *opt, struct cake_sched_data *qd = 
qdisc_priv(chld); if (overhead_changed) { - qd->max_netlen = 0; - qd->max_adjlen = 0; - qd->min_netlen = ~0; - qd->min_adjlen = ~0; + WRITE_ONCE(qd->max_netlen, 0); + WRITE_ONCE(qd->max_adjlen, 0); + WRITE_ONCE(qd->min_netlen, ~0); + WRITE_ONCE(qd->min_adjlen, ~0); } if (qd->tins) { diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 94df8e741a9791..2875bcdb18a413 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -229,7 +229,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* Draw a packet at random from queue and compare flow */ if (choke_match_random(q, skb, &idx)) { - q->stats.matched++; + WRITE_ONCE(q->stats.matched, q->stats.matched + 1); choke_drop_by_idx(sch, idx, to_free); goto congestion_drop; } @@ -241,11 +241,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, qdisc_qstats_overlimit(sch); if (use_harddrop(q) || !use_ecn(q) || !INET_ECN_set_ce(skb)) { - q->stats.forced_drop++; + WRITE_ONCE(q->stats.forced_drop, + q->stats.forced_drop + 1); goto congestion_drop; } - q->stats.forced_mark++; + WRITE_ONCE(q->stats.forced_mark, + q->stats.forced_mark + 1); } else if (++q->vars.qcount) { if (red_mark_probability(p, &q->vars, q->vars.qavg)) { q->vars.qcount = 0; @@ -253,11 +255,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, qdisc_qstats_overlimit(sch); if (!use_ecn(q) || !INET_ECN_set_ce(skb)) { - q->stats.prob_drop++; + WRITE_ONCE(q->stats.prob_drop, + q->stats.prob_drop + 1); goto congestion_drop; } - q->stats.prob_mark++; + WRITE_ONCE(q->stats.prob_mark, + q->stats.prob_mark + 1); } } else q->vars.qR = red_random(p); @@ -272,7 +276,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, return NET_XMIT_SUCCESS; } - q->stats.pdrop++; + WRITE_ONCE(q->stats.pdrop, q->stats.pdrop + 1); return qdisc_drop(skb, sch, to_free); congestion_drop: @@ -461,10 +465,12 @@ static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct choke_sched_data *q = 
qdisc_priv(sch); struct tc_choke_xstats st = { - .early = q->stats.prob_drop + q->stats.forced_drop, - .marked = q->stats.prob_mark + q->stats.forced_mark, - .pdrop = q->stats.pdrop, - .matched = q->stats.matched, + .early = READ_ONCE(q->stats.prob_drop) + + READ_ONCE(q->stats.forced_drop), + .marked = READ_ONCE(q->stats.prob_mark) + + READ_ONCE(q->stats.forced_mark), + .pdrop = READ_ONCE(q->stats.pdrop), + .matched = READ_ONCE(q->stats.matched), }; return gnet_stats_copy_app(d, &st, sizeof(st)); diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index 154c70f489f289..7becbf5362b316 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -509,18 +509,19 @@ static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb) static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct fq_pie_sched_data *q = qdisc_priv(sch); - struct tc_fq_pie_xstats st = { - .packets_in = q->stats.packets_in, - .overlimit = q->stats.overlimit, - .overmemory = q->overmemory, - .dropped = q->stats.dropped, - .ecn_mark = q->stats.ecn_mark, - .new_flow_count = q->new_flow_count, - .memory_usage = q->memory_usage, - }; + struct tc_fq_pie_xstats st = { 0 }; struct list_head *pos; sch_tree_lock(sch); + + st.packets_in = q->stats.packets_in; + st.overlimit = q->stats.overlimit; + st.overmemory = q->overmemory; + st.dropped = q->stats.dropped; + st.ecn_mark = q->stats.ecn_mark; + st.new_flow_count = q->new_flow_count; + st.memory_usage = q->memory_usage; + list_for_each(pos, &q->new_flows) st.new_flows_len++; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 20df1c08b1e9d0..bc18e1976b6e07 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -227,10 +227,10 @@ static bool loss_4state(struct netem_sched_data *q) if (rnd < clg->a4) { clg->state = LOST_IN_GAP_PERIOD; return true; - } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { + } else if (rnd < clg->a1 + clg->a4) { clg->state = LOST_IN_BURST_PERIOD; return true; - } else if 
(clg->a1 + clg->a4 < rnd) { + } else { clg->state = TX_IN_GAP_PERIOD; } @@ -247,9 +247,9 @@ static bool loss_4state(struct netem_sched_data *q) case LOST_IN_BURST_PERIOD: if (rnd < clg->a3) clg->state = TX_IN_BURST_PERIOD; - else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { + else if (rnd < clg->a2 + clg->a3) { clg->state = TX_IN_GAP_PERIOD; - } else if (clg->a2 + clg->a3 < rnd) { + } else { clg->state = LOST_IN_BURST_PERIOD; return true; } @@ -524,7 +524,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, 1 << get_random_u32_below(8); } - if (unlikely(q->t_len >= sch->limit)) { + if (unlikely(sch->q.qlen >= sch->limit)) { /* re-link segs, so that qdisc_drop_all() frees them all */ skb->next = segs; qdisc_drop_all(skb, sch, to_free); @@ -659,9 +659,8 @@ static void get_slot_next(struct netem_sched_data *q, u64 now) if (!q->slot_dist) next_delay = q->slot_config.min_delay + - (get_random_u32() * - (q->slot_config.max_delay - - q->slot_config.min_delay) >> 32); + mul_u64_u32_shr(q->slot_config.max_delay - q->slot_config.min_delay, + get_random_u32(), 32); else next_delay = tabledist(q->slot_config.dist_delay, (s32)(q->slot_config.dist_jitter), @@ -827,6 +826,39 @@ static int get_dist_table(struct disttable **tbl, const struct nlattr *attr) return 0; } +static int validate_time(const struct nlattr *attr, const char *name, + struct netlink_ext_ack *extack) +{ + if (nla_get_s64(attr) < 0) { + NL_SET_ERR_MSG_ATTR_FMT(extack, attr, "negative %s", name); + return -EINVAL; + } + return 0; +} + +static int validate_slot(const struct nlattr *attr, struct netlink_ext_ack *extack) +{ + const struct tc_netem_slot *c = nla_data(attr); + + if (c->min_delay < 0 || c->max_delay < 0) { + NL_SET_ERR_MSG_ATTR(extack, attr, "negative slot delay"); + return -EINVAL; + } + if (c->min_delay > c->max_delay) { + NL_SET_ERR_MSG_ATTR(extack, attr, "slot min delay greater than max delay"); + return -EINVAL; + } + if (c->dist_delay < 0 || c->dist_jitter < 0) { + 
NL_SET_ERR_MSG_ATTR(extack, attr, "negative dist delay"); + return -EINVAL; + } + if (c->max_packets < 0 || c->max_bytes < 0) { + NL_SET_ERR_MSG_ATTR(extack, attr, "negative slot limit"); + return -EINVAL; + } + return 0; +} + static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) { const struct tc_netem_slot *c = nla_data(attr); @@ -1040,6 +1072,24 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, goto table_free; } + if (tb[TCA_NETEM_SLOT]) { + ret = validate_slot(tb[TCA_NETEM_SLOT], extack); + if (ret) + goto table_free; + } + + if (tb[TCA_NETEM_LATENCY64]) { + ret = validate_time(tb[TCA_NETEM_LATENCY64], "latency", extack); + if (ret) + goto table_free; + } + + if (tb[TCA_NETEM_JITTER64]) { + ret = validate_time(tb[TCA_NETEM_JITTER64], "jitter", extack); + if (ret) + goto table_free; + } + sch_tree_lock(sch); /* backup q->clg and q->loss_model */ old_clg = q->clg; @@ -1112,11 +1162,10 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, /* capping jitter to the range acceptable by tabledist() */ q->jitter = min_t(s64, abs(q->jitter), INT_MAX); - if (tb[TCA_NETEM_PRNG_SEED]) + if (tb[TCA_NETEM_PRNG_SEED]) { q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]); - else - q->prng.seed = get_random_u64(); - prandom_seed_state(&q->prng.prng_state, q->prng.seed); + prandom_seed_state(&q->prng.prng_state, q->prng.seed); + } unlock: sch_tree_unlock(sch); @@ -1139,6 +1188,9 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt, return -EINVAL; q->loss_model = CLG_RANDOM; + q->prng.seed = get_random_u64(); + prandom_seed_state(&q->prng.prng_state, q->prng.seed); + ret = netem_change(sch, opt, extack); if (ret) pr_info("netem: change failed\n"); diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index a47a09d764001d..45245157e00a69 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -634,7 +634,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, queue = 
skb_get_queue_mapping(skb); child = q->qdiscs[queue]; - if (unlikely(!child)) + if (unlikely(child == &noop_qdisc)) return qdisc_drop(skb, sch, to_free); if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) { @@ -717,7 +717,7 @@ static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq, int len; u8 tc; - if (unlikely(!child)) + if (unlikely(child == &noop_qdisc)) return NULL; if (TXTIME_ASSIST_IS_ENABLED(q->flags)) @@ -2184,6 +2184,9 @@ static int taprio_graft(struct Qdisc *sch, unsigned long cl, if (!dev_queue) return -EINVAL; + if (!new) + new = &noop_qdisc; + if (dev->flags & IFF_UP) dev_deactivate(dev, false); @@ -2197,14 +2200,14 @@ static int taprio_graft(struct Qdisc *sch, unsigned long cl, *old = q->qdiscs[cl - 1]; if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { WARN_ON_ONCE(dev_graft_qdisc(dev_queue, new) != *old); - if (new) + if (new != &noop_qdisc) qdisc_refcount_inc(new); - if (*old) + if (*old && *old != &noop_qdisc) qdisc_put(*old); } q->qdiscs[cl - 1] = new; - if (new) + if (new != &noop_qdisc) new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; if (dev->flags & IFF_UP) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 7b823d75914191..8e89a870780c49 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1556,6 +1556,12 @@ static enum sctp_disposition sctp_sf_do_unexpected_init( /* Tag the variable length parameters. */ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr)); + if (asoc->state >= SCTP_STATE_ESTABLISHED) { + /* Discard INIT matching peer vtag after handshake completion (stale INIT). */ + if (ntohl(chunk->subh.init_hdr->init_tag) == asoc->peer.i.init_tag) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + /* Verify the INIT chunk before processing it. 
*/ err_chunk = NULL; if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, diff --git a/net/tls/tls.h b/net/tls/tls.h index e8f81a00652002..12f44cb649c964 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -188,6 +188,7 @@ int tls_strp_dev_init(void); void tls_strp_dev_exit(void); void tls_strp_done(struct tls_strparser *strp); +void __tls_strp_done(struct tls_strparser *strp); void tls_strp_stop(struct tls_strparser *strp); int tls_strp_init(struct tls_strparser *strp, struct sock *sk); void tls_strp_data_ready(struct tls_strparser *strp); diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 98e12f0ff57e51..c72e8831762730 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -624,6 +624,12 @@ void tls_strp_done(struct tls_strparser *strp) WARN_ON(!strp->stopped); cancel_work_sync(&strp->work); + __tls_strp_done(strp); +} + +/* For setup error paths where the strparser was initialized but never armed. */ +void __tls_strp_done(struct tls_strparser *strp) +{ tls_strp_anchor_free(strp); } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 94d2ae0daa8cd3..798243eabb1f87 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -2624,8 +2624,12 @@ void tls_sw_free_ctx_rx(struct tls_context *tls_ctx) void tls_sw_free_resources_rx(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_rx *ctx; + + ctx = tls_sw_ctx_rx(tls_ctx); tls_sw_release_resources_rx(sk); + __tls_strp_done(&ctx->strp); tls_sw_free_ctx_rx(tls_ctx); } diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index f862988c1e865e..7a8963595bf982 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -375,10 +375,10 @@ static void hvs_open_connection(struct vmbus_channel *chan) } else { sndbuf = max_t(int, sk->sk_sndbuf, RINGBUFFER_HVS_SND_SIZE); sndbuf = min_t(int, sndbuf, RINGBUFFER_HVS_MAX_SIZE); - sndbuf = ALIGN(sndbuf, HV_HYP_PAGE_SIZE); + sndbuf = VMBUS_RING_SIZE(sndbuf); rcvbuf = 
max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE); rcvbuf = min_t(int, rcvbuf, RINGBUFFER_HVS_MAX_SIZE); - rcvbuf = ALIGN(rcvbuf, HV_HYP_PAGE_SIZE); + rcvbuf = VMBUS_RING_SIZE(rcvbuf); } chan->max_pkt_size = HVS_MAX_PKT_SIZE; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 97801966bf32c2..0f704380a8c811 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2966,7 +2966,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, { const struct cred_security_struct *crsec = selinux_cred(current_cred()); struct superblock_security_struct *sbsec; - struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count); + struct xattr *xattr; u32 newsid, clen; u16 newsclass; int rc; @@ -2992,6 +2992,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, !(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; + xattr = lsm_get_xattr_slot(xattrs, xattr_count); if (xattr) { rc = security_sid_to_context_force(newsid, &context, &clen); @@ -3208,15 +3209,13 @@ static inline int task_avdcache_search(struct task_security_struct *tsec, * @tsec: the task's security state * @isec: the inode associated with the cache entry * @avd: the AVD to cache - * @audited: the permission audit bitmask to cache * - * Update the AVD cache in @tsec with the @avdc and @audited info associated + * Update the AVD cache in @tsec with the @avd info associated * with @isec. 
*/ static inline void task_avdcache_update(struct task_security_struct *tsec, struct inode_security_struct *isec, - struct av_decision *avd, - u32 audited) + struct av_decision *avd) { int spot; @@ -3228,9 +3227,7 @@ static inline void task_avdcache_update(struct task_security_struct *tsec, spot = (tsec->avdcache.dir_spot + 1) & (TSEC_AVDC_DIR_SIZE - 1); tsec->avdcache.dir_spot = spot; tsec->avdcache.dir[spot].isid = isec->sid; - tsec->avdcache.dir[spot].audited = audited; - tsec->avdcache.dir[spot].allowed = avd->allowed; - tsec->avdcache.dir[spot].permissive = avd->flags & AVD_FLAGS_PERMISSIVE; + tsec->avdcache.dir[spot].avd = *avd; tsec->avdcache.permissive_neveraudit = (avd->flags == (AVD_FLAGS_PERMISSIVE|AVD_FLAGS_NEVERAUDIT)); } @@ -3251,6 +3248,7 @@ static int selinux_inode_permission(struct inode *inode, int requested) struct task_security_struct *tsec; struct inode_security_struct *isec; struct avdc_entry *avdc; + struct av_decision avd, *avdp = &avd; int rc, rc2; u32 audited, denied; @@ -3272,23 +3270,21 @@ static int selinux_inode_permission(struct inode *inode, int requested) rc = task_avdcache_search(tsec, isec, &avdc); if (likely(!rc)) { /* Cache hit. */ - audited = perms & avdc->audited; - denied = perms & ~avdc->allowed; - if (unlikely(denied && enforcing_enabled() && - !avdc->permissive)) + avdp = &avdc->avd; + denied = perms & ~avdp->allowed; + if (unlikely(denied) && enforcing_enabled() && + !(avdp->flags & AVD_FLAGS_PERMISSIVE)) rc = -EACCES; } else { - struct av_decision avd; - /* Cache miss. */ rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, - perms, 0, &avd); - audited = avc_audit_required(perms, &avd, rc, - (requested & MAY_ACCESS) ? FILE__AUDIT_ACCESS : 0, - &denied); - task_avdcache_update(tsec, isec, &avd, audited); + perms, 0, avdp); + task_avdcache_update(tsec, isec, avdp); } + audited = avc_audit_required(perms, avdp, rc, + (requested & MAY_ACCESS) ? 
+ FILE__AUDIT_ACCESS : 0, &denied); if (likely(!audited)) return rc; @@ -4920,7 +4916,7 @@ static bool sock_skip_has_perm(u32 sid) static int sock_has_perm(struct sock *sk, u32 perms) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct common_audit_data ad; struct lsm_network_audit net; @@ -6227,7 +6223,7 @@ static unsigned int selinux_ip_postroute(void *priv, static int nlmsg_sock_has_extended_perms(struct sock *sk, u32 perms, u16 nlmsg_type) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct common_audit_data ad; u8 driver; u8 xperm; diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index b19e5d978e8201..3c0a16ec978b01 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -32,9 +32,7 @@ struct avdc_entry { u32 isid; /* inode SID */ - u32 allowed; /* allowed permission bitmask */ - u32 audited; /* audited permission bitmask */ - bool permissive; /* AVC permissive flag */ + struct av_decision avd; /* av decision */ }; struct cred_security_struct { diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c index 15487837e89466..a4d230a19c563f 100644 --- a/sound/ac97/bus.c +++ b/sound/ac97/bus.c @@ -206,24 +206,6 @@ void snd_ac97_codec_driver_unregister(struct ac97_codec_driver *drv) } EXPORT_SYMBOL_GPL(snd_ac97_codec_driver_unregister); -/** - * snd_ac97_codec_get_platdata - get platform_data - * @adev: the ac97 codec device - * - * For legacy platforms, in order to have platform_data in codec drivers - * available, while ac97 device are auto-created upon probe, this retrieves the - * platdata which was setup on ac97 controller registration. 
- * - * Returns the platform data pointer - */ -void *snd_ac97_codec_get_platdata(const struct ac97_codec_device *adev) -{ - struct ac97_controller *ac97_ctrl = adev->ac97_ctrl; - - return ac97_ctrl->codecs_pdata[adev->num]; -} -EXPORT_SYMBOL_GPL(snd_ac97_codec_get_platdata); - static void ac97_ctrl_codecs_unregister(struct ac97_controller *ac97_ctrl) { int i; @@ -337,7 +319,6 @@ static int ac97_add_adapter(struct ac97_controller *ac97_ctrl) * @dev: the device providing the ac97 DC function * @slots_available: mask of the ac97 codecs that can be scanned and probed * bit0 => codec 0, bit1 => codec 1 ... bit 3 => codec 3 - * @codecs_pdata: codec platform data * * Register a digital controller which can control up to 4 ac97 codecs. This is * the controller side of the AC97 AC-link, while the slave side are the codecs. @@ -346,18 +327,15 @@ static int ac97_add_adapter(struct ac97_controller *ac97_ctrl) */ struct ac97_controller *snd_ac97_controller_register( const struct ac97_controller_ops *ops, struct device *dev, - unsigned short slots_available, void **codecs_pdata) + unsigned short slots_available) { struct ac97_controller *ac97_ctrl; - int ret, i; + int ret; ac97_ctrl = kzalloc_obj(*ac97_ctrl); if (!ac97_ctrl) return ERR_PTR(-ENOMEM); - for (i = 0; i < AC97_BUS_MAX_CODECS && codecs_pdata; i++) - ac97_ctrl->codecs_pdata[i] = codecs_pdata[i]; - ac97_ctrl->ops = ops; ac97_ctrl->slots_available = slots_available; ac97_ctrl->parent = dev; diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c index 8a44297964f500..ad7fb6b0c2c00e 100644 --- a/sound/ac97_bus.c +++ b/sound/ac97_bus.c @@ -73,7 +73,6 @@ int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id, if (snd_ac97_check_id(ac97, id, id_mask)) return 0; - return -ENODEV; } EXPORT_SYMBOL_GPL(snd_ac97_reset); @@ -81,6 +80,7 @@ EXPORT_SYMBOL_GPL(snd_ac97_reset); const struct bus_type ac97_bus_type = { .name = "ac97", }; +EXPORT_SYMBOL(ac97_bus_type); static int __init ac97_bus_init(void) { @@ -96,7 +96,5 @@ 
static void __exit ac97_bus_exit(void) module_exit(ac97_bus_exit); -EXPORT_SYMBOL(ac97_bus_type); - MODULE_DESCRIPTION("Legacy AC97 bus interface"); MODULE_LICENSE("GPL"); diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig index e4d7288d1e1e3e..09054ce8074f00 100644 --- a/sound/arm/Kconfig +++ b/sound/arm/Kconfig @@ -19,10 +19,3 @@ config SND_ARMAACI select SND_AC97_CODEC endif # SND_ARM - -config SND_PXA2XX_LIB - tristate - select SND_DMAENGINE_PCM - -config SND_PXA2XX_LIB_AC97 - bool diff --git a/sound/arm/Makefile b/sound/arm/Makefile index 99325a66cf771a..6b91eb796b9b2a 100644 --- a/sound/arm/Makefile +++ b/sound/arm/Makefile @@ -5,7 +5,3 @@ obj-$(CONFIG_SND_ARMAACI) += snd-aaci.o snd-aaci-y := aaci.o - -obj-$(CONFIG_SND_PXA2XX_LIB) += snd-pxa2xx-lib.o -snd-pxa2xx-lib-y := pxa2xx-pcm-lib.o -snd-pxa2xx-lib-$(CONFIG_SND_PXA2XX_LIB_AC97) += pxa2xx-ac97-lib.o diff --git a/sound/core/jack.c b/sound/core/jack.c index 5e8a2f3f41966a..96e0733ede7783 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c @@ -250,18 +250,15 @@ static const char * const jack_events_name[] = { /* the recommended buffer size is 256 */ static int parse_mask_bits(unsigned int mask_bits, char *buf, size_t buf_size) { + int len = scnprintf(buf, buf_size, "0x%04x", mask_bits); int i; - scnprintf(buf, buf_size, "0x%04x", mask_bits); - for (i = 0; i < ARRAY_SIZE(jack_events_name); i++) - if (mask_bits & (1 << i)) { - strlcat(buf, " ", buf_size); - strlcat(buf, jack_events_name[i], buf_size); - } - strlcat(buf, "\n", buf_size); + if (mask_bits & (1 << i)) + len += scnprintf(buf + len, buf_size - len, " %s", jack_events_name[i]); + len += scnprintf(buf + len, buf_size - len, "\n"); - return strlen(buf); + return len; } static ssize_t jack_kctl_mask_bits_read(struct file *file, diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index a140a0d9abb808..33fd34f0d615d9 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -2155,10 +2155,16 @@ static int 
snd_pcm_oss_get_trigger(struct snd_pcm_oss_file *pcm_oss_file) psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; - if (psubstream && psubstream->runtime && psubstream->runtime->oss.trigger) - result |= PCM_ENABLE_OUTPUT; - if (csubstream && csubstream->runtime && csubstream->runtime->oss.trigger) - result |= PCM_ENABLE_INPUT; + if (psubstream && psubstream->runtime) { + guard(mutex)(&psubstream->runtime->oss.params_lock); + if (psubstream->runtime->oss.trigger) + result |= PCM_ENABLE_OUTPUT; + } + if (csubstream && csubstream->runtime) { + guard(mutex)(&csubstream->runtime->oss.params_lock); + if (csubstream->runtime->oss.trigger) + result |= PCM_ENABLE_INPUT; + } return result; } @@ -2832,6 +2838,17 @@ static int snd_pcm_oss_capture_ready(struct snd_pcm_substream *substream) runtime->oss.period_frames; } +static bool need_input_retrigger(struct snd_pcm_runtime *runtime) +{ + bool ret; + + guard(mutex)(&runtime->oss.params_lock); + ret = runtime->oss.trigger; + if (ret) + runtime->oss.trigger = 0; + return ret; +} + static __poll_t snd_pcm_oss_poll(struct file *file, poll_table * wait) { struct snd_pcm_oss_file *pcm_oss_file; @@ -2864,11 +2881,11 @@ static __poll_t snd_pcm_oss_poll(struct file *file, poll_table * wait) snd_pcm_oss_capture_ready(csubstream)) mask |= EPOLLIN | EPOLLRDNORM; } - if (ostate != SNDRV_PCM_STATE_RUNNING && runtime->oss.trigger) { + if (ostate != SNDRV_PCM_STATE_RUNNING && + need_input_retrigger(runtime)) { struct snd_pcm_oss_file ofile; memset(&ofile, 0, sizeof(ofile)); ofile.streams[SNDRV_PCM_STREAM_CAPTURE] = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; - runtime->oss.trigger = 0; snd_pcm_oss_set_trigger(&ofile, PCM_ENABLE_INPUT); } } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index a541bb235cfa18..50da38b141cb75 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1973,13 +1973,15 @@ static int snd_pcm_reset(struct 
snd_pcm_substream *substream) static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream, snd_pcm_state_t state) { - struct snd_pcm_runtime *runtime = substream->runtime; + snd_pcm_state_t cur_state = snd_pcm_get_state(substream); int f_flags = (__force int)state; - if (runtime->state == SNDRV_PCM_STATE_OPEN || - runtime->state == SNDRV_PCM_STATE_DISCONNECTED) + if (cur_state == SNDRV_PCM_STATE_OPEN || + cur_state == SNDRV_PCM_STATE_DISCONNECTED) return -EBADFD; - if (snd_pcm_running(substream)) + if (cur_state == SNDRV_PCM_STATE_RUNNING || + (cur_state == SNDRV_PCM_STATE_DRAINING && + substream->stream == SNDRV_PCM_STREAM_PLAYBACK)) return -EBUSY; substream->f_flags = f_flags; return 0; @@ -2139,7 +2141,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, card = substream->pcm->card; runtime = substream->runtime; - if (runtime->state == SNDRV_PCM_STATE_OPEN) + if (snd_pcm_get_state(substream) == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (file) { @@ -3524,7 +3526,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, snd_pcm_uframes_t *frames = arg; snd_pcm_sframes_t result; - if (substream->runtime->state == SNDRV_PCM_STATE_DISCONNECTED) + if (snd_pcm_get_state(substream) == SNDRV_PCM_STATE_DISCONNECTED) return -EBADFD; switch (cmd) { diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c index c880d47711698a..6b474615ced893 100644 --- a/sound/core/seq/oss/seq_oss_readq.c +++ b/sound/core/seq/oss/seq_oss_readq.c @@ -34,16 +34,10 @@ snd_seq_oss_readq_new(struct seq_oss_devinfo *dp, int maxlen) { struct seq_oss_readq *q; - q = kzalloc_obj(*q); + q = kzalloc_flex(*q, q, maxlen); if (!q) return NULL; - q->q = kzalloc_objs(union evrec, maxlen); - if (!q->q) { - kfree(q); - return NULL; - } - q->maxlen = maxlen; q->qlen = 0; q->head = q->tail = 0; @@ -61,10 +55,7 @@ snd_seq_oss_readq_new(struct seq_oss_devinfo *dp, int maxlen) void snd_seq_oss_readq_delete(struct seq_oss_readq *q) { - if (q) { - kfree(q->q); 
- kfree(q); - } + kfree(q); } /* diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h index 38d0c4682b2989..d8e1e7504d8fbf 100644 --- a/sound/core/seq/oss/seq_oss_readq.h +++ b/sound/core/seq/oss/seq_oss_readq.h @@ -10,13 +10,13 @@ #define __SEQ_OSS_READQ_H #include "seq_oss_device.h" +#include "seq_oss_event.h" /* * definition of read queue */ struct seq_oss_readq { - union evrec *q; int qlen; int maxlen; int head, tail; @@ -24,6 +24,7 @@ struct seq_oss_readq { unsigned long input_time; wait_queue_head_t midi_sleep; spinlock_t lock; + union evrec q[] __counted_by(maxlen); }; struct seq_oss_readq *snd_seq_oss_readq_new(struct seq_oss_devinfo *dp, int maxlen); diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index aa0d2fcb1a180c..a37a1695f51c7d 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -99,6 +99,9 @@ struct loopback_ops { struct loopback_cable { spinlock_t lock; struct loopback_pcm *streams[2]; + /* in-flight peer stops running outside cable->lock */ + atomic_t stop_count; + wait_queue_head_t stop_wait; struct snd_pcm_hardware hw; /* flags */ unsigned int valid; @@ -366,8 +369,11 @@ static int loopback_check_format(struct loopback_cable *cable, int stream) return 0; if (stream == SNDRV_PCM_STREAM_CAPTURE) return -EIO; - else if (cruntime->state == SNDRV_PCM_STATE_RUNNING) + else if (cruntime->state == SNDRV_PCM_STATE_RUNNING) { + /* close must not free the peer runtime below */ + atomic_inc(&cable->stop_count); stop_capture = true; + } } setup = get_setup(dpcm_play); @@ -396,8 +402,11 @@ static int loopback_check_format(struct loopback_cable *cable, int stream) } } - if (stop_capture) + if (stop_capture) { snd_pcm_stop(dpcm_capt->substream, SNDRV_PCM_STATE_DRAINING); + if (atomic_dec_and_test(&cable->stop_count)) + wake_up(&cable->stop_wait); + } return 0; } @@ -1049,23 +1058,29 @@ static void free_cable(struct snd_pcm_substream *substream) struct loopback *loopback = substream->private_data; int dev 
= get_cable_index(substream); struct loopback_cable *cable; + struct loopback_pcm *dpcm; + bool other_alive; cable = loopback->cables[substream->number][dev]; if (!cable) return; - if (cable->streams[!substream->stream]) { - /* other stream is still alive */ - guard(spinlock_irq)(&cable->lock); - cable->streams[substream->stream] = NULL; - } else { - struct loopback_pcm *dpcm = substream->runtime->private_data; - if (cable->ops && cable->ops->close_cable && dpcm) - cable->ops->close_cable(dpcm); - /* free the cable */ - loopback->cables[substream->number][dev] = NULL; - kfree(cable); + scoped_guard(spinlock_irq, &cable->lock) { + cable->streams[substream->stream] = NULL; + other_alive = cable->streams[!substream->stream]; } + + /* Pair with the stop_count increment in loopback_check_format(). */ + wait_event(cable->stop_wait, !atomic_read(&cable->stop_count)); + if (other_alive) + return; + + dpcm = substream->runtime->private_data; + if (cable->ops && cable->ops->close_cable && dpcm) + cable->ops->close_cable(dpcm); + /* free the cable */ + loopback->cables[substream->number][dev] = NULL; + kfree(cable); } static int loopback_jiffies_timer_open(struct loopback_pcm *dpcm) @@ -1260,6 +1275,8 @@ static int loopback_open(struct snd_pcm_substream *substream) goto unlock; } spin_lock_init(&cable->lock); + atomic_set(&cable->stop_count, 0); + init_waitqueue_head(&cable->stop_wait); cable->hw = loopback_pcm_hardware; if (loopback->timer_source) cable->ops = &loopback_snd_timer_ops; diff --git a/sound/hda/codecs/Makefile b/sound/hda/codecs/Makefile index e7f03e281999ff..88d2f8a79467b1 100644 --- a/sound/hda/codecs/Makefile +++ b/sound/hda/codecs/Makefile @@ -7,7 +7,6 @@ snd-hda-codec-cm9825-y := cm9825.o snd-hda-codec-analog-y := analog.o snd-hda-codec-ca0110-y := ca0110.o snd-hda-codec-ca0132-y := ca0132.o -snd-hda-codec-cmedia-y := cmedia.o snd-hda-codec-conexant-y := conexant.o snd-hda-codec-idt-y := sigmatel.o snd-hda-codec-senarytech-y := senarytech.o @@ -26,7 +25,6 
@@ obj-$(CONFIG_SND_HDA_CODEC_CM9825) += snd-hda-codec-cm9825.o obj-$(CONFIG_SND_HDA_CODEC_ANALOG) += snd-hda-codec-analog.o obj-$(CONFIG_SND_HDA_CODEC_CA0110) += snd-hda-codec-ca0110.o obj-$(CONFIG_SND_HDA_CODEC_CA0132) += snd-hda-codec-ca0132.o -obj-$(CONFIG_SND_HDA_CODEC_CMEDIA) += snd-hda-codec-cmedia.o obj-$(CONFIG_SND_HDA_CODEC_CONEXANT) += snd-hda-codec-conexant.o obj-$(CONFIG_SND_HDA_CODEC_SIGMATEL) += snd-hda-codec-idt.o obj-$(CONFIG_SND_HDA_CODEC_SENARYTECH) += snd-hda-codec-senarytech.o diff --git a/sound/hda/codecs/conexant.c b/sound/hda/codecs/conexant.c index 3a9717df39b4be..e3b6aaabe3a9cc 100644 --- a/sound/hda/codecs/conexant.c +++ b/sound/hda/codecs/conexant.c @@ -1175,6 +1175,7 @@ static void add_cx5051_fake_mutes(struct hda_codec *codec) static int cx_probe(struct hda_codec *codec, const struct hda_device_id *id) { struct conexant_spec *spec; + struct hda_jack_callback *callback; int err; codec_info(codec, "%s: BIOS auto-probing.\n", codec->core.chip_name); @@ -1190,7 +1191,12 @@ static int cx_probe(struct hda_codec *codec, const struct hda_device_id *id) case 0x14f11f86: case 0x14f11f87: spec->is_cx11880_sn6140 = true; - snd_hda_jack_detect_enable_callback(codec, 0x19, cx_update_headset_mic_vref); + callback = snd_hda_jack_detect_enable_callback(codec, 0x19, + cx_update_headset_mic_vref); + if (IS_ERR(callback)) { + err = PTR_ERR(callback); + goto error; + } break; } diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c index 6047c5660f20de..c3316a6f7e9d59 100644 --- a/sound/hda/codecs/realtek/alc269.c +++ b/sound/hda/codecs/realtek/alc269.c @@ -3694,6 +3694,17 @@ static void alc287_fixup_lenovo_thinkpad_with_alc1318(struct hda_codec *codec, spec->power_hook = alc287_s4_power_gpio3_default; spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook; } + +static void alc287_fixup_tb_vmaster_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if 
(action == HDA_FIXUP_ACT_PRE_PROBE) + spec->gen.vmaster_mute_led = 1; + + alc287_fixup_bind_dacs(codec, fix, action); +} /* GPIO2: mute led GPIO3: micmute led */ static void alc245_tas2781_spi_hp_fixup_muteled(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -6448,7 +6459,7 @@ static const struct hda_fixup alc269_fixups[] = { }, [ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = { .type = HDA_FIXUP_FUNC, - .v.func = alc287_fixup_bind_dacs, + .v.func = alc287_fixup_tb_vmaster_led, .chained = true, .chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI, }, @@ -6717,6 +6728,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x159c, "Acer Nitro 5 AN515-58", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1597, "Acer Nitro 5 AN517-55", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x160e, "Acer PT316-51S", ALC2XX_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1640, "Acer Aspire A315-44P", ALC256_FIXUP_ACER_SFG16_MICMUTE_LED), SND_PCI_QUIRK(0x1025, 0x1679, "Acer Nitro 16 AN16-41", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x169a, "Acer Swift SFG16", ALC256_FIXUP_ACER_SFG16_MICMUTE_LED), SND_PCI_QUIRK(0x1025, 0x171e, "Acer Nitro ANV15-51", ALC245_FIXUP_ACER_MICMUTE_LED), diff --git a/sound/hda/codecs/side-codecs/cs35l56_hda.c b/sound/hda/codecs/side-codecs/cs35l56_hda.c index 1ace4beef50857..4c8d01799931c8 100644 --- a/sound/hda/codecs/side-codecs/cs35l56_hda.c +++ b/sound/hda/codecs/side-codecs/cs35l56_hda.c @@ -180,11 +180,15 @@ static int cs35l56_hda_mixer_get(struct snd_kcontrol *kcontrol, { struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol); unsigned int reg_val; - int i; + int i, ret; cs35l56_hda_wait_dsp_ready(cs35l56); - regmap_read(cs35l56->base.regmap, kcontrol->private_value, ®_val); + ret = regmap_read(cs35l56->base.regmap, kcontrol->private_value, + ®_val); + if (ret) + return ret; + reg_val &= CS35L56_ASP_TXn_SRC_MASK; for (i = 0; i < CS35L56_NUM_INPUT_SRC; ++i) { @@ -203,15 +207,20 @@ static 
int cs35l56_hda_mixer_put(struct snd_kcontrol *kcontrol, struct cs35l56_hda *cs35l56 = snd_kcontrol_chip(kcontrol); unsigned int item = ucontrol->value.enumerated.item[0]; bool changed; + int ret; if (item >= CS35L56_NUM_INPUT_SRC) return -EINVAL; cs35l56_hda_wait_dsp_ready(cs35l56); - regmap_update_bits_check(cs35l56->base.regmap, kcontrol->private_value, - CS35L56_INPUT_MASK, cs35l56_tx_input_values[item], - &changed); + ret = regmap_update_bits_check(cs35l56->base.regmap, + kcontrol->private_value, + CS35L56_INPUT_MASK, + cs35l56_tx_input_values[item], + &changed); + if (ret) + return ret; return changed; } @@ -967,6 +976,7 @@ static int cs35l56_hda_system_resume(struct device *dev) static int cs35l56_hda_fixup_yoga9(struct cs35l56_hda *cs35l56, int *bus_addr) { /* The cirrus,dev-index property has the wrong values */ + cs35l56->num_amps = 2; switch (*bus_addr) { case 0x30: cs35l56->index = 1; @@ -1016,7 +1026,6 @@ static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id) char hid_string[8]; struct acpi_device *adev; const char *property, *sub; - size_t nval; int i, ret; /* @@ -1052,13 +1061,14 @@ static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id) ret = -EINVAL; goto err; } - nval = ret; + cs35l56->num_amps = ret; - ret = device_property_read_u32_array(cs35l56->base.dev, property, values, nval); + ret = device_property_read_u32_array(cs35l56->base.dev, property, values, + cs35l56->num_amps); if (ret) goto err; - for (i = 0; i < nval; i++) { + for (i = 0; i < cs35l56->num_amps; i++) { if (values[i] == id) { cs35l56->index = i; break; @@ -1081,7 +1091,8 @@ static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id) "Read ACPI _SUB failed(%ld): fallback to generic firmware\n", PTR_ERR(sub)); } else { - ret = cirrus_scodec_get_speaker_id(cs35l56->base.dev, cs35l56->index, nval, -1); + ret = cirrus_scodec_get_speaker_id(cs35l56->base.dev, cs35l56->index, + cs35l56->num_amps, -1); if (ret == -ENOENT) 
{ cs35l56->system_name = sub; } else if (ret >= 0) { diff --git a/sound/hda/codecs/side-codecs/cs35l56_hda.h b/sound/hda/codecs/side-codecs/cs35l56_hda.h index cb4b5e7356a358..3705af7c186b37 100644 --- a/sound/hda/codecs/side-codecs/cs35l56_hda.h +++ b/sound/hda/codecs/side-codecs/cs35l56_hda.h @@ -26,6 +26,7 @@ struct cs35l56_hda { struct work_struct dsp_work; int index; + int num_amps; const char *system_name; const char *amp_name; diff --git a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c index 67240ce184e1a7..dd1b0cc63ad6cd 100644 --- a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c +++ b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c @@ -588,6 +588,9 @@ static void tas2781_hda_unbind(struct device *dev, comp->playback_hook = NULL; } + request_firmware_nowait_cancel(tas_hda->priv->dev, tas_hda->priv, + tasdev_fw_ready); + tas2781_hda_remove_controls(tas_hda); tasdevice_config_info_remove(tas_hda->priv); diff --git a/sound/hda/codecs/side-codecs/tas2781_hda_spi.c b/sound/hda/codecs/side-codecs/tas2781_hda_spi.c index 560f2385212dd5..d243baff95a727 100644 --- a/sound/hda/codecs/side-codecs/tas2781_hda_spi.c +++ b/sound/hda/codecs/side-codecs/tas2781_hda_spi.c @@ -132,10 +132,18 @@ static int tasdevice_spi_dev_update_bits(struct tasdevice_priv *tas_priv, int ret, val; /* - * In our TAS2781 SPI mode, read/write was masked in last bit of - * address, it cause regmap_update_bits() not work as expected. + * In TAS2781 SPI mode, when accessing non-book-zero or page numbers + * greater than 1 in book 0, an additional byte must be read. The + * first byte in such cases is a dummy byte and should be ignored. 
*/ - ret = tasdevice_dev_read(tas_priv, chn, reg, &val); + if ((TASDEVICE_BOOK_ID(reg) > 0) || (TASDEVICE_PAGE_ID(reg) > 1)) { + unsigned char buf[2]; + + ret = tasdevice_dev_bulk_read(tas_priv, chn, reg, buf, 2); + val = buf[1]; + } else { + ret = tasdevice_dev_read(tas_priv, chn, reg, &val); + } if (ret < 0) { dev_err(tas_priv->dev, "%s, E=%d\n", __func__, ret); return ret; @@ -742,6 +750,9 @@ static void tas2781_hda_unbind(struct device *dev, struct device *master, comp->playback_hook = NULL; } + request_firmware_nowait_cancel(tas_priv->dev, tas_priv, + tasdev_fw_ready); + tas2781_hda_remove_controls(tas_hda); tasdevice_config_info_remove(tas_priv); diff --git a/sound/hda/core/hdmi_chmap.c b/sound/hda/core/hdmi_chmap.c index 7b276047f85a7d..c897fc443467c2 100644 --- a/sound/hda/core/hdmi_chmap.c +++ b/sound/hda/core/hdmi_chmap.c @@ -353,13 +353,16 @@ static void hdmi_std_setup_channel_mapping(struct hdac_chmap *chmap, if (hdmi_channel_mapping[ca][1] == 0) { int hdmi_slot = 0; /* fill actual channel mappings in ALSA channel (i) order */ - for (i = 0; i < ch_alloc->channels; i++) { - while (!WARN_ON(hdmi_slot >= 8) && - !ch_alloc->speakers[7 - hdmi_slot]) - hdmi_slot++; /* skip zero slots */ + for (i = 0; i < ch_alloc->channels && hdmi_slot < 8; i++) { + while (!ch_alloc->speakers[7 - hdmi_slot]) { + /* skip zero slots */ + if (++hdmi_slot >= 8) + goto out; + } hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++; } + out: /* fill the rest of the slots with ALSA channel 0xf */ for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) if (!ch_alloc->speakers[7 - hdmi_slot]) diff --git a/sound/hda/core/i915.c b/sound/hda/core/i915.c index 44438c799f9570..6c068b135b7dc7 100644 --- a/sound/hda/core/i915.c +++ b/sound/hda/core/i915.c @@ -130,22 +130,22 @@ static int i915_gfx_present(struct pci_dev *hdac_pci) /* List of known platforms with no i915 support. 
*/ static const struct pci_device_id denylist[] = { /* CNL */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a40), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a41), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a42), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a44), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a49), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a4a), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a4c), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a50), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a51), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a52), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a54), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a59), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a5a), 0x030000, 0xff0000 }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a5c), 0x030000, 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a40), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a41), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a42), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a44), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a49), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a4a), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a4c), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a50), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a51), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a52), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a54), .class = 0x030000, .class_mask = 0xff0000 }, + { 
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a59), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a5a), .class = 0x030000, .class_mask = 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a5c), .class = 0x030000, .class_mask = 0xff0000 }, /* LKF */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9840), 0x030000, 0xff0000 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9840), .class = 0x030000, .class_mask = 0xff0000 }, {} }; struct pci_dev *display_dev = NULL; diff --git a/sound/isa/wavefront/wavefront.c b/sound/isa/wavefront/wavefront.c index 07c68568091d1e..f4a0c21c905e99 100644 --- a/sound/isa/wavefront/wavefront.c +++ b/sound/isa/wavefront/wavefront.c @@ -353,6 +353,7 @@ snd_wavefront_probe (struct snd_card *card, int dev) dev_err(card->dev, "can't allocate WSS device\n"); return err; } + acard->chip = chip; err = snd_wss_pcm(chip, 0); if (err < 0) @@ -400,6 +401,7 @@ snd_wavefront_probe (struct snd_card *card, int dev) acard->wavefront.irq = ics2115_irq[dev]; card->sync_irq = acard->wavefront.irq; acard->wavefront.base = ics2115_port[dev]; + snd_wavefront_cache_firmware(&acard->wavefront); wavefront_synth = snd_wavefront_new_synth(card, hw_dev, acard); if (wavefront_synth == NULL) { @@ -553,12 +555,51 @@ static int snd_wavefront_isa_probe(struct device *pdev, return 0; } +#ifdef CONFIG_PM +static int snd_wavefront_suspend(struct snd_card *card) +{ + snd_wavefront_card_t *acard = card->private_data; + + snd_wavefront_midi_suspend(acard); + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); + acard->chip->suspend(acard->chip); + return 0; +} + +static int snd_wavefront_resume(struct snd_card *card) +{ + snd_wavefront_card_t *acard = card->private_data; + int err; + + acard->chip->resume(acard->chip); + err = snd_wavefront_resume_synth(acard); + if (err < 0) + return err; + snd_power_change_state(card, SNDRV_CTL_POWER_D0); + return 0; +} + +static int snd_wavefront_isa_suspend(struct device *dev, unsigned int id, + pm_message_t state) +{ + return 
snd_wavefront_suspend(dev_get_drvdata(dev)); +} + +static int snd_wavefront_isa_resume(struct device *dev, unsigned int id) +{ + return snd_wavefront_resume(dev_get_drvdata(dev)); +} +#endif + #define DEV_NAME "wavefront" static struct isa_driver snd_wavefront_driver = { .match = snd_wavefront_isa_match, .probe = snd_wavefront_isa_probe, - /* FIXME: suspend, resume */ +#ifdef CONFIG_PM + .suspend = snd_wavefront_isa_suspend, + .resume = snd_wavefront_isa_resume, +#endif .driver = { .name = DEV_NAME }, @@ -600,12 +641,28 @@ static int snd_wavefront_pnp_detect(struct pnp_card_link *pcard, return 0; } +#ifdef CONFIG_PM +static int snd_wavefront_pnpc_suspend(struct pnp_card_link *pcard, + pm_message_t state) +{ + return snd_wavefront_suspend(pnp_get_card_drvdata(pcard)); +} + +static int snd_wavefront_pnpc_resume(struct pnp_card_link *pcard) +{ + return snd_wavefront_resume(pnp_get_card_drvdata(pcard)); +} +#endif + static struct pnp_card_driver wavefront_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "wavefront", .id_table = snd_wavefront_pnpids, .probe = snd_wavefront_pnp_detect, - /* FIXME: suspend,resume */ +#ifdef CONFIG_PM + .suspend = snd_wavefront_pnpc_suspend, + .resume = snd_wavefront_pnpc_resume, +#endif }; #endif /* CONFIG_PNP */ diff --git a/sound/isa/wavefront/wavefront_midi.c b/sound/isa/wavefront/wavefront_midi.c index 69d87c4cafaedf..fb184d9ef284a6 100644 --- a/sound/isa/wavefront/wavefront_midi.c +++ b/sound/isa/wavefront/wavefront_midi.c @@ -455,6 +455,49 @@ snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *card) card->wavefront.midi.isvirtual = 0; } +void +snd_wavefront_midi_suspend(snd_wavefront_card_t *card) + +{ + snd_wavefront_midi_t *midi = &card->wavefront.midi; + + if (!midi->istimer) + return; + + timer_delete_sync(&midi->timer); + + guard(spinlock_irqsave)(&midi->virtual); + midi->istimer = 0; +} + +void +snd_wavefront_midi_resume(snd_wavefront_card_t *card) + +{ + snd_wavefront_midi_t *midi = &card->wavefront.midi; + int 
istimer = 0; + bool pending_output = false; + + midi->timer_card = card; + + scoped_guard(spinlock_irqsave, &midi->virtual) { + if (midi->mode[internal_mpu] & MPU401_MODE_OUTPUT_TRIGGER) + istimer++; + if (midi->mode[external_mpu] & MPU401_MODE_OUTPUT_TRIGGER) + istimer++; + if (!istimer) + return; + + midi->istimer = istimer; + timer_setup(&midi->timer, snd_wavefront_midi_output_timer, 0); + mod_timer(&midi->timer, 1 + jiffies); + pending_output = true; + } + + if (pending_output) + snd_wavefront_midi_output_write(card); +} + int snd_wavefront_midi_start (snd_wavefront_card_t *card) @@ -466,6 +509,7 @@ snd_wavefront_midi_start (snd_wavefront_card_t *card) dev = &card->wavefront; midi = &dev->midi; + midi->timer_card = card; /* The ICS2115 MPU-401 interface doesn't do anything until its set into UART mode. @@ -511,6 +555,8 @@ snd_wavefront_midi_start (snd_wavefront_card_t *card) dev_warn(card->wavefront.card->dev, "can't enable MIDI-IN-2-synth routing.\n"); /* XXX error ? */ + } else { + dev->midi_in_to_synth = 1; } /* Turn on Virtual MIDI, but first *always* turn it off, @@ -553,4 +599,3 @@ const struct snd_rawmidi_ops snd_wavefront_midi_input = .close = snd_wavefront_midi_input_close, .trigger = snd_wavefront_midi_input_trigger, }; - diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index 33b563707a58db..2f57a6795d22b8 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -1626,6 +1626,14 @@ wavefront_synth_control (snd_wavefront_card_t *acard, "support for sample aliases still being considered.\n"); break; + case WFC_MISYNTH_OFF: + dev->midi_in_to_synth = 0; + break; + + case WFC_MISYNTH_ON: + dev->midi_in_to_synth = 1; + break; + case WFC_VMIDI_OFF: snd_wavefront_midi_disable_virtual (acard); break; @@ -1639,6 +1647,83 @@ wavefront_synth_control (snd_wavefront_card_t *acard, return 0; } +static int +wavefront_restore_midi_state(snd_wavefront_card_t *acard, char isvirtual, + 
char midi_in_to_synth) +{ + snd_wavefront_t *dev = &acard->wavefront; + unsigned char rbuf[4], wbuf[4]; + + if (dev->midi_in_to_synth != midi_in_to_synth) { + if (snd_wavefront_cmd(dev, midi_in_to_synth ? + WFC_MISYNTH_ON : WFC_MISYNTH_OFF, + rbuf, wbuf)) { + dev_err(dev->card->dev, + "cannot restore MIDI-IN routing after resume\n"); + return -EIO; + } + dev->midi_in_to_synth = midi_in_to_synth; + } + + if (dev->midi.isvirtual != isvirtual) { + if (snd_wavefront_cmd(dev, isvirtual ? + WFC_VMIDI_ON : WFC_VMIDI_OFF, + rbuf, wbuf)) { + dev_err(dev->card->dev, + "cannot restore virtual MIDI mode after resume\n"); + return -EIO; + } + if (isvirtual) + snd_wavefront_midi_enable_virtual(acard); + else + snd_wavefront_midi_disable_virtual(acard); + } + + return 0; +} + +int snd_wavefront_resume_synth(snd_wavefront_card_t *acard) +{ + snd_wavefront_t *dev = &acard->wavefront; + char was_virtual = dev->midi.isvirtual; + char midi_in_to_synth = dev->midi_in_to_synth; + char rom_samples_rdonly = dev->rom_samples_rdonly; + int err; + + err = snd_wavefront_detect(acard); + if (err < 0) + dev->israw = 1; + + if (dev->israw) { + dev->fx_initialized = 0; + err = snd_wavefront_start(dev); + if (err < 0) + return err; + } else { + dev->has_fx = (snd_wavefront_fx_detect(dev) == 0); + wavefront_get_sample_status(dev, 0); + wavefront_get_program_status(dev); + wavefront_get_patch_status(dev); + outb(0x80 | 0x40 | 0x20, dev->control_port); + } + + dev->rom_samples_rdonly = rom_samples_rdonly; + dev->midi.base = dev->base; + + err = snd_wavefront_midi_start(acard); + if (err < 0) + return err; + + err = wavefront_restore_midi_state(acard, was_virtual, + midi_in_to_synth); + if (err < 0) + return err; + + snd_wavefront_midi_resume(acard); + + return 0; +} + int snd_wavefront_synth_open (struct snd_hwdep *hw, struct file *file) @@ -2032,6 +2117,17 @@ wavefront_download_firmware (snd_wavefront_t *dev, char *path) return 1; } +void snd_wavefront_cache_firmware(snd_wavefront_t *dev) +{ + int 
err; + + err = firmware_request_cache(dev->card->dev, ospath); + if (err < 0) + dev_warn(dev->card->dev, + "unable to cache firmware %s for resume: %d\n", + ospath, err); +} + static int wavefront_do_reset (snd_wavefront_t *dev) diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c index a718b75bb6a09d..e80f730d08038f 100644 --- a/sound/oss/dmasound/dmasound_core.c +++ b/sound/oss/dmasound/dmasound_core.c @@ -574,11 +574,6 @@ static ssize_t sq_write(struct file *file, const char __user *src, size_t uLeft, uWritten = 0 ; } -/* FIXME: I think that this may be the wrong behaviour when we get strapped - for time and the cpu is close to being (or actually) behind in sending data. - - because we've lost the time that the N samples, already in the buffer, - would have given us to get here with the next lot from the user. -*/ /* The interrupt doesn't start to play the last, incomplete frame. * Thus we can append to it without disabling the interrupts! (Note * also that write_sq.rear isn't affected by the interrupt.) @@ -598,6 +593,11 @@ static ssize_t sq_write(struct file *file, const char __user *src, size_t uLeft, write_sq.syncing &= ~2 ; /* take out POST status */ spin_unlock_irqrestore(&dmasound.lock, flags); + /* Start any already-complete fragments before we spend + * more time extending the incomplete tail fragment. 
+ */ + sq_play(); + if (write_sq.count > 0 && (bLeft = write_sq.block_size-write_sq.rear_size) > 0) { dest = write_sq.buffers[write_sq.rear]; diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c index 571d89a6a8da27..f07c5f4c23801b 100644 --- a/sound/pci/ali5451/ali5451.c +++ b/sound/pci/ali5451/ali5451.c @@ -247,8 +247,8 @@ struct snd_ali { }; static const struct pci_device_id snd_ali_ids[] = { - {PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5451), 0, 0, 0}, - {0, } + { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5451) }, + { } }; MODULE_DEVICE_TABLE(pci, snd_ali_ids); diff --git a/sound/pci/als300.c b/sound/pci/als300.c index a73893a2cbd668..00f0720169c3a9 100644 --- a/sound/pci/als300.c +++ b/sound/pci/als300.c @@ -127,9 +127,9 @@ struct snd_als300_substream_data { }; static const struct pci_device_id snd_als300_ids[] = { - { 0x4005, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALS300 }, - { 0x4005, 0x0308, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALS300_PLUS }, - { 0, } + { PCI_DEVICE(0x4005, 0x0300), .driver_data = DEVICE_ALS300 }, + { PCI_DEVICE(0x4005, 0x0308), .driver_data = DEVICE_ALS300_PLUS }, + { } }; MODULE_DEVICE_TABLE(pci, snd_als300_ids); diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c index 636f309c942407..6396aa6c3bf071 100644 --- a/sound/pci/als4000.c +++ b/sound/pci/als4000.c @@ -102,8 +102,8 @@ struct snd_card_als4000 { }; static const struct pci_device_id snd_als4000_ids[] = { - { 0x4005, 0x4000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ALS4000 */ - { 0, } + { PCI_DEVICE(0x4005, 0x4000) }, /* ALS4000 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_als4000_ids); diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c index b1c7ed7f1604e8..4dbc79899c0919 100644 --- a/sound/pci/asihpi/asihpi.c +++ b/sound/pci/asihpi/asihpi.c @@ -2933,13 +2933,16 @@ static void snd_asihpi_remove(struct pci_dev *pci_dev) } static const struct pci_device_id asihpi_pci_tbl[] = { - {HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_DSP6205, - 
HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID, 0, 0, - (kernel_ulong_t)HPI_6205}, - {HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_PCI2040, - HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID, 0, 0, - (kernel_ulong_t)HPI_6000}, - {0,} + { + PCI_DEVICE_SUB(HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_DSP6205, + HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID), + .driver_data = (kernel_ulong_t)HPI_6205, + }, { + PCI_DEVICE_SUB(HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_PCI2040, + HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID), + .driver_data = (kernel_ulong_t)HPI_6000, + }, + {} }; MODULE_DEVICE_TABLE(pci, asihpi_pci_tbl); diff --git a/sound/pci/asihpi/hpipcida.h b/sound/pci/asihpi/hpipcida.h index 0673e827807071..6dceff2b47419c 100644 --- a/sound/pci/asihpi/hpipcida.h +++ b/sound/pci/asihpi/hpipcida.h @@ -15,12 +15,12 @@ */ { -HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_DSP6205, - HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID, 0, 0, - (kernel_ulong_t) HPI_6205} -, { -HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_PCI2040, - HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID, 0, 0, - (kernel_ulong_t) HPI_6000} -, { -0} + PCI_DEVICE_SUB(HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_DSP6205, + HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID), + .driver_data = (kernel_ulong_t) HPI_6205, +}, { + PCI_DEVICE_SUB(HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_PCI2040, + HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID), + .driver_data = (kernel_ulong_t) HPI_6000, +}, +{ } diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c index 2a0c59d5afa58e..b738295b41e5d0 100644 --- a/sound/pci/atiixp.c +++ b/sound/pci/atiixp.c @@ -272,11 +272,11 @@ struct atiixp { /* */ static const struct pci_device_id snd_atiixp_ids[] = { - { PCI_VDEVICE(ATI, 0x4341), 0 }, /* SB200 */ - { PCI_VDEVICE(ATI, 0x4361), 0 }, /* SB300 */ - { PCI_VDEVICE(ATI, 0x4370), 0 }, /* SB400 */ - { PCI_VDEVICE(ATI, 0x4382), 0 }, /* SB600 */ - { 0, } + { PCI_VDEVICE(ATI, 0x4341) }, /* SB200 */ + { PCI_VDEVICE(ATI, 0x4361) }, /* SB300 */ + { PCI_VDEVICE(ATI, 0x4370) }, /* SB400 */ + { PCI_VDEVICE(ATI, 0x4382) }, /* 
SB600 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_atiixp_ids); diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c index 91f31e2ad3d3b6..8aaeb197ce4583 100644 --- a/sound/pci/atiixp_modem.c +++ b/sound/pci/atiixp_modem.c @@ -247,9 +247,9 @@ struct atiixp_modem { /* */ static const struct pci_device_id snd_atiixp_ids[] = { - { PCI_VDEVICE(ATI, 0x434d), 0 }, /* SB200 */ - { PCI_VDEVICE(ATI, 0x4378), 0 }, /* SB400 */ - { 0, } + { PCI_VDEVICE(ATI, 0x434d) }, /* SB200 */ + { PCI_VDEVICE(ATI, 0x4378) }, /* SB400 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_atiixp_ids); diff --git a/sound/pci/au88x0/au8810.c b/sound/pci/au88x0/au8810.c index b2bfa50bfe30e1..f712e32d337007 100644 --- a/sound/pci/au88x0/au8810.c +++ b/sound/pci/au88x0/au8810.c @@ -2,8 +2,8 @@ #include "au8810.h" #include "au88x0.h" static const struct pci_device_id snd_vortex_ids[] = { - {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_ADVANTAGE), 1,}, - {0,} + { PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_ADVANTAGE), .driver_data = 1 }, + { } }; #include "au88x0_core.c" diff --git a/sound/pci/au88x0/au8820.c b/sound/pci/au88x0/au8820.c index dbc2263b49c6e4..aa841b615182c2 100644 --- a/sound/pci/au88x0/au8820.c +++ b/sound/pci/au88x0/au8820.c @@ -2,8 +2,8 @@ #include "au8820.h" #include "au88x0.h" static const struct pci_device_id snd_vortex_ids[] = { - {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_1), 0,}, - {0,} + { PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_1), .driver_data = 0 }, + { } }; #include "au88x0_synth.c" diff --git a/sound/pci/au88x0/au8830.c b/sound/pci/au88x0/au8830.c index e963c4e2f026c4..aeb8d458c6299b 100644 --- a/sound/pci/au88x0/au8830.c +++ b/sound/pci/au88x0/au8830.c @@ -2,8 +2,8 @@ #include "au8830.h" #include "au88x0.h" static const struct pci_device_id snd_vortex_ids[] = { - {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_2), 0,}, - {0,} + { PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_2), .driver_data = 0 }, + { } }; #include "au88x0_synth.c" diff --git 
a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c index e2c501f4394c1b..60a87322eae49d 100644 --- a/sound/pci/aw2/aw2-alsa.c +++ b/sound/pci/aw2/aw2-alsa.c @@ -142,9 +142,8 @@ module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Audiowerk2 soundcard."); static const struct pci_device_id snd_aw2_ids[] = { - {PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146, 0, 0, - 0, 0, 0}, - {0} + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146, 0, 0) }, + { } }; MODULE_DEVICE_TABLE(pci, snd_aw2_ids); diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c index 6cdf76e2b7d2e3..ccca82417657b5 100644 --- a/sound/pci/azt3328.c +++ b/sound/pci/azt3328.c @@ -305,9 +305,9 @@ struct snd_azf3328 { }; static const struct pci_device_id snd_azf3328_ids[] = { - { 0x122D, 0x50DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* PCI168/3328 */ - { 0x122D, 0x80DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* 3328 */ - { 0, } + { PCI_DEVICE(0x122D, 0x50DC) }, /* PCI168/3328 */ + { PCI_DEVICE(0x122D, 0x80DA) }, /* 3328 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_azf3328_ids); diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index 35392f6525b3b3..cf923f551211ee 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c @@ -1820,8 +1820,8 @@ static SIMPLE_DEV_PM_OPS(snd_ca0106_pm, snd_ca0106_suspend, snd_ca0106_resume); // PCI IDs static const struct pci_device_id snd_ca0106_ids[] = { - { PCI_VDEVICE(CREATIVE, 0x0007), 0 }, /* Audigy LS or Live 24bit */ - { 0, } + { PCI_VDEVICE(CREATIVE, 0x0007) }, /* Audigy LS or Live 24bit */ + { } }; MODULE_DEVICE_TABLE(pci, snd_ca0106_ids); diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index cd73b683363944..f5382b10865a35 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c @@ -2721,12 +2721,12 @@ static void snd_cmipci_proc_init(struct cmipci *cm) } static const struct pci_device_id snd_cmipci_ids[] = { - {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A), 0}, - 
{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B), 0}, - {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738), 0}, - {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B), 0}, - {PCI_VDEVICE(AL, PCI_DEVICE_ID_CMEDIA_CM8738), 0}, - {0,}, + {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A) }, + {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B) }, + {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738) }, + {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B) }, + {PCI_VDEVICE(AL, PCI_DEVICE_ID_CMEDIA_CM8738) }, + { } }; diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c index d00b2c9fb1e38c..f51f4bb6376646 100644 --- a/sound/pci/cs4281.c +++ b/sound/pci/cs4281.c @@ -476,8 +476,8 @@ struct cs4281 { static irqreturn_t snd_cs4281_interrupt(int irq, void *dev_id); static const struct pci_device_id snd_cs4281_ids[] = { - { PCI_VDEVICE(CIRRUS, 0x6005), 0, }, /* CS4281 */ - { 0, } + { PCI_VDEVICE(CIRRUS, 0x6005) }, /* CS4281 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_cs4281_ids); diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c index 9c1995737eb7f6..0cb7c2a2929fed 100644 --- a/sound/pci/cs46xx/cs46xx.c +++ b/sound/pci/cs46xx/cs46xx.c @@ -43,10 +43,10 @@ module_param_array(mmap_valid, bool, NULL, 0444); MODULE_PARM_DESC(mmap_valid, "Support OSS mmap."); static const struct pci_device_id snd_cs46xx_ids[] = { - { PCI_VDEVICE(CIRRUS, 0x6001), 0, }, /* CS4280 */ - { PCI_VDEVICE(CIRRUS, 0x6003), 0, }, /* CS4612 */ - { PCI_VDEVICE(CIRRUS, 0x6004), 0, }, /* CS4615 */ - { 0, } + { PCI_VDEVICE(CIRRUS, 0x6001) }, /* CS4280 */ + { PCI_VDEVICE(CIRRUS, 0x6003) }, /* CS4612 */ + { PCI_VDEVICE(CIRRUS, 0x6004) }, /* CS4615 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_cs46xx_ids); diff --git a/sound/pci/echoaudio/darla20.c b/sound/pci/echoaudio/darla20.c index e295c71c7a3903..48c25bd3f401e9 100644 --- a/sound/pci/echoaudio/darla20.c +++ b/sound/pci/echoaudio/darla20.c @@ -52,8 +52,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - 
{0x1057, 0x1801, 0xECC0, 0x0010, 0, 0, 0}, /* DSP 56301 Darla20 rev.0 */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0010) }, /* DSP 56301 Darla20 rev.0 */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/darla24.c b/sound/pci/echoaudio/darla24.c index ae816e78f5991f..c8cdd1b052df6c 100644 --- a/sound/pci/echoaudio/darla24.c +++ b/sound/pci/echoaudio/darla24.c @@ -56,9 +56,9 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x1801, 0xECC0, 0x0040, 0, 0, 0}, /* DSP 56301 Darla24 rev.0 */ - {0x1057, 0x1801, 0xECC0, 0x0041, 0, 0, 0}, /* DSP 56301 Darla24 rev.1 */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0040) }, /* DSP 56301 Darla24 rev.0 */ + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0041) }, /* DSP 56301 Darla24 rev.1 */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/echo3g.c b/sound/pci/echoaudio/echo3g.c index 3d37bb4030ec2e..e8b476e073073b 100644 --- a/sound/pci/echoaudio/echo3g.c +++ b/sound/pci/echoaudio/echo3g.c @@ -70,8 +70,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x3410, 0xECC0, 0x0100, 0, 0, 0}, /* Echo 3G */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0100) }, /* Echo 3G */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/gina20.c b/sound/pci/echoaudio/gina20.c index 4f864ddc95309e..b5f88922e19a24 100644 --- a/sound/pci/echoaudio/gina20.c +++ b/sound/pci/echoaudio/gina20.c @@ -56,8 +56,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x1801, 0xECC0, 0x0020, 0, 0, 0}, /* DSP 56301 Gina20 rev.0 */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0020) }, /* DSP 56301 Gina20 rev.0 */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git 
a/sound/pci/echoaudio/gina24.c b/sound/pci/echoaudio/gina24.c index eff69e83ca0a1b..fe0e510209ed34 100644 --- a/sound/pci/echoaudio/gina24.c +++ b/sound/pci/echoaudio/gina24.c @@ -74,11 +74,11 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x1801, 0xECC0, 0x0050, 0, 0, 0}, /* DSP 56301 Gina24 rev.0 */ - {0x1057, 0x1801, 0xECC0, 0x0051, 0, 0, 0}, /* DSP 56301 Gina24 rev.1 */ - {0x1057, 0x3410, 0xECC0, 0x0050, 0, 0, 0}, /* DSP 56361 Gina24 rev.0 */ - {0x1057, 0x3410, 0xECC0, 0x0051, 0, 0, 0}, /* DSP 56361 Gina24 rev.1 */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0050) }, /* DSP 56301 Gina24 rev.0 */ + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0051) }, /* DSP 56301 Gina24 rev.1 */ + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0050) }, /* DSP 56361 Gina24 rev.0 */ + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0051) }, /* DSP 56361 Gina24 rev.1 */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/indigo.c b/sound/pci/echoaudio/indigo.c index a9f2efc58f6e0f..496cc5aa9516da 100644 --- a/sound/pci/echoaudio/indigo.c +++ b/sound/pci/echoaudio/indigo.c @@ -57,8 +57,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x3410, 0xECC0, 0x0090, 0, 0, 0}, /* Indigo */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0090) }, /* Indigo */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/indigodj.c b/sound/pci/echoaudio/indigodj.c index 14e9769ceba141..45ad92e8f5311b 100644 --- a/sound/pci/echoaudio/indigodj.c +++ b/sound/pci/echoaudio/indigodj.c @@ -57,8 +57,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x3410, 0xECC0, 0x00B0, 0, 0, 0}, /* Indigo DJ*/ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x00B0) }, /* Indigo DJ*/ + { } }; static const struct snd_pcm_hardware 
pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/indigodjx.c b/sound/pci/echoaudio/indigodjx.c index a14a7dc8c87db0..b1878ecb83753e 100644 --- a/sound/pci/echoaudio/indigodjx.c +++ b/sound/pci/echoaudio/indigodjx.c @@ -57,8 +57,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x3410, 0xECC0, 0x00E0, 0, 0, 0}, /* Indigo DJx*/ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x00E0) }, /* Indigo DJx*/ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/indigoio.c b/sound/pci/echoaudio/indigoio.c index 97e024450d19c8..2c6a6f74b0bdcc 100644 --- a/sound/pci/echoaudio/indigoio.c +++ b/sound/pci/echoaudio/indigoio.c @@ -58,8 +58,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x3410, 0xECC0, 0x00A0, 0, 0, 0}, /* Indigo IO*/ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x00A0) }, /* Indigo IO*/ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/indigoiox.c b/sound/pci/echoaudio/indigoiox.c index a017c966b4dced..dfaa563317ad14 100644 --- a/sound/pci/echoaudio/indigoiox.c +++ b/sound/pci/echoaudio/indigoiox.c @@ -58,8 +58,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x3410, 0xECC0, 0x00D0, 0, 0, 0}, /* Indigo IOx */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x00D0) }, /* Indigo IOx */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/layla20.c b/sound/pci/echoaudio/layla20.c index 7e38bc9c025d54..82c089f7b452b7 100644 --- a/sound/pci/echoaudio/layla20.c +++ b/sound/pci/echoaudio/layla20.c @@ -65,9 +65,9 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x1801, 0xECC0, 0x0030, 0, 0, 0}, /* DSP 56301 Layla20 rev.0 */ - {0x1057, 0x1801, 0xECC0, 
0x0031, 0, 0, 0}, /* DSP 56301 Layla20 rev.1 */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0030) }, /* DSP 56301 Layla20 rev.0 */ + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0031) }, /* DSP 56301 Layla20 rev.1 */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/layla24.c b/sound/pci/echoaudio/layla24.c index 95c52210fb6540..efd676467a374c 100644 --- a/sound/pci/echoaudio/layla24.c +++ b/sound/pci/echoaudio/layla24.c @@ -76,8 +76,8 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x3410, 0xECC0, 0x0060, 0, 0, 0}, /* DSP 56361 Layla24 rev.0 */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0060) }, /* DSP 56361 Layla24 rev.0 */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/mia.c b/sound/pci/echoaudio/mia.c index a2d4b0003b570e..950aaec74a439a 100644 --- a/sound/pci/echoaudio/mia.c +++ b/sound/pci/echoaudio/mia.c @@ -66,9 +66,9 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x3410, 0xECC0, 0x0080, 0, 0, 0}, /* DSP 56361 Mia rev.0 */ - {0x1057, 0x3410, 0xECC0, 0x0081, 0, 0, 0}, /* DSP 56361 Mia rev.1 */ - {0,} + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0080) }, /* DSP 56361 Mia rev.0 */ + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0081) }, /* DSP 56361 Mia rev.1 */ + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/echoaudio/mona.c b/sound/pci/echoaudio/mona.c index 1b45a2b5066fec..4f999324f388d8 100644 --- a/sound/pci/echoaudio/mona.c +++ b/sound/pci/echoaudio/mona.c @@ -81,13 +81,19 @@ static const struct firmware card_fw[] = { }; static const struct pci_device_id snd_echo_ids[] = { - {0x1057, 0x1801, 0xECC0, 0x0070, 0, 0, 0}, /* DSP 56301 Mona rev.0 */ - {0x1057, 0x1801, 0xECC0, 0x0071, 0, 0, 0}, /* DSP 56301 Mona rev.1 */ - {0x1057, 0x1801, 0xECC0, 0x0072, 0, 0, 0}, /* DSP 
56301 Mona rev.2 */ - {0x1057, 0x3410, 0xECC0, 0x0070, 0, 0, 0}, /* DSP 56361 Mona rev.0 */ - {0x1057, 0x3410, 0xECC0, 0x0071, 0, 0, 0}, /* DSP 56361 Mona rev.1 */ - {0x1057, 0x3410, 0xECC0, 0x0072, 0, 0, 0}, /* DSP 56361 Mona rev.2 */ - {0,} + /* DSP 56301 Mona rev.0 */ + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0070) }, + /* DSP 56301 Mona rev.1 */ + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0071) }, + /* DSP 56301 Mona rev.2 */ + { PCI_DEVICE_SUB(0x1057, 0x1801, 0xECC0, 0x0072) }, + /* DSP 56361 Mona rev.0 */ + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0070) }, + /* DSP 56361 Mona rev.1 */ + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0071) }, + /* DSP 56361 Mona rev.2 */ + { PCI_DEVICE_SUB(0x1057, 0x3410, 0xECC0, 0x0072) }, + { } }; static const struct snd_pcm_hardware pcm_hardware_skel = { diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c index 548e7d04990139..3b21bd2883b64b 100644 --- a/sound/pci/emu10k1/emu10k1.c +++ b/sound/pci/emu10k1/emu10k1.c @@ -58,10 +58,10 @@ MODULE_PARM_DESC(subsystem, "Force card subsystem model."); * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400 */ static const struct pci_device_id snd_emu10k1_ids[] = { - { PCI_VDEVICE(CREATIVE, 0x0002), 0 }, /* EMU10K1 */ - { PCI_VDEVICE(CREATIVE, 0x0004), 1 }, /* Audigy */ - { PCI_VDEVICE(CREATIVE, 0x0008), 1 }, /* Audigy 2 Value SB0400 */ - { 0, } + { PCI_VDEVICE(CREATIVE, 0x0002), .driver_data = 0 }, /* EMU10K1 */ + { PCI_VDEVICE(CREATIVE, 0x0004), .driver_data = 1 }, /* Audigy */ + { PCI_VDEVICE(CREATIVE, 0x0008), .driver_data = 1 }, /* Audigy 2 Value SB0400 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_emu10k1_ids); diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c index 1b207ca25814e4..ed4630c8342b43 100644 --- a/sound/pci/emu10k1/emu10k1x.c +++ b/sound/pci/emu10k1/emu10k1x.c @@ -1518,8 +1518,8 @@ static int snd_emu10k1x_probe(struct pci_dev *pci, // PCI IDs static const struct pci_device_id snd_emu10k1x_ids[] = { 
- { PCI_VDEVICE(CREATIVE, 0x0006), 0 }, /* Dell OEM version (EMU10K1) */ - { 0, } + { PCI_VDEVICE(CREATIVE, 0x0006) }, /* Dell OEM version (EMU10K1) */ + { } }; MODULE_DEVICE_TABLE(pci, snd_emu10k1x_ids); diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c index 657056a59175bc..0b9eb55a212181 100644 --- a/sound/pci/ens1370.c +++ b/sound/pci/ens1370.c @@ -426,14 +426,14 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id); static const struct pci_device_id snd_audiopci_ids[] = { #ifdef CHIP1370 - { PCI_VDEVICE(ENSONIQ, 0x5000), 0, }, /* ES1370 */ + { PCI_VDEVICE(ENSONIQ, 0x5000) }, /* ES1370 */ #endif #ifdef CHIP1371 - { PCI_VDEVICE(ENSONIQ, 0x1371), 0, }, /* ES1371 */ - { PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */ - { PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */ + { PCI_VDEVICE(ENSONIQ, 0x1371) }, /* ES1371 */ + { PCI_VDEVICE(ENSONIQ, 0x5880) }, /* ES1373 - CT5880 */ + { PCI_VDEVICE(ECTIVA, 0x8938) }, /* Ectiva EV1938 */ #endif - { 0, } + { } }; MODULE_DEVICE_TABLE(pci, snd_audiopci_ids); diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c index 280125eff36245..f4c7a9532f4a6f 100644 --- a/sound/pci/es1938.c +++ b/sound/pci/es1938.c @@ -222,8 +222,8 @@ struct es1938 { static irqreturn_t snd_es1938_interrupt(int irq, void *dev_id); static const struct pci_device_id snd_es1938_ids[] = { - { PCI_VDEVICE(ESS, 0x1969), 0, }, /* Solo-1 */ - { 0, } + { PCI_VDEVICE(ESS, 0x1969) }, /* Solo-1 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_es1938_ids); diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c index b7282b3fa1b161..f04628c8cbb729 100644 --- a/sound/pci/es1968.c +++ b/sound/pci/es1968.c @@ -549,13 +549,26 @@ struct es1968 { static irqreturn_t snd_es1968_interrupt(int irq, void *dev_id); static const struct pci_device_id snd_es1968_ids[] = { - /* Maestro 1 */ - { 0x1285, 0x0100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, TYPE_MAESTRO }, - /* Maestro 2 */ - { 0x125d, 0x1968, PCI_ANY_ID, PCI_ANY_ID, 
PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, TYPE_MAESTRO2 }, - /* Maestro 2E */ - { 0x125d, 0x1978, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, TYPE_MAESTRO2E }, - { 0, } + { + /* Maestro 1 */ + PCI_DEVICE(0x1285, 0x0100), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + .driver_data = TYPE_MAESTRO, + }, { + /* Maestro 2 */ + PCI_DEVICE(0x125d, 0x1968), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + .driver_data = TYPE_MAESTRO2, + }, { + /* Maestro 2E */ + PCI_DEVICE(0x125d, 0x1978), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + .driver_data = TYPE_MAESTRO2E, + }, + { } }; MODULE_DEVICE_TABLE(pci, snd_es1968_ids); diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c index 4ca992449ea301..9cc96b807dd798 100644 --- a/sound/pci/fm801.c +++ b/sound/pci/fm801.c @@ -240,9 +240,18 @@ static inline u16 fm801_ioread16(struct fm801 *chip, unsigned short offset) } static const struct pci_device_id snd_fm801_ids[] = { - { 0x1319, 0x0801, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0, }, /* FM801 */ - { 0x5213, 0x0510, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0, }, /* Gallant Odyssey Sound 4 */ - { 0, } + { + /* FM801 */ + PCI_DEVICE(0x1319, 0x0801), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + /* Gallant Odyssey Sound 4 */ + PCI_DEVICE(0x5213, 0x0510), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, + { } }; MODULE_DEVICE_TABLE(pci, snd_fm801_ids); diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c index 1e39b985bef263..0e27e84d2ce453 100644 --- a/sound/pci/ice1712/ice1712.c +++ b/sound/pci/ice1712/ice1712.c @@ -86,8 +86,8 @@ MODULE_PARM_DESC(dxr_enable, "Enable DXR support for Terratec DMX6FIRE."); static const struct pci_device_id snd_ice1712_ids[] = { - { PCI_VDEVICE(ICE, PCI_DEVICE_ID_ICE_1712), 0 }, /* ICE1712 */ - { 0, } + { PCI_VDEVICE(ICE, 
PCI_DEVICE_ID_ICE_1712) }, /* ICE1712 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_ice1712_ids); diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c index 65bf48647d0894..2e64f9c020e5a1 100644 --- a/sound/pci/ice1712/ice1724.c +++ b/sound/pci/ice1712/ice1724.c @@ -62,8 +62,8 @@ MODULE_PARM_DESC(model, "Use the given board model."); /* Both VT1720 and VT1724 have the same PCI IDs */ static const struct pci_device_id snd_vt1724_ids[] = { - { PCI_VDEVICE(ICE, PCI_DEVICE_ID_VT1724), 0 }, - { 0, } + { PCI_VDEVICE(ICE, PCI_DEVICE_ID_VT1724) }, + { } }; MODULE_DEVICE_TABLE(pci, snd_vt1724_ids); diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 3b53c5e63c298e..e2ea9016a73e46 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c @@ -384,30 +384,100 @@ struct intel8x0 { }; static const struct pci_device_id snd_intel8x0_ids[] = { - { PCI_VDEVICE(INTEL, 0x2415), DEVICE_INTEL }, /* 82801AA */ - { PCI_VDEVICE(INTEL, 0x2425), DEVICE_INTEL }, /* 82901AB */ - { PCI_VDEVICE(INTEL, 0x2445), DEVICE_INTEL }, /* 82801BA */ - { PCI_VDEVICE(INTEL, 0x2485), DEVICE_INTEL }, /* ICH3 */ - { PCI_VDEVICE(INTEL, 0x24c5), DEVICE_INTEL_ICH4 }, /* ICH4 */ - { PCI_VDEVICE(INTEL, 0x24d5), DEVICE_INTEL_ICH4 }, /* ICH5 */ - { PCI_VDEVICE(INTEL, 0x25a6), DEVICE_INTEL_ICH4 }, /* ESB */ - { PCI_VDEVICE(INTEL, 0x266e), DEVICE_INTEL_ICH4 }, /* ICH6 */ - { PCI_VDEVICE(INTEL, 0x27de), DEVICE_INTEL_ICH4 }, /* ICH7 */ - { PCI_VDEVICE(INTEL, 0x2698), DEVICE_INTEL_ICH4 }, /* ESB2 */ - { PCI_VDEVICE(INTEL, 0x7195), DEVICE_INTEL }, /* 440MX */ - { PCI_VDEVICE(SI, 0x7012), DEVICE_SIS }, /* SI7012 */ - { PCI_VDEVICE(NVIDIA, 0x01b1), DEVICE_NFORCE }, /* NFORCE */ - { PCI_VDEVICE(NVIDIA, 0x003a), DEVICE_NFORCE }, /* MCP04 */ - { PCI_VDEVICE(NVIDIA, 0x006a), DEVICE_NFORCE }, /* NFORCE2 */ - { PCI_VDEVICE(NVIDIA, 0x0059), DEVICE_NFORCE }, /* CK804 */ - { PCI_VDEVICE(NVIDIA, 0x008a), DEVICE_NFORCE }, /* CK8 */ - { PCI_VDEVICE(NVIDIA, 0x00da), DEVICE_NFORCE }, /* NFORCE3 */ - { 
PCI_VDEVICE(NVIDIA, 0x00ea), DEVICE_NFORCE }, /* CK8S */ - { PCI_VDEVICE(NVIDIA, 0x026b), DEVICE_NFORCE }, /* MCP51 */ - { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */ - { PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */ - { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */ - { 0, } + { + /* 82801AA */ + PCI_VDEVICE(INTEL, 0x2415), + .driver_data = DEVICE_INTEL, + }, { + /* 82901AB */ + PCI_VDEVICE(INTEL, 0x2425), + .driver_data = DEVICE_INTEL, + }, { + /* 82801BA */ + PCI_VDEVICE(INTEL, 0x2445), + .driver_data = DEVICE_INTEL, + }, { + /* ICH3 */ + PCI_VDEVICE(INTEL, 0x2485), + .driver_data = DEVICE_INTEL, + }, { + /* ICH4 */ + PCI_VDEVICE(INTEL, 0x24c5), + .driver_data = DEVICE_INTEL_ICH4, + }, { + /* ICH5 */ + PCI_VDEVICE(INTEL, 0x24d5), + .driver_data = DEVICE_INTEL_ICH4, + }, { + /* ESB */ + PCI_VDEVICE(INTEL, 0x25a6), + .driver_data = DEVICE_INTEL_ICH4, + }, { + /* ICH6 */ + PCI_VDEVICE(INTEL, 0x266e), + .driver_data = DEVICE_INTEL_ICH4, + }, { + /* ICH7 */ + PCI_VDEVICE(INTEL, 0x27de), + .driver_data = DEVICE_INTEL_ICH4, + }, { + /* ESB2 */ + PCI_VDEVICE(INTEL, 0x2698), + .driver_data = DEVICE_INTEL_ICH4, + }, { + /* 440MX */ + PCI_VDEVICE(INTEL, 0x7195), + .driver_data = DEVICE_INTEL, + }, { + /* SI7012 */ + PCI_VDEVICE(SI, 0x7012), + .driver_data = DEVICE_SIS, + }, { + /* NFORCE */ + PCI_VDEVICE(NVIDIA, 0x01b1), + .driver_data = DEVICE_NFORCE, + }, { + /* MCP04 */ + PCI_VDEVICE(NVIDIA, 0x003a), + .driver_data = DEVICE_NFORCE, + }, { + /* NFORCE2 */ + PCI_VDEVICE(NVIDIA, 0x006a), + .driver_data = DEVICE_NFORCE, + }, { + /* CK804 */ + PCI_VDEVICE(NVIDIA, 0x0059), + .driver_data = DEVICE_NFORCE, + }, { + /* CK8 */ + PCI_VDEVICE(NVIDIA, 0x008a), + .driver_data = DEVICE_NFORCE, + }, { + /* NFORCE3 */ + PCI_VDEVICE(NVIDIA, 0x00da), + .driver_data = DEVICE_NFORCE, + }, { + /* CK8S */ + PCI_VDEVICE(NVIDIA, 0x00ea), + .driver_data = DEVICE_NFORCE, + }, { + /* MCP51 */ + PCI_VDEVICE(NVIDIA, 0x026b), + .driver_data = DEVICE_NFORCE, + }, { + /* 
AMD8111 */ + PCI_VDEVICE(AMD, 0x746d), + .driver_data = DEVICE_INTEL, + }, { + /* AMD768 */ + PCI_VDEVICE(AMD, 0x7445), + .driver_data = DEVICE_INTEL, + }, { + /* Ali5455 */ + PCI_VDEVICE(AL, 0x5455), + .driver_data = DEVICE_ALI, + }, + { } }; MODULE_DEVICE_TABLE(pci, snd_intel8x0_ids); diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c index 84e1b7ea34e2a6..27dbf612542783 100644 --- a/sound/pci/intel8x0m.c +++ b/sound/pci/intel8x0m.c @@ -190,26 +190,78 @@ struct intel8x0m { }; static const struct pci_device_id snd_intel8x0m_ids[] = { - { PCI_VDEVICE(INTEL, 0x2416), DEVICE_INTEL }, /* 82801AA */ - { PCI_VDEVICE(INTEL, 0x2426), DEVICE_INTEL }, /* 82901AB */ - { PCI_VDEVICE(INTEL, 0x2446), DEVICE_INTEL }, /* 82801BA */ - { PCI_VDEVICE(INTEL, 0x2486), DEVICE_INTEL }, /* ICH3 */ - { PCI_VDEVICE(INTEL, 0x24c6), DEVICE_INTEL }, /* ICH4 */ - { PCI_VDEVICE(INTEL, 0x24d6), DEVICE_INTEL }, /* ICH5 */ - { PCI_VDEVICE(INTEL, 0x266d), DEVICE_INTEL }, /* ICH6 */ - { PCI_VDEVICE(INTEL, 0x27dd), DEVICE_INTEL }, /* ICH7 */ - { PCI_VDEVICE(INTEL, 0x7196), DEVICE_INTEL }, /* 440MX */ - { PCI_VDEVICE(AMD, 0x7446), DEVICE_INTEL }, /* AMD768 */ - { PCI_VDEVICE(SI, 0x7013), DEVICE_SIS }, /* SI7013 */ - { PCI_VDEVICE(NVIDIA, 0x01c1), DEVICE_NFORCE }, /* NFORCE */ - { PCI_VDEVICE(NVIDIA, 0x0069), DEVICE_NFORCE }, /* NFORCE2 */ - { PCI_VDEVICE(NVIDIA, 0x0089), DEVICE_NFORCE }, /* NFORCE2s */ - { PCI_VDEVICE(NVIDIA, 0x00d9), DEVICE_NFORCE }, /* NFORCE3 */ - { PCI_VDEVICE(AMD, 0x746e), DEVICE_INTEL }, /* AMD8111 */ + { + /* 82801AA */ + PCI_VDEVICE(INTEL, 0x2416), + .driver_data = DEVICE_INTEL, + }, { + /* 82901AB */ + PCI_VDEVICE(INTEL, 0x2426), + .driver_data = DEVICE_INTEL + }, { + /* 82801BA */ + PCI_VDEVICE(INTEL, 0x2446), + .driver_data = DEVICE_INTEL + }, { + /* ICH3 */ + PCI_VDEVICE(INTEL, 0x2486), + .driver_data = DEVICE_INTEL + }, { + /* ICH4 */ + PCI_VDEVICE(INTEL, 0x24c6), + .driver_data = DEVICE_INTEL, + }, { + /* ICH5 */ + PCI_VDEVICE(INTEL, 0x24d6), + .driver_data = 
DEVICE_INTEL, + }, { + /* ICH6 */ + PCI_VDEVICE(INTEL, 0x266d), + .driver_data = DEVICE_INTEL, + }, { + /* ICH7 */ + PCI_VDEVICE(INTEL, 0x27dd), + .driver_data = DEVICE_INTEL, + }, { + /* 440MX */ + PCI_VDEVICE(INTEL, 0x7196), + .driver_data = DEVICE_INTEL, + }, { + /* AMD768 */ + PCI_VDEVICE(AMD, 0x7446), + .driver_data = DEVICE_INTEL, + }, { + /* SI7013 */ + PCI_VDEVICE(SI, 0x7013), + .driver_data = DEVICE_SIS, + }, { + /* NFORCE */ + PCI_VDEVICE(NVIDIA, 0x01c1), + .driver_data = DEVICE_NFORCE, + }, { + /* NFORCE2 */ + PCI_VDEVICE(NVIDIA, 0x0069), + .driver_data = DEVICE_NFORCE, + }, { + /* NFORCE2s */ + PCI_VDEVICE(NVIDIA, 0x0089), + .driver_data = DEVICE_NFORCE, + }, { + /* NFORCE3 */ + PCI_VDEVICE(NVIDIA, 0x00d9), + .driver_data = DEVICE_NFORCE, + }, { + /* AMD8111 */ + PCI_VDEVICE(AMD, 0x746e), + .driver_data = DEVICE_INTEL #if 0 - { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */ + }, { + /* Ali5455 */ + PCI_VDEVICE(AL, 0x5455), + .driver_data = DEVICE_ALI, #endif - { 0, } + }, + { } }; MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids); diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c index 3353980d5cd8a2..dd45ffa171e183 100644 --- a/sound/pci/maestro3.c +++ b/sound/pci/maestro3.c @@ -779,23 +779,40 @@ struct snd_m3 { * pci ids */ static const struct pci_device_id snd_m3_ids[] = { - {PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ALLEGRO_1, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ALLEGRO, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_CANYON3D_2LE, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_CANYON3D_2, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_1, 
PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_HW, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_2, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {0,}, + { + PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ALLEGRO_1), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ALLEGRO), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_CANYON3D_2LE), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_CANYON3D_2), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_1), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_HW), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_2), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, + { }, }; MODULE_DEVICE_TABLE(pci, snd_m3_ids); diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c index a7760a23bfe900..f451554cff5f1f 100644 --- a/sound/pci/mixart/mixart.c +++ b/sound/pci/mixart/mixart.c @@ -48,8 +48,8 @@ MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard."); */ static const struct pci_device_id snd_mixart_ids[] = { - { PCI_VDEVICE(MOTOROLA, 0x0003), 0, }, /* MC8240 */ - { 0, } + { PCI_VDEVICE(MOTOROLA, 0x0003) }, /* MC8240 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_mixart_ids); diff --git 
a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c index da74b923bc88fd..a7da55d9c025a3 100644 --- a/sound/pci/nm256/nm256.c +++ b/sound/pci/nm256/nm256.c @@ -245,10 +245,10 @@ struct nm256 { * PCI ids */ static const struct pci_device_id snd_nm256_ids[] = { - {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO), 0}, - {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO), 0}, - {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO), 0}, - {0,}, + { PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO) }, + { PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO) }, + { PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO) }, + { }, }; MODULE_DEVICE_TABLE(pci, snd_nm256_ids); diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c index e7d63972c2cafb..1eea40e94c43cb 100644 --- a/sound/pci/pcxhr/pcxhr.c +++ b/sound/pci/pcxhr/pcxhr.c @@ -89,41 +89,41 @@ enum { }; static const struct pci_device_id pcxhr_ids[] = { - { 0x10b5, 0x9656, 0x1369, 0xb001, 0, 0, PCI_ID_VX882HR, }, - { 0x10b5, 0x9656, 0x1369, 0xb101, 0, 0, PCI_ID_PCX882HR, }, - { 0x10b5, 0x9656, 0x1369, 0xb201, 0, 0, PCI_ID_VX881HR, }, - { 0x10b5, 0x9656, 0x1369, 0xb301, 0, 0, PCI_ID_PCX881HR, }, - { 0x10b5, 0x9056, 0x1369, 0xb021, 0, 0, PCI_ID_VX882E, }, - { 0x10b5, 0x9056, 0x1369, 0xb121, 0, 0, PCI_ID_PCX882E, }, - { 0x10b5, 0x9056, 0x1369, 0xb221, 0, 0, PCI_ID_VX881E, }, - { 0x10b5, 0x9056, 0x1369, 0xb321, 0, 0, PCI_ID_PCX881E, }, - { 0x10b5, 0x9656, 0x1369, 0xb401, 0, 0, PCI_ID_VX1222HR, }, - { 0x10b5, 0x9656, 0x1369, 0xb501, 0, 0, PCI_ID_PCX1222HR, }, - { 0x10b5, 0x9656, 0x1369, 0xb601, 0, 0, PCI_ID_VX1221HR, }, - { 0x10b5, 0x9656, 0x1369, 0xb701, 0, 0, PCI_ID_PCX1221HR, }, - { 0x10b5, 0x9056, 0x1369, 0xb421, 0, 0, PCI_ID_VX1222E, }, - { 0x10b5, 0x9056, 0x1369, 0xb521, 0, 0, PCI_ID_PCX1222E, }, - { 0x10b5, 0x9056, 0x1369, 0xb621, 0, 0, PCI_ID_VX1221E, }, - { 0x10b5, 0x9056, 0x1369, 0xb721, 0, 0, PCI_ID_PCX1221E, }, - { 0x10b5, 0x9056, 0x1369, 
0xba01, 0, 0, PCI_ID_VX222HR, }, - { 0x10b5, 0x9056, 0x1369, 0xba21, 0, 0, PCI_ID_VX222E, }, - { 0x10b5, 0x9056, 0x1369, 0xbd01, 0, 0, PCI_ID_PCX22HR, }, - { 0x10b5, 0x9056, 0x1369, 0xbd21, 0, 0, PCI_ID_PCX22E, }, - { 0x10b5, 0x9056, 0x1369, 0xbc01, 0, 0, PCI_ID_VX222HRMIC, }, - { 0x10b5, 0x9056, 0x1369, 0xbc21, 0, 0, PCI_ID_VX222E_MIC, }, - { 0x10b5, 0x9056, 0x1369, 0xbb01, 0, 0, PCI_ID_PCX924HR, }, - { 0x10b5, 0x9056, 0x1369, 0xbb21, 0, 0, PCI_ID_PCX924E, }, - { 0x10b5, 0x9056, 0x1369, 0xbf01, 0, 0, PCI_ID_PCX924HRMIC, }, - { 0x10b5, 0x9056, 0x1369, 0xbf21, 0, 0, PCI_ID_PCX924E_MIC, }, - { 0x10b5, 0x9656, 0x1369, 0xd001, 0, 0, PCI_ID_VX442HR, }, - { 0x10b5, 0x9656, 0x1369, 0xd101, 0, 0, PCI_ID_PCX442HR, }, - { 0x10b5, 0x9056, 0x1369, 0xd021, 0, 0, PCI_ID_VX442E, }, - { 0x10b5, 0x9056, 0x1369, 0xd121, 0, 0, PCI_ID_PCX442E, }, - { 0x10b5, 0x9656, 0x1369, 0xd201, 0, 0, PCI_ID_VX822HR, }, - { 0x10b5, 0x9656, 0x1369, 0xd301, 0, 0, PCI_ID_PCX822HR, }, - { 0x10b5, 0x9056, 0x1369, 0xd221, 0, 0, PCI_ID_VX822E, }, - { 0x10b5, 0x9056, 0x1369, 0xd321, 0, 0, PCI_ID_PCX822E, }, - { 0, } + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xb001), .driver_data = PCI_ID_VX882HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xb101), .driver_data = PCI_ID_PCX882HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xb201), .driver_data = PCI_ID_VX881HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xb301), .driver_data = PCI_ID_PCX881HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xb021), .driver_data = PCI_ID_VX882E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xb121), .driver_data = PCI_ID_PCX882E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xb221), .driver_data = PCI_ID_VX881E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xb321), .driver_data = PCI_ID_PCX881E }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xb401), .driver_data = PCI_ID_VX1222HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xb501), .driver_data = PCI_ID_PCX1222HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xb601), 
.driver_data = PCI_ID_VX1221HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xb701), .driver_data = PCI_ID_PCX1221HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xb421), .driver_data = PCI_ID_VX1222E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xb521), .driver_data = PCI_ID_PCX1222E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xb621), .driver_data = PCI_ID_VX1221E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xb721), .driver_data = PCI_ID_PCX1221E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xba01), .driver_data = PCI_ID_VX222HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xba21), .driver_data = PCI_ID_VX222E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xbd01), .driver_data = PCI_ID_PCX22HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xbd21), .driver_data = PCI_ID_PCX22E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xbc01), .driver_data = PCI_ID_VX222HRMIC }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xbc21), .driver_data = PCI_ID_VX222E_MIC }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xbb01), .driver_data = PCI_ID_PCX924HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xbb21), .driver_data = PCI_ID_PCX924E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xbf01), .driver_data = PCI_ID_PCX924HRMIC }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xbf21), .driver_data = PCI_ID_PCX924E_MIC }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xd001), .driver_data = PCI_ID_VX442HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xd101), .driver_data = PCI_ID_PCX442HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xd021), .driver_data = PCI_ID_VX442E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xd121), .driver_data = PCI_ID_PCX442E }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xd201), .driver_data = PCI_ID_VX822HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9656, 0x1369, 0xd301), .driver_data = PCI_ID_PCX822HR }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xd221), .driver_data = PCI_ID_VX822E }, + { PCI_DEVICE_SUB(0x10b5, 0x9056, 0x1369, 0xd321), .driver_data = 
PCI_ID_PCX822E }, + { } }; MODULE_DEVICE_TABLE(pci, pcxhr_ids); diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c index ca9bbf55465091..454a30a2c07e17 100644 --- a/sound/pci/rme32.c +++ b/sound/pci/rme32.c @@ -211,10 +211,10 @@ struct rme32 { }; static const struct pci_device_id snd_rme32_ids[] = { - {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32), 0,}, - {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8), 0,}, - {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO), 0,}, - {0,} + { PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32) }, + { PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8) }, + { PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO) }, + { } }; MODULE_DEVICE_TABLE(pci, snd_rme32_ids); diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c index 58b8ebf1a24e7d..892fcc5985575d 100644 --- a/sound/pci/rme96.c +++ b/sound/pci/rme96.c @@ -242,11 +242,11 @@ struct rme96 { }; static const struct pci_device_id snd_rme96_ids[] = { - { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96), 0, }, - { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8), 0, }, - { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO), 0, }, - { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST), 0, }, - { 0, } + { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96) }, + { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8) }, + { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO) }, + { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST) }, + { } }; MODULE_DEVICE_TABLE(pci, snd_rme96_ids); diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c index a4c72799d0348d..a885d544acd654 100644 --- a/sound/pci/sonicvibes.c +++ b/sound/pci/sonicvibes.c @@ -227,8 +227,8 @@ struct sonicvibes { }; static const struct pci_device_id snd_sonic_ids[] = { - { PCI_VDEVICE(S3, 0xca00), 0, }, - { 0, } + { PCI_VDEVICE(S3, 0xca00) }, + { } }; MODULE_DEVICE_TABLE(pci, snd_sonic_ids); diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c index ddb6ccc72e443d..8dcbd022ca1fc5 100644 --- 
a/sound/pci/trident/trident.c +++ b/sound/pci/trident/trident.c @@ -36,12 +36,16 @@ module_param_array(wavetable_size, int, NULL, 0444); MODULE_PARM_DESC(wavetable_size, "Maximum memory size in kB for wavetable synth."); static const struct pci_device_id snd_trident_ids[] = { - {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX), - PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, - {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX), - 0, 0, 0}, - {PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018), 0, 0, 0}, - { 0, } + { + PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX), + .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, + .class_mask = 0xffff00, + }, { + PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX), + }, { + PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018) + }, + { } }; MODULE_DEVICE_TABLE(pci, snd_trident_ids); diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c index 41b322fbd9efd5..24ee1302f27d33 100644 --- a/sound/pci/via82xx.c +++ b/sound/pci/via82xx.c @@ -389,10 +389,10 @@ struct via82xx { static const struct pci_device_id snd_via82xx_ids[] = { /* 0x1106, 0x3058 */ - { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C686_5), TYPE_CARD_VIA686, }, /* 686A */ + { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C686_5), .driver_data = TYPE_CARD_VIA686 }, /* 686A */ /* 0x1106, 0x3059 */ - { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233_5), TYPE_CARD_VIA8233, }, /* VT8233 */ - { 0, } + { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233_5), .driver_data = TYPE_CARD_VIA8233 }, /* VT8233 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_via82xx_ids); diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c index a6f176d612e3af..9b84d3fb9eaf5b 100644 --- a/sound/pci/via82xx_modem.c +++ b/sound/pci/via82xx_modem.c @@ -246,8 +246,8 @@ struct via82xx_modem { }; static const struct pci_device_id snd_via82xx_modem_ids[] = { - { PCI_VDEVICE(VIA, 0x3068), TYPE_CARD_VIA82XX_MODEM, }, - { 0, } + { PCI_VDEVICE(VIA, 0x3068), .driver_data = 
TYPE_CARD_VIA82XX_MODEM }, + { } }; MODULE_DEVICE_TABLE(pci, snd_via82xx_modem_ids); diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c index 693a4e471cf7f6..0b89ca859e667d 100644 --- a/sound/pci/vx222/vx222.c +++ b/sound/pci/vx222/vx222.c @@ -47,9 +47,9 @@ enum { }; static const struct pci_device_id snd_vx222_ids[] = { - { 0x10b5, 0x9050, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_OLD, }, /* PLX */ - { 0x10b5, 0x9030, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_NEW, }, /* PLX */ - { 0, } + { PCI_DEVICE_SUB(0x10b5, 0x9050, 0x1369, PCI_ANY_ID), .driver_data = VX_PCI_VX222_OLD }, /* PLX */ + { PCI_DEVICE_SUB(0x10b5, 0x9030, 0x1369, PCI_ANY_ID), .driver_data = VX_PCI_VX222_NEW }, /* PLX */ + { } }; MODULE_DEVICE_TABLE(pci, snd_vx222_ids); diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c index 764ca59e98d1d3..d3fb047c9a450a 100644 --- a/sound/pci/ymfpci/ymfpci.c +++ b/sound/pci/ymfpci/ymfpci.c @@ -46,13 +46,13 @@ module_param_array(rear_switch, bool, NULL, 0444); MODULE_PARM_DESC(rear_switch, "Enable shared rear/line-in switch"); static const struct pci_device_id snd_ymfpci_ids[] = { - { PCI_VDEVICE(YAMAHA, 0x0004), 0, }, /* YMF724 */ - { PCI_VDEVICE(YAMAHA, 0x000d), 0, }, /* YMF724F */ - { PCI_VDEVICE(YAMAHA, 0x000a), 0, }, /* YMF740 */ - { PCI_VDEVICE(YAMAHA, 0x000c), 0, }, /* YMF740C */ - { PCI_VDEVICE(YAMAHA, 0x0010), 0, }, /* YMF744 */ - { PCI_VDEVICE(YAMAHA, 0x0012), 0, }, /* YMF754 */ - { 0, } + { PCI_VDEVICE(YAMAHA, 0x0004) }, /* YMF724 */ + { PCI_VDEVICE(YAMAHA, 0x000d) }, /* YMF724F */ + { PCI_VDEVICE(YAMAHA, 0x000a) }, /* YMF740 */ + { PCI_VDEVICE(YAMAHA, 0x000c) }, /* YMF740C */ + { PCI_VDEVICE(YAMAHA, 0x0010) }, /* YMF744 */ + { PCI_VDEVICE(YAMAHA, 0x0012) }, /* YMF754 */ + { } }; MODULE_DEVICE_TABLE(pci, snd_ymfpci_ids); diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index c5cf45881416f3..7a637d6b557644 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -59,6 +59,13 @@ static 
const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15-fc0xxx"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "OMEN Gaming Laptop 16-ap0xxx"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -479,6 +486,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 B7ED"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."), + DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7VE"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -668,6 +682,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_BOARD_NAME, "8EE4"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + DMI_MATCH(DMI_BOARD_NAME, "8E35"), + } + }, { .driver_data = &acp6x_card, .matches = { diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index c43861b7d41ed4..069ec05e4a6152 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -264,6 +264,7 @@ config SND_SOC_ALL_CODECS imply SND_SOC_STA529 imply SND_SOC_STAC9766 imply SND_SOC_STI_SAS + imply SND_SOC_TAC5XX2_SDW imply SND_SOC_TAS2552 imply SND_SOC_TAS2562 imply SND_SOC_TAS2764 @@ -2140,6 +2141,16 @@ config SND_SOC_STAC9766 config SND_SOC_STI_SAS tristate "codec Audio support for STI SAS codec" +config SND_SOC_TAC5XX2_SDW + tristate "Texas Instruments TAC5XX2 SoundWire Smart Amplifier" + depends on SOUNDWIRE + depends on SND_SOC_SDCA + help + This option enables support for Texas Instruments TAC5XX2 family + of SoundWire Smart Amplifiers. This includes TAC5572, TAC5672, + TAC5682 and TAS2883. To compile this driver as a module, choose + M here: the module will be called snd-soc-tac5xx2. 
+ config SND_SOC_TAS2552 tristate "Texas Instruments TAS2552 Mono Audio amplifier" depends on I2C diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 106fdc140d42be..2c2d0553f35fbb 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -313,6 +313,7 @@ snd-soc-sta350-y := sta350.o snd-soc-sta529-y := sta529.o snd-soc-stac9766-y := stac9766.o snd-soc-sti-sas-y := sti-sas.o +snd-soc-tac5xx2-sdw-y := tac5xx2-sdw.o snd-soc-tas5086-y := tas5086.o snd-soc-tas571x-y := tas571x.o snd-soc-tas5720-y := tas5720.o @@ -747,6 +748,7 @@ obj-$(CONFIG_SND_SOC_STA350) += snd-soc-sta350.o obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o obj-$(CONFIG_SND_SOC_STI_SAS) += snd-soc-sti-sas.o +obj-$(CONFIG_SND_SOC_TAC5XX2_SDW) += snd-soc-tac5xx2-sdw.o obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o obj-$(CONFIG_SND_SOC_TAS2562) += snd-soc-tas2562.o obj-$(CONFIG_SND_SOC_TAS2764) += snd-soc-tas2764.o diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c index fdda1b747bf7e5..6e8ef9cd1b31a7 100644 --- a/sound/soc/codecs/ab8500-codec.c +++ b/sound/soc/codecs/ab8500-codec.c @@ -60,19 +60,6 @@ low before proceeding with the configuration sequence */ #define AB8500_ANC_SM_DELAY 2000 -#define AB8500_FILTER_CONTROL(xname, xcount, xmin, xmax) \ -{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ - .info = filter_control_info, \ - .get = filter_control_get, .put = filter_control_put, \ - .private_value = (unsigned long)&(struct filter_control) \ - {.count = xcount, .min = xmin, .max = xmax} } - -struct filter_control { - long min, max; - unsigned int count; - long value[128]; -}; - /* Sidetone states */ static const char * const enum_sid_state[] = { "Unconfigured", @@ -85,45 +72,13 @@ enum sid_state { SID_FIR_CONFIGURED = 2, }; -static const char * const enum_anc_state[] = { - "Unconfigured", - "Apply FIR and IIR", - "FIR and IIR are configured", - "Apply FIR", - "FIR is 
configured", - "Apply IIR", - "IIR is configured" -}; -enum anc_state { - ANC_UNCONFIGURED = 0, - ANC_APPLY_FIR_IIR = 1, - ANC_FIR_IIR_CONFIGURED = 2, - ANC_APPLY_FIR = 3, - ANC_FIR_CONFIGURED = 4, - ANC_APPLY_IIR = 5, - ANC_IIR_CONFIGURED = 6 -}; - -/* Analog microphones */ -enum amic_idx { - AMIC_IDX_1A, - AMIC_IDX_1B, - AMIC_IDX_2 -}; - /* Private data for AB8500 device-driver */ struct ab8500_codec_drvdata { struct regmap *regmap; struct mutex ctrl_lock; /* Sidetone */ - long *sid_fir_values; enum sid_state sid_status; - - /* ANC */ - long *anc_fir_values; - long *anc_iir_values; - enum anc_state anc_status; }; static inline const char *amic_micbias_str(enum amic_micbias micbias) @@ -1024,89 +979,6 @@ static const struct snd_soc_dapm_route ab8500_dapm_routes_mic2_vamicx[] = { {"MIC2 V-AMICx Enable", NULL, "V-AMIC2"}, }; -/* ANC FIR-coefficients configuration sequence */ -static void anc_fir(struct snd_soc_component *component, - unsigned int bnk, unsigned int par, unsigned int val) -{ - if (par == 0 && bnk == 0) - snd_soc_component_update_bits(component, AB8500_ANCCONF1, - BIT(AB8500_ANCCONF1_ANCFIRUPDATE), - BIT(AB8500_ANCCONF1_ANCFIRUPDATE)); - - snd_soc_component_write(component, AB8500_ANCCONF5, val >> 8 & 0xff); - snd_soc_component_write(component, AB8500_ANCCONF6, val & 0xff); - - if (par == AB8500_ANC_FIR_COEFFS - 1 && bnk == 1) - snd_soc_component_update_bits(component, AB8500_ANCCONF1, - BIT(AB8500_ANCCONF1_ANCFIRUPDATE), 0); -} - -/* ANC IIR-coefficients configuration sequence */ -static void anc_iir(struct snd_soc_component *component, unsigned int bnk, - unsigned int par, unsigned int val) -{ - if (par == 0) { - if (bnk == 0) { - snd_soc_component_update_bits(component, AB8500_ANCCONF1, - BIT(AB8500_ANCCONF1_ANCIIRINIT), - BIT(AB8500_ANCCONF1_ANCIIRINIT)); - usleep_range(AB8500_ANC_SM_DELAY, AB8500_ANC_SM_DELAY*2); - snd_soc_component_update_bits(component, AB8500_ANCCONF1, - BIT(AB8500_ANCCONF1_ANCIIRINIT), 0); - usleep_range(AB8500_ANC_SM_DELAY, 
AB8500_ANC_SM_DELAY*2); - } else { - snd_soc_component_update_bits(component, AB8500_ANCCONF1, - BIT(AB8500_ANCCONF1_ANCIIRUPDATE), - BIT(AB8500_ANCCONF1_ANCIIRUPDATE)); - } - } else if (par > 3) { - snd_soc_component_write(component, AB8500_ANCCONF7, 0); - snd_soc_component_write(component, AB8500_ANCCONF8, val >> 16 & 0xff); - } - - snd_soc_component_write(component, AB8500_ANCCONF7, val >> 8 & 0xff); - snd_soc_component_write(component, AB8500_ANCCONF8, val & 0xff); - - if (par == AB8500_ANC_IIR_COEFFS - 1 && bnk == 1) - snd_soc_component_update_bits(component, AB8500_ANCCONF1, - BIT(AB8500_ANCCONF1_ANCIIRUPDATE), 0); -} - -/* ANC IIR-/FIR-coefficients configuration sequence */ -static void anc_configure(struct snd_soc_component *component, - bool apply_fir, bool apply_iir) -{ - struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(component->dev); - unsigned int bnk, par, val; - - dev_dbg(component->dev, "%s: Enter.\n", __func__); - - if (apply_fir) - snd_soc_component_update_bits(component, AB8500_ANCCONF1, - BIT(AB8500_ANCCONF1_ENANC), 0); - - snd_soc_component_update_bits(component, AB8500_ANCCONF1, - BIT(AB8500_ANCCONF1_ENANC), BIT(AB8500_ANCCONF1_ENANC)); - - if (apply_fir) - for (bnk = 0; bnk < AB8500_NR_OF_ANC_COEFF_BANKS; bnk++) - for (par = 0; par < AB8500_ANC_FIR_COEFFS; par++) { - val = snd_soc_component_read(component, - drvdata->anc_fir_values[par]); - anc_fir(component, bnk, par, val); - } - - if (apply_iir) - for (bnk = 0; bnk < AB8500_NR_OF_ANC_COEFF_BANKS; bnk++) - for (par = 0; par < AB8500_ANC_IIR_COEFFS; par++) { - val = snd_soc_component_read(component, - drvdata->anc_iir_values[par]); - anc_iir(component, bnk, par, val); - } - - dev_dbg(component->dev, "%s: Exit.\n", __func__); -} - /* * Control-events */ @@ -1130,7 +1002,7 @@ static int sid_status_control_put(struct snd_kcontrol *kcontrol, { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(component->dev); - 
unsigned int param, sidconf, val; + unsigned int param, sidconf; int status = 1; dev_dbg(component->dev, "%s: Enter\n", __func__); @@ -1159,9 +1031,8 @@ static int sid_status_control_put(struct snd_kcontrol *kcontrol, snd_soc_component_write(component, AB8500_SIDFIRADR, 0); for (param = 0; param < AB8500_SID_FIR_COEFFS; param++) { - val = snd_soc_component_read(component, drvdata->sid_fir_values[param]); - snd_soc_component_write(component, AB8500_SIDFIRCOEF1, val >> 8 & 0xff); - snd_soc_component_write(component, AB8500_SIDFIRCOEF2, val & 0xff); + snd_soc_component_write(component, AB8500_SIDFIRCOEF1, 0); + snd_soc_component_write(component, AB8500_SIDFIRCOEF2, 0); } snd_soc_component_update_bits(component, AB8500_SIDFIRADR, @@ -1180,136 +1051,6 @@ static int sid_status_control_put(struct snd_kcontrol *kcontrol, return status; } -static int anc_status_control_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(component->dev); - - mutex_lock(&drvdata->ctrl_lock); - ucontrol->value.enumerated.item[0] = drvdata->anc_status; - mutex_unlock(&drvdata->ctrl_lock); - - return 0; -} - -static int anc_status_control_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct snd_soc_dapm_context *dapm = snd_soc_component_to_dapm(component); - struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(component->dev); - struct device *dev = component->dev; - bool apply_fir, apply_iir; - unsigned int req; - int status; - - dev_dbg(dev, "%s: Enter.\n", __func__); - - mutex_lock(&drvdata->ctrl_lock); - - req = ucontrol->value.enumerated.item[0]; - if (req >= ARRAY_SIZE(enum_anc_state)) { - status = -EINVAL; - goto cleanup; - } - if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR && - req != ANC_APPLY_IIR) { - dev_err(dev, "%s: ERROR: 
Unsupported status to set '%s'!\n", - __func__, enum_anc_state[req]); - status = -EINVAL; - goto cleanup; - } - apply_fir = req == ANC_APPLY_FIR || req == ANC_APPLY_FIR_IIR; - apply_iir = req == ANC_APPLY_IIR || req == ANC_APPLY_FIR_IIR; - - status = snd_soc_dapm_force_enable_pin(dapm, "ANC Configure Input"); - if (status < 0) { - dev_err(dev, - "%s: ERROR: Failed to enable power (status = %d)!\n", - __func__, status); - goto cleanup; - } - snd_soc_dapm_sync(dapm); - - anc_configure(component, apply_fir, apply_iir); - - if (apply_fir) { - if (drvdata->anc_status == ANC_IIR_CONFIGURED) - drvdata->anc_status = ANC_FIR_IIR_CONFIGURED; - else if (drvdata->anc_status != ANC_FIR_IIR_CONFIGURED) - drvdata->anc_status = ANC_FIR_CONFIGURED; - } - if (apply_iir) { - if (drvdata->anc_status == ANC_FIR_CONFIGURED) - drvdata->anc_status = ANC_FIR_IIR_CONFIGURED; - else if (drvdata->anc_status != ANC_FIR_IIR_CONFIGURED) - drvdata->anc_status = ANC_IIR_CONFIGURED; - } - - status = snd_soc_dapm_disable_pin(dapm, "ANC Configure Input"); - snd_soc_dapm_sync(dapm); - -cleanup: - mutex_unlock(&drvdata->ctrl_lock); - - if (status < 0) - dev_err(dev, "%s: Unable to configure ANC! (status = %d)\n", - __func__, status); - - dev_dbg(dev, "%s: Exit.\n", __func__); - - return (status < 0) ? 
status : 1; -} - -static int filter_control_info(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - struct filter_control *fc = - (struct filter_control *)kcontrol->private_value; - - uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; - uinfo->count = fc->count; - uinfo->value.integer.min = fc->min; - uinfo->value.integer.max = fc->max; - - return 0; -} - -static int filter_control_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct ab8500_codec_drvdata *drvdata = snd_soc_component_get_drvdata(component); - struct filter_control *fc = - (struct filter_control *)kcontrol->private_value; - unsigned int i; - - mutex_lock(&drvdata->ctrl_lock); - for (i = 0; i < fc->count; i++) - ucontrol->value.integer.value[i] = fc->value[i]; - mutex_unlock(&drvdata->ctrl_lock); - - return 0; -} - -static int filter_control_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct ab8500_codec_drvdata *drvdata = snd_soc_component_get_drvdata(component); - struct filter_control *fc = - (struct filter_control *)kcontrol->private_value; - unsigned int i; - - mutex_lock(&drvdata->ctrl_lock); - for (i = 0; i < fc->count; i++) - fc->value[i] = ucontrol->value.integer.value[i]; - mutex_unlock(&drvdata->ctrl_lock); - - return 0; -} - /* * Controls - Non-DAPM ASoC */ @@ -1597,7 +1338,6 @@ static SOC_ENUM_SINGLE_DECL(soc_enum_bfifomast, static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_sidstate, enum_sid_state); /* ANC */ -static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_ancstate, enum_anc_state); static struct snd_kcontrol_new ab8500_ctrls[] = { /* Charge pump */ @@ -1873,8 +1613,6 @@ static struct snd_kcontrol_new ab8500_ctrls[] = { AB8500_FIFOCONF6_BFIFOSAMPLE_MAX, 0), /* ANC */ - SOC_ENUM_EXT("ANC Status", soc_enum_ancstate, - anc_status_control_get, anc_status_control_put), SOC_SINGLE_XR_SX("ANC 
Warp Delay Shift", AB8500_ANCCONF2, 1, AB8500_ANCCONF2_SHIFT, AB8500_ANCCONF2_MIN, AB8500_ANCCONF2_MAX, 0), @@ -1895,21 +1633,6 @@ static struct snd_kcontrol_new ab8500_ctrls[] = { AB8500_SIDFIRADR, AB8500_SIDFIRADR_FIRSIDSET, 0), }; -static struct snd_kcontrol_new ab8500_filter_controls[] = { - AB8500_FILTER_CONTROL("ANC FIR Coefficients", AB8500_ANC_FIR_COEFFS, - AB8500_ANC_FIR_COEFF_MIN, AB8500_ANC_FIR_COEFF_MAX), - AB8500_FILTER_CONTROL("ANC IIR Coefficients", AB8500_ANC_IIR_COEFFS, - AB8500_ANC_IIR_COEFF_MIN, AB8500_ANC_IIR_COEFF_MAX), - AB8500_FILTER_CONTROL("Sidetone FIR Coefficients", - AB8500_SID_FIR_COEFFS, AB8500_SID_FIR_COEFF_MIN, - AB8500_SID_FIR_COEFF_MAX) -}; -enum ab8500_filter { - AB8500_FILTER_ANC_FIR = 0, - AB8500_FILTER_ANC_IIR = 1, - AB8500_FILTER_SID_FIR = 2, -}; - /* * Extended interface for codec-driver */ @@ -2454,7 +2177,6 @@ static int ab8500_codec_probe(struct snd_soc_component *component) struct device_node *np = dev->of_node; struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(dev); struct ab8500_codec_platform_data codec_pdata; - struct filter_control *fc; int status; dev_dbg(dev, "%s: Enter.\n", __func__); @@ -2486,25 +2208,6 @@ static int ab8500_codec_probe(struct snd_soc_component *component) snd_soc_component_write(component, AB8500_SHORTCIRCONF, BIT(AB8500_SHORTCIRCONF_HSZCDDIS)); - /* Add filter controls */ - status = snd_soc_add_component_controls(component, ab8500_filter_controls, - ARRAY_SIZE(ab8500_filter_controls)); - if (status < 0) { - dev_err(dev, - "%s: failed to add ab8500 filter controls (%d).\n", - __func__, status); - return status; - } - fc = (struct filter_control *) - &ab8500_filter_controls[AB8500_FILTER_ANC_FIR].private_value; - drvdata->anc_fir_values = (long *)fc->value; - fc = (struct filter_control *) - &ab8500_filter_controls[AB8500_FILTER_ANC_IIR].private_value; - drvdata->anc_iir_values = (long *)fc->value; - fc = (struct filter_control *) - 
&ab8500_filter_controls[AB8500_FILTER_SID_FIR].private_value; - drvdata->sid_fir_values = (long *)fc->value; - snd_soc_dapm_disable_pin(dapm, "ANC Configure Input"); mutex_init(&drvdata->ctrl_lock); @@ -2538,7 +2241,6 @@ static int ab8500_codec_driver_probe(struct platform_device *pdev) if (!drvdata) return -ENOMEM; drvdata->sid_status = SID_UNCONFIGURED; - drvdata->anc_status = ANC_UNCONFIGURED; dev_set_drvdata(&pdev->dev, drvdata); drvdata->regmap = devm_regmap_init(&pdev->dev, NULL, &pdev->dev, diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c index e05d975ba7945b..795e2764d67ec8 100644 --- a/sound/soc/codecs/cs35l56-shared.c +++ b/sound/soc/codecs/cs35l56-shared.c @@ -108,8 +108,6 @@ int cs35l56_set_patch(struct cs35l56_base *cs35l56_base) EXPORT_SYMBOL_NS_GPL(cs35l56_set_patch, "SND_SOC_CS35L56_SHARED"); static const struct reg_default cs35l56_reg_defaults[] = { - /* no defaults for OTP_MEM - first read populates cache */ - { CS35L56_ASP1_ENABLES1, 0x00000000 }, { CS35L56_ASP1_CONTROL1, 0x00000028 }, { CS35L56_ASP1_CONTROL2, 0x18180200 }, @@ -138,8 +136,6 @@ static const struct reg_default cs35l56_reg_defaults[] = { }; static const struct reg_default cs35l63_reg_defaults[] = { - /* no defaults for OTP_MEM - first read populates cache */ - { CS35L56_ASP1_ENABLES1, 0x00000000 }, { CS35L56_ASP1_CONTROL1, 0x00000028 }, { CS35L56_ASP1_CONTROL2, 0x18180200 }, @@ -282,6 +278,9 @@ static bool cs35l56_common_volatile_reg(unsigned int reg) case CS35L56_GLOBAL_ENABLES: /* owned by firmware */ case CS35L56_BLOCK_ENABLES: /* owned by firmware */ case CS35L56_BLOCK_ENABLES2: /* owned by firmware */ + case CS35L56_OTP_MEM_53: + case CS35L56_OTP_MEM_54: + case CS35L56_OTP_MEM_55: case CS35L56_SYNC_GPIO1_CFG ... 
CS35L56_ASP2_DIO_GPIO13_CFG: case CS35L56_UPDATE_REGS: case CS35L56_REFCLK_INPUT: /* owned by firmware */ @@ -852,9 +851,11 @@ int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_sou err: regcache_cache_only(cs35l56_base->regmap, true); - regmap_multi_reg_write_bypassed(cs35l56_base->regmap, - cs35l56_hibernate_seq, - ARRAY_SIZE(cs35l56_hibernate_seq)); + if (cs35l56_base->can_hibernate) { + regmap_multi_reg_write_bypassed(cs35l56_base->regmap, + cs35l56_hibernate_seq, + ARRAY_SIZE(cs35l56_hibernate_seq)); + } return ret; } @@ -1729,8 +1730,7 @@ int cs35l56_read_onchip_spkid(struct cs35l56_base *cs35l56_base) ret = regmap_read(regmap, CS35L56_GPIO_STATUS1, &val); if (ret) { - dev_err(cs35l56_base->dev, "GPIO%d status read failed: %d\n", - cs35l56_base->onchip_spkid_gpios[i] + 1, ret); + dev_err(cs35l56_base->dev, "GPIO status read failed: %d\n", ret); return ret; } diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c index 378017fcea10f8..849d70ca23d6f5 100644 --- a/sound/soc/codecs/cs35l56.c +++ b/sound/soc/codecs/cs35l56.c @@ -1956,9 +1956,9 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56) goto err; } - ret = devm_snd_soc_register_component(cs35l56->base.dev, - &soc_component_dev_cs35l56, - cs35l56_dai, ARRAY_SIZE(cs35l56_dai)); + ret = snd_soc_register_component(cs35l56->base.dev, + &soc_component_dev_cs35l56, + cs35l56_dai, ARRAY_SIZE(cs35l56_dai)); if (ret < 0) { dev_err_probe(cs35l56->base.dev, ret, "Register codec failed\n"); goto err; @@ -1970,6 +1970,9 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56) gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0); regulator_bulk_disable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies); + if (cs35l56->dsp_wq) + destroy_workqueue(cs35l56->dsp_wq); + return ret; } EXPORT_SYMBOL_NS_GPL(cs35l56_common_probe, "SND_SOC_CS35L56_CORE"); @@ -2057,6 +2060,8 @@ EXPORT_SYMBOL_NS_GPL(cs35l56_init, "SND_SOC_CS35L56_CORE"); void cs35l56_remove(struct cs35l56_private 
*cs35l56) { + snd_soc_unregister_component(cs35l56->base.dev); + cs35l56->base.init_done = false; /* diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c index dd3528537ae42a..80964d58bb87d6 100644 --- a/sound/soc/codecs/nau8825.c +++ b/sound/soc/codecs/nau8825.c @@ -712,8 +712,8 @@ static void nau8825_xtalk_measure(struct nau8825 *nau8825) /* In left headphone IMM state, read out left headphone * impedance measure result, and delay some time to wait * detection sine wave output finish. Then, we can calculate - * the cross talk suppresstion side tone according to the L/R - * headphone imedance. + * the cross talk suppression side tone according to the L/R + * headphone impedance. */ regmap_read(nau8825->regmap, NAU8825_REG_IMM_RMS_L, &nau8825->imp_rms[NAU8825_XTALK_HPL_R2L]); @@ -2237,7 +2237,7 @@ static void nau8825_component_remove(struct snd_soc_component *component) { struct nau8825 *nau8825 = snd_soc_component_get_drvdata(component); - /* Cancel and reset cross tak suppresstion detection funciton */ + /* Cancel and reset cross talk suppression detection function */ nau8825_xtalk_cancel(nau8825); } @@ -2651,7 +2651,7 @@ static int nau8825_set_bias_level(struct snd_soc_component *component, /* ground HPL/HPR, MICGRND1/2 */ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSD_CTRL, 0xf, 0xf); - /* Cancel and reset cross talk detection funciton */ + /* Cancel and reset cross talk detection function */ nau8825_xtalk_cancel(nau8825); /* Turn off all interruptions before system shutdown. Keep the * interruption quiet before resume setup completes. 
diff --git a/sound/soc/codecs/tac5xx2-sdw.c b/sound/soc/codecs/tac5xx2-sdw.c new file mode 100644 index 00000000000000..917b36ac1cd3b4 --- /dev/null +++ b/sound/soc/codecs/tac5xx2-sdw.c @@ -0,0 +1,2047 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// ALSA SoC Texas Instruments TAC5XX2 Audio Smart Amplifier +// +// Copyright (C) 2025 Texas Instruments Incorporated +// https://www.ti.com +// +// Author: Niranjan H Y + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tac5xx2.h" + +#define TAC5XX2_PROBE_TIMEOUT_MS 3000 +#define TAC5XX2_FW_CACHE_TIMEOUT_MS 300 + +#define TAC5XX2_DEVICE_RATES (SNDRV_PCM_RATE_44100 | \ + SNDRV_PCM_RATE_48000 | \ + SNDRV_PCM_RATE_96000 | \ + SNDRV_PCM_RATE_88200) +#define TAC5XX2_DEVICE_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S24_LE | \ + SNDRV_PCM_FMTBIT_S32_LE) +/* Define channel constants */ +#define TAC_CHANNEL_LEFT 1 +#define TAC_CHANNEL_RIGHT 2 +#define TAC_JACK_MONO_CS 2 + +#define TAC_MUTE_REG(func, fu, ch) \ + SDW_SDCA_CTL(TAC_FUNCTION_ID_##func, TAC_SDCA_ENT_##fu, \ + TAC_SDCA_CHANNEL_MUTE, TAC_CHANNEL_##ch) +#define TAC_USAGE_REG(func, ent) \ + SDW_SDCA_CTL(TAC_FUNCTION_ID_##func, TAC_SDCA_ENT_##ent, \ + TAC_SDCA_CTL_USAGE, 0) +#define TAC_XU_BYPASS_REG(func, xu) \ + SDW_SDCA_CTL(TAC_FUNCTION_ID_##func, TAC_SDCA_ENT_##xu, \ + TAC_SDCA_CTL_XU_BYPASS, 0) + +/* mute registers */ +#define FU21_L_MUTE_REG TAC_MUTE_REG(SA, FU21, LEFT) +#define FU21_R_MUTE_REG TAC_MUTE_REG(SA, FU21, RIGHT) +#define FU23_L_MUTE_REG TAC_MUTE_REG(SA, FU23, LEFT) +#define FU23_R_MUTE_REG TAC_MUTE_REG(SA, FU23, RIGHT) +#define FU26_MUTE_REG TAC_MUTE_REG(SA, FU26, LEFT) +#define FU11_L_MUTE_REG TAC_MUTE_REG(SM, FU11, LEFT) +#define FU11_R_MUTE_REG TAC_MUTE_REG(SM, FU11, RIGHT) +#define FU113_L_MUTE_REG TAC_MUTE_REG(SM, FU113, LEFT) +#define FU113_R_MUTE_REG 
TAC_MUTE_REG(SM, FU113, RIGHT) +#define FU41_L_MUTE_REG TAC_MUTE_REG(UAJ, FU41, LEFT) +#define FU41_R_MUTE_REG TAC_MUTE_REG(UAJ, FU41, RIGHT) +#define FU36_MUTE_REG TAC_MUTE_REG(UAJ, FU36, RIGHT) + +/* it/ot usage */ +#define IT11_USAGE_REG TAC_USAGE_REG(SM, IT11) +#define IT41_USAGE_REG TAC_USAGE_REG(UAJ, IT41) +#define IT33_USAGE_REG TAC_USAGE_REG(UAJ, IT33) +#define OT113_USAGE_REG TAC_USAGE_REG(SM, OT113) +#define OT45_USAGE_REG TAC_USAGE_REG(UAJ, OT45) +#define OT36_USAGE_REG TAC_USAGE_REG(UAJ, OT36) + +/* xu bypass */ +#define XU12_BYPASS_REG TAC_XU_BYPASS_REG(SM, XU12) +#define XU42_BYPASS_REG TAC_XU_BYPASS_REG(UAJ, XU42) + +#define TAC_DSP_ALGO_STATUS TAC_REG_SDW(0, 3, 12) +#define TAC_DSP_ALGO_STATUS_RUNNING 0x20 +#define TAC_FW_HDR_SIZE 88 +#define TAC_FW_FILE_HDR 20 +#define TAC_MAX_FW_CHUNKS 512 + +struct tac_fw_hdr { + u32 size; + u32 version_offset; + u32 plt_id; + u32 ppc3_ver; + u64 timestamp; + u8 ddc_name[64]; +}; + +/* Firmware file/chunk structure */ +struct tac_fw_file { + u32 vendor_id; + u32 file_id; + u32 version; + u32 length; + u32 dest_addr; + u8 *fw_data; +}; + +/* TLV for volume control */ +static const DECLARE_TLV_DB_SCALE(tac5xx2_amp_tlv, 0, 50, 0); +static const DECLARE_TLV_DB_SCALE(tac5xx2_dvc_tlv, -7200, 50, 0); + +/* Q7.8 volume control parameters: range -72dB to +6dB, step 0.5dB */ +#define TAC_DVC_STEP 128 /* 0.5 dB in Q7.8 format */ +#define TAC_DVC_MIN (-144) /* -72 dB / 0.5 dB step */ +#define TAC_DVC_MAX 12 /* +6 dB / 0.5 dB step */ + +/* TAC-specific stereo volume control macro using SDW_SDCA_CTL (single control for L/R) */ +#define TAC_DOUBLE_Q78_TLV(name, func_id, ent_id) \ + SDCA_DOUBLE_Q78_TLV(name, \ + SDW_SDCA_CTL(TAC_FUNCTION_ID_##func_id, TAC_SDCA_ENT_##ent_id, \ + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT), \ + SDW_SDCA_CTL(TAC_FUNCTION_ID_##func_id, TAC_SDCA_ENT_##ent_id, \ + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT), \ + TAC_DVC_MIN, TAC_DVC_MAX, TAC_DVC_STEP, tac5xx2_dvc_tlv) + +struct tac5xx2_prv { + struct 
snd_soc_component *component; + struct sdw_slave *sdw_peripheral; + struct sdca_function_data *sa_func_data; + struct sdca_function_data *sm_func_data; + struct sdca_function_data *uaj_func_data; + struct sdca_function_data *hid_func_data; + enum sdw_slave_status status; + struct regmap *regmap; + struct device *dev; + bool hw_init; + bool first_hw_init_done; + u32 part_id; + struct snd_soc_jack *hs_jack; + int jack_type; + /* Custom fw binary. UMP File Download is not used. */ + unsigned int fw_file_cnt; + struct tac_fw_file *fw_files; + struct completion fw_caching_complete; + bool fw_dl_success; + u8 fw_binaryname[64]; +}; + +static const struct reg_default tac_reg_default[] = { + {TAC_SW_RESET, 0x0}, + {TAC_SLEEP_MODEZ, 0x0}, + {TAC_FEATURE_PDZ, 0x0}, + {TAC_TX_CH_EN, 0xf0}, + {TAC_REG_SDW(0, 0, 0x5), 0xcf}, + {TAC_REG_SDW(0, 0, 0x6), 0xa}, + {TAC_REG_SDW(0, 0, 0x7), 0x0}, + {TAC_REG_SDW(0, 0, 0x8), 0xfe}, + {TAC_REG_SDW(0, 0, 0x9), 0x9}, + {TAC_REG_SDW(0, 0, 0xa), 0x28}, + {TAC_REG_SDW(0, 0, 0xb), 0x1}, + {TAC_REG_SDW(0, 0, 0xc), 0x11}, + {TAC_REG_SDW(0, 0, 0xd), 0x11}, + {TAC_REG_SDW(0, 0, 0xe), 0x61}, + {TAC_REG_SDW(0, 0, 0xf), 0x0}, + {TAC_REG_SDW(0, 0, 0x10), 0x50}, + {TAC_REG_SDW(0, 0, 0x11), 0x70}, + {TAC_REG_SDW(0, 0, 0x12), 0x60}, + {TAC_REG_SDW(0, 0, 0x13), 0x28}, + {TAC_REG_SDW(0, 0, 0x14), 0x0}, + {TAC_REG_SDW(0, 0, 0x15), 0x18}, + {TAC_REG_SDW(0, 0, 0x16), 0x20}, + {TAC_REG_SDW(0, 0, 0x17), 0x0}, + {TAC_REG_SDW(0, 0, 0x18), 0x18}, + {TAC_REG_SDW(0, 0, 0x19), 0x54}, + {TAC_REG_SDW(0, 0, 0x1a), 0x8}, + {TAC_REG_SDW(0, 0, 0x1b), 0x0}, + {TAC_REG_SDW(0, 0, 0x1c), 0x30}, + {TAC_REG_SDW(0, 0, 0x1d), 0x0}, + {TAC_REG_SDW(0, 0, 0x1e), 0x0}, + {TAC_REG_SDW(0, 0, 0x1f), 0x0}, + {TAC_REG_SDW(0, 0, 0x20), 0x0}, + {TAC_REG_SDW(0, 0, 0x21), 0x20}, + {TAC_REG_SDW(0, 0, 0x22), 0x21}, + {TAC_REG_SDW(0, 0, 0x23), 0x22}, + {TAC_REG_SDW(0, 0, 0x24), 0x23}, + {TAC_REG_SDW(0, 0, 0x25), 0x4}, + {TAC_REG_SDW(0, 0, 0x26), 0x5}, + {TAC_REG_SDW(0, 0, 0x27), 0x6}, + 
{TAC_REG_SDW(0, 0, 0x28), 0x7}, + {TAC_REG_SDW(0, 0, 0x29), 0x0}, + {TAC_REG_SDW(0, 0, 0x2a), 0x0}, + {TAC_REG_SDW(0, 0, 0x2b), 0x0}, + {TAC_REG_SDW(0, 0, 0x2c), 0x20}, + {TAC_REG_SDW(0, 0, 0x2d), 0x21}, + {TAC_REG_SDW(0, 0, 0x2e), 0x2}, + {TAC_REG_SDW(0, 0, 0x2f), 0x3}, + {TAC_REG_SDW(0, 0, 0x30), 0x4}, + {TAC_REG_SDW(0, 0, 0x31), 0x5}, + {TAC_REG_SDW(0, 0, 0x32), 0x6}, + {TAC_REG_SDW(0, 0, 0x33), 0x7}, + {TAC_REG_SDW(0, 0, 0x34), 0x0}, + {TAC_REG_SDW(0, 0, 0x35), 0x90}, + {TAC_REG_SDW(0, 0, 0x36), 0x80}, + {TAC_REG_SDW(0, 0, 0x37), 0x0}, + {TAC_REG_SDW(0, 0, 0x39), 0x0}, + {TAC_REG_SDW(0, 0, 0x3a), 0x90}, + {TAC_REG_SDW(0, 0, 0x3b), 0x80}, + {TAC_REG_SDW(0, 0, 0x3c), 0x0}, + {TAC_REG_SDW(0, 0, 0x3e), 0x0}, + {TAC_REG_SDW(0, 0, 0x3f), 0x90}, + {TAC_REG_SDW(0, 0, 0x40), 0x80}, + {TAC_REG_SDW(0, 0, 0x41), 0x0}, + {TAC_REG_SDW(0, 0, 0x43), 0x90}, + {TAC_REG_SDW(0, 0, 0x44), 0x80}, + {TAC_REG_SDW(0, 0, 0x45), 0x0}, + {TAC_REG_SDW(0, 0, 0x47), 0x90}, + {TAC_REG_SDW(0, 0, 0x48), 0x80}, + {TAC_REG_SDW(0, 0, 0x49), 0x0}, + {TAC_REG_SDW(0, 0, 0x4b), 0x90}, + {TAC_REG_SDW(0, 0, 0x4c), 0x80}, + {TAC_REG_SDW(0, 0, 0x4d), 0x0}, + {TAC_REG_SDW(0, 0, 0x4f), 0x31}, + {TAC_REG_SDW(0, 0, 0x50), 0x0}, + {TAC_REG_SDW(0, 0, 0x51), 0x0}, + {TAC_REG_SDW(0, 0, 0x52), 0x90}, + {TAC_REG_SDW(0, 0, 0x53), 0x80}, + {TAC_REG_SDW(0, 0, 0x55), 0x90}, + {TAC_REG_SDW(0, 0, 0x56), 0x80}, + {TAC_REG_SDW(0, 0, 0x58), 0x90}, + {TAC_REG_SDW(0, 0, 0x59), 0x80}, + {TAC_REG_SDW(0, 0, 0x5b), 0x90}, + {TAC_REG_SDW(0, 0, 0x5c), 0x80}, + {TAC_REG_SDW(0, 0, 0x5e), 0x8}, + {TAC_REG_SDW(0, 0, 0x5f), 0x8}, + {TAC_REG_SDW(0, 0, 0x60), 0x0}, + {TAC_REG_SDW(0, 0, 0x61), 0x0}, + {TAC_REG_SDW(0, 0, 0x62), 0xff}, + {TAC_REG_SDW(0, 0, 0x63), 0xc0}, + {TAC_REG_SDW(0, 0, 0x64), 0x5}, + {TAC_REG_SDW(0, 0, 0x65), 0x3}, + {TAC_REG_SDW(0, 0, 0x66), 0x0}, + {TAC_REG_SDW(0, 0, 0x67), 0x0}, + {TAC_REG_SDW(0, 0, 0x68), 0x0}, + {TAC_REG_SDW(0, 0, 0x69), 0x8}, + {TAC_REG_SDW(0, 0, 0x6a), 0x0}, + {TAC_REG_SDW(0, 0, 0x6b), 0xa0}, + 
{TAC_REG_SDW(0, 0, 0x6c), 0x18}, + {TAC_REG_SDW(0, 0, 0x6d), 0x18}, + {TAC_REG_SDW(0, 0, 0x6e), 0x18}, + {TAC_REG_SDW(0, 0, 0x6f), 0x18}, + {TAC_REG_SDW(0, 0, 0x70), 0x88}, + {TAC_REG_SDW(0, 0, 0x71), 0xff}, + {TAC_REG_SDW(0, 0, 0x72), 0x0}, + {TAC_REG_SDW(0, 0, 0x73), 0x31}, + {TAC_REG_SDW(0, 0, 0x74), 0xc0}, + {TAC_REG_SDW(0, 0, 0x75), 0x0}, + {TAC_REG_SDW(0, 0, 0x76), 0x0}, + {TAC_REG_SDW(0, 0, 0x77), 0x0}, + {TAC_REG_SDW(0, 0, 0x78), 0x0}, + {TAC_REG_SDW(0, 0, 0x7b), 0x0}, + {TAC_REG_SDW(0, 0, 0x7c), 0xd0}, + {TAC_REG_SDW(0, 0, 0x7d), 0x0}, + {TAC_REG_SDW(0, 0, 0x7e), 0x0}, + {TAC_REG_SDW(0, 1, 0x1), 0x0}, + {TAC_REG_SDW(0, 1, 0x2), 0x0}, + {TAC_REG_SDW(0, 1, 0x3), 0x0}, + {TAC_REG_SDW(0, 1, 0x4), 0x4}, + {TAC_REG_SDW(0, 1, 0x5), 0x0}, + {TAC_REG_SDW(0, 1, 0x6), 0x0}, + {TAC_REG_SDW(0, 1, 0x7), 0x0}, + {TAC_REG_SDW(0, 1, 0x8), 0x0}, + {TAC_REG_SDW(0, 1, 0x9), 0x0}, + {TAC_REG_SDW(0, 1, 0xa), 0x0}, + {TAC_REG_SDW(0, 1, 0xb), 0x1}, + {TAC_REG_SDW(0, 1, 0xc), 0x0}, + {TAC_REG_SDW(0, 1, 0xd), 0x0}, + {TAC_REG_SDW(0, 1, 0xe), 0x0}, + {TAC_REG_SDW(0, 1, 0xf), 0x8}, + {TAC_REG_SDW(0, 1, 0x10), 0x0}, + {TAC_REG_SDW(0, 1, 0x11), 0x0}, + {TAC_REG_SDW(0, 1, 0x12), 0x1}, + {TAC_REG_SDW(0, 1, 0x13), 0x0}, + {TAC_REG_SDW(0, 1, 0x14), 0x0}, + {TAC_REG_SDW(0, 1, 0x15), 0x0}, + {TAC_REG_SDW(0, 1, 0x16), 0x0}, + {TAC_REG_SDW(0, 1, 0x17), 0x0}, + {TAC_REG_SDW(0, 1, 0x18), 0x0}, + {TAC_REG_SDW(0, 1, 0x19), 0x0}, + {TAC_REG_SDW(0, 1, 0x1a), 0x0}, + {TAC_REG_SDW(0, 1, 0x1b), 0x0}, + {TAC_REG_SDW(0, 1, 0x1c), 0x0}, + {TAC_REG_SDW(0, 1, 0x1d), 0x0}, + {TAC_REG_SDW(0, 1, 0x1e), 0x2}, + {TAC_REG_SDW(0, 1, 0x1f), 0x8}, + {TAC_REG_SDW(0, 1, 0x20), 0x9}, + {TAC_REG_SDW(0, 1, 0x21), 0xa}, + {TAC_REG_SDW(0, 1, 0x22), 0xb}, + {TAC_REG_SDW(0, 1, 0x23), 0xc}, + {TAC_REG_SDW(0, 1, 0x24), 0xd}, + {TAC_REG_SDW(0, 1, 0x25), 0xe}, + {TAC_REG_SDW(0, 1, 0x26), 0xf}, + {TAC_REG_SDW(0, 1, 0x27), 0x8}, + {TAC_REG_SDW(0, 1, 0x28), 0x9}, + {TAC_REG_SDW(0, 1, 0x29), 0xa}, + {TAC_REG_SDW(0, 1, 0x2a), 0xb}, + 
{TAC_REG_SDW(0, 1, 0x2b), 0xc}, + {TAC_REG_SDW(0, 1, 0x2c), 0xd}, + {TAC_REG_SDW(0, 1, 0x2d), 0xe}, + {TAC_REG_SDW(0, 1, 0x2e), 0xf}, + {TAC_REG_SDW(0, 1, 0x2f), 0x0}, + {TAC_REG_SDW(0, 1, 0x30), 0x0}, + {TAC_REG_SDW(0, 1, 0x31), 0x0}, + {TAC_REG_SDW(0, 1, 0x32), 0x0}, + {TAC_REG_SDW(0, 1, 0x33), 0x0}, + {TAC_REG_SDW(0, 1, 0x34), 0x0}, + {TAC_REG_SDW(0, 1, 0x35), 0x0}, + {TAC_REG_SDW(0, 1, 0x36), 0x0}, + {TAC_REG_SDW(0, 1, 0x37), 0x0}, + {TAC_REG_SDW(0, 1, 0x38), 0x98}, + {TAC_REG_SDW(0, 1, 0x39), 0x0}, + {TAC_REG_SDW(0, 1, 0x3a), 0x0}, + {TAC_REG_SDW(0, 1, 0x3b), 0x0}, + {TAC_REG_SDW(0, 1, 0x3c), 0x1}, + {TAC_REG_SDW(0, 1, 0x3d), 0x2}, + {TAC_REG_SDW(0, 1, 0x3e), 0x3}, + {TAC_REG_SDW(0, 1, 0x3f), 0x4}, + {TAC_REG_SDW(0, 1, 0x40), 0x5}, + {TAC_REG_SDW(0, 1, 0x41), 0x6}, + {TAC_REG_SDW(0, 1, 0x42), 0x7}, + {TAC_REG_SDW(0, 1, 0x43), 0x0}, + {TAC_REG_SDW(0, 1, 0x44), 0x0}, + {TAC_REG_SDW(0, 1, 0x45), 0x1}, + {TAC_REG_SDW(0, 1, 0x46), 0x2}, + {TAC_REG_SDW(0, 1, 0x47), 0x3}, + {TAC_REG_SDW(0, 1, 0x48), 0x4}, + {TAC_REG_SDW(0, 1, 0x49), 0x5}, + {TAC_REG_SDW(0, 1, 0x4a), 0x6}, + {TAC_REG_SDW(0, 1, 0x4b), 0x7}, + {TAC_REG_SDW(0, 1, 0x4c), 0x98}, + {TAC_REG_SDW(0, 1, 0x4d), 0x0}, + {TAC_REG_SDW(0, 1, 0x4e), 0x0}, + {TAC_REG_SDW(0, 1, 0x4f), 0x0}, + {TAC_REG_SDW(0, 1, 0x50), 0x1}, + {TAC_REG_SDW(0, 1, 0x51), 0x2}, + {TAC_REG_SDW(0, 1, 0x52), 0x3}, + {TAC_REG_SDW(0, 1, 0x53), 0x4}, + {TAC_REG_SDW(0, 1, 0x54), 0x5}, + {TAC_REG_SDW(0, 1, 0x55), 0x6}, + {TAC_REG_SDW(0, 1, 0x56), 0x7}, + {TAC_REG_SDW(0, 1, 0x57), 0x0}, + {TAC_REG_SDW(0, 1, 0x58), 0x0}, + {TAC_REG_SDW(0, 1, 0x59), 0x1}, + {TAC_REG_SDW(0, 1, 0x5a), 0x2}, + {TAC_REG_SDW(0, 1, 0x5b), 0x3}, + {TAC_REG_SDW(0, 1, 0x5c), 0x4}, + {TAC_REG_SDW(0, 1, 0x5d), 0x5}, + {TAC_REG_SDW(0, 1, 0x5e), 0x6}, + {TAC_REG_SDW(0, 1, 0x5f), 0x7}, + {TAC_REG_SDW(0, 1, 0x60), 0x98}, + {TAC_REG_SDW(0, 1, 0x61), 0x0}, + {TAC_REG_SDW(0, 1, 0x62), 0x0}, + {TAC_REG_SDW(0, 1, 0x63), 0x0}, + {TAC_REG_SDW(0, 1, 0x64), 0x1}, + {TAC_REG_SDW(0, 1, 
0x65), 0x2}, + {TAC_REG_SDW(0, 1, 0x66), 0x3}, + {TAC_REG_SDW(0, 1, 0x67), 0x4}, + {TAC_REG_SDW(0, 1, 0x68), 0x5}, + {TAC_REG_SDW(0, 1, 0x69), 0x6}, + {TAC_REG_SDW(0, 1, 0x6a), 0x7}, + {TAC_REG_SDW(0, 1, 0x6b), 0x0}, + {TAC_REG_SDW(0, 1, 0x6c), 0x0}, + {TAC_REG_SDW(0, 1, 0x6d), 0x1}, + {TAC_REG_SDW(0, 1, 0x6e), 0x2}, + {TAC_REG_SDW(0, 1, 0x6f), 0x3}, + {TAC_REG_SDW(0, 1, 0x70), 0x4}, + {TAC_REG_SDW(0, 1, 0x71), 0x5}, + {TAC_REG_SDW(0, 1, 0x72), 0x6}, + {TAC_REG_SDW(0, 1, 0x73), 0x7}, +}; + +static const struct reg_sequence tac_spk_seq[] = { + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU21, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT), 0), + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU21, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT), 0), + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU23, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT), 0), + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU23, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT), 0), +}; + +static const struct reg_sequence tac_sm_seq[] = { + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_FU113, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT), 0), + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_FU113, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT), 0), + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_FU11, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT), 0), + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_FU11, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT), 0), +}; + +static const struct reg_sequence tac_uaj_seq[] = { + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_FU41, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT), 0), + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_FU41, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT), 0), + REG_SEQ0(SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_FU36, + TAC_SDCA_CHANNEL_GAIN, TAC_JACK_MONO_CS), 0), +}; + +static bool tac_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case 
TAC_REG_SDW(0, 0, 1) ... TAC_REG_SDW(0, 0, 5): + case TAC_REG_SDW(0, 2, 1) ... TAC_REG_SDW(0, 2, 6): + case TAC_REG_SDW(0, 2, 24) ... TAC_REG_SDW(0, 2, 55): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_HID, TAC_SDCA_ENT_HID1, + TAC_SDCA_CTL_HIDTX_CURRENT_OWNER, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_HID, TAC_SDCA_ENT_HID1, + TAC_SDCA_CTL_HIDTX_MESSAGE_OFFSET, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_GE35, + TAC_SDCA_CTL_DET_MODE, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_PDE23, + TAC_SDCA_REQUESTED_PS, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_PDE11, + TAC_SDCA_REQUESTED_PS, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_PDE47, + TAC_SDCA_REQUESTED_PS, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_PDE34, + TAC_SDCA_REQUESTED_PS, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_PDE23, + TAC_SDCA_ACTUAL_PS, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_PDE11, + TAC_SDCA_ACTUAL_PS, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_PDE47, + TAC_SDCA_ACTUAL_PS, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_PDE34, + TAC_SDCA_ACTUAL_PS, 0): + case SDW_SCP_SDCA_INT1: + case SDW_SCP_SDCA_INT2: + case SDW_SCP_SDCA_INT3: + case SDW_SCP_SDCA_INT4: + case SDW_SDCA_CTL(1, 0, 0x10, 0): + case SDW_SDCA_CTL(2, 0, 0x10, 0): + case SDW_SDCA_CTL(3, 0, 0x10, 0): + case SDW_SDCA_CTL(4, 0, 0x1, 0): + case 0x44007F80 ... 
0x44007F87: + case TAC_DSP_ALGO_STATUS: /* DSP algo status - always read from HW */ + return true; + default: + break; + } + + return false; +} + +static int tac_sdca_mbq_size(struct device *dev, unsigned int reg) +{ + switch (reg) { + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU21, + TAC_SDCA_CHANNEL_VOLUME, TAC_CHANNEL_LEFT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU21, + TAC_SDCA_CHANNEL_VOLUME, TAC_CHANNEL_RIGHT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU23, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU23, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SA, TAC_SDCA_ENT_FU23, + TAC_SDCA_MASTER_GAIN, 0): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_FU113, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_FU113, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_FU11, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_SM, TAC_SDCA_ENT_FU11, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_FU41, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_LEFT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_FU41, + TAC_SDCA_CHANNEL_GAIN, TAC_CHANNEL_RIGHT): + case SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_FU36, + TAC_SDCA_CHANNEL_GAIN, TAC_JACK_MONO_CS): + return 2; + + default: + return 1; + } +} + +static const struct regmap_sdw_mbq_cfg tac_mbq_cfg = { + .mbq_size = tac_sdca_mbq_size, +}; + +static const struct regmap_config tac_regmap = { + .reg_bits = 32, + .val_bits = 16, /* mbq support */ + .reg_defaults = tac_reg_default, + .num_reg_defaults = ARRAY_SIZE(tac_reg_default), + .max_register = 0x47FFFFFF, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = tac_volatile_reg, + .use_single_read = true, + .use_single_write = true, +}; + +/* Check if device has DSP algo that 
needs status monitoring */ +static bool tac_has_dsp_algo(struct tac5xx2_prv *tac_dev) +{ + switch (tac_dev->part_id) { + case 0x5682: + case 0x2883: + return true; + default: + return false; + } +} + +/* Check if device has UAJ (Universal Audio Jack) support */ +static bool tac_has_uaj_support(struct tac5xx2_prv *tac_dev) +{ + return tac_dev->uaj_func_data; +} + +/* Forward declaration for headset detection */ +static int tac5xx2_sdca_headset_detect(struct tac5xx2_prv *tac_dev); + +/* Volume controls for mic, hp and mic cap */ +static const struct snd_kcontrol_new tac5xx2_snd_controls[] = { + SOC_DOUBLE_R_RANGE_TLV("Amp Volume", TAC_AMP_LVL_CFG0, TAC_AMP_LVL_CFG1, + 2, 0, 44, 1, tac5xx2_amp_tlv), + TAC_DOUBLE_Q78_TLV("DMIC Capture Volume", SM, FU113), + TAC_DOUBLE_Q78_TLV("Speaker Volume", SA, FU21), +}; + +static const struct snd_kcontrol_new tac_uaj_controls[] = { + TAC_DOUBLE_Q78_TLV("UAJ Playback Volume", UAJ, FU41), + SDCA_SINGLE_Q78_TLV("UAJ Capture Volume", + SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_FU36, + TAC_SDCA_CHANNEL_GAIN, TAC_JACK_MONO_CS), + TAC_DVC_MIN, TAC_DVC_MAX, TAC_DVC_STEP, tac5xx2_dvc_tlv), +}; + +static const struct snd_soc_dapm_widget tac5xx2_common_widgets[] = { + /* Port 1: Speaker Playback Path */ + SND_SOC_DAPM_AIF_IN("AIF1 Playback", "DP1 Speaker Playback", 0, + SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_PGA("FU21_L", FU21_L_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("FU21_R", FU21_R_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("FU23_L", FU23_L_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("FU23_R", FU23_R_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_OUTPUT("SPK_L"), + SND_SOC_DAPM_OUTPUT("SPK_R"), + + /* Port 3: Smart Mic (DMIC) Capture Path */ + SND_SOC_DAPM_AIF_OUT("AIF3 Capture", "DP3 Mic Capture", 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_INPUT("DMIC_L"), + SND_SOC_DAPM_INPUT("DMIC_R"), + SND_SOC_DAPM_PGA("IT11", IT11_USAGE_REG, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("CS113", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA("FU11_L", 
FU11_L_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("FU11_R", FU11_R_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("PPU11", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA("XU12", XU12_BYPASS_REG, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA("FU113_L", FU113_L_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("FU113_R", FU113_R_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("OT113", OT113_USAGE_REG, 0, 0, NULL, 0), +}; + +static const struct snd_soc_dapm_widget tac_uaj_widgets[] = { + /* Port 4: UAJ (Headphone) Playback Path */ + SND_SOC_DAPM_AIF_IN("AIF4 Playback", "DP4 UAJ Speaker Playback", 0, + SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_PGA("IT41", IT41_USAGE_REG, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA("FU41_L", FU41_L_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("FU41_R", FU41_R_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_PGA("XU42", XU42_BYPASS_REG, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("CS41", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_DAC("OT45", "DP4 UAJ Speaker Playback", OT45_USAGE_REG, 0, 0), + SND_SOC_DAPM_OUTPUT("HP_L"), + SND_SOC_DAPM_OUTPUT("HP_R"), + + /* Port 7: UAJ (Headset Mic) Capture Path */ + SND_SOC_DAPM_AIF_OUT("AIF7 Capture", "DP7 UAJ Mic Capture", 0, + SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_INPUT("UAJ_MIC"), + SND_SOC_DAPM_ADC("IT33", "DP7 UAJ Mic Capture", IT33_USAGE_REG, 0, 0), + SND_SOC_DAPM_PGA("FU36", FU36_MUTE_REG, 0, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("CS36", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA("OT36", OT36_USAGE_REG, 0, 0, NULL, 0), +}; + +static const struct snd_soc_dapm_route tac5xx2_common_routes[] = { + /* Speaker Playback Path */ + {"FU21_L", NULL, "AIF1 Playback"}, + {"FU21_R", NULL, "AIF1 Playback"}, + + {"FU23_L", NULL, "FU21_L"}, + {"FU23_R", NULL, "FU21_R"}, + + {"SPK_L", NULL, "FU23_L"}, + {"SPK_R", NULL, "FU23_R"}, + + /* Smart Mic DAPM Routes */ + {"IT11", NULL, "DMIC_L"}, + {"IT11", NULL, "DMIC_R"}, + {"FU11_L", NULL, "IT11"}, + {"FU11_R", NULL, "IT11"}, + {"PPU11", NULL, "FU11_L"}, + {"PPU11", NULL, "FU11_R"}, + {"XU12", NULL, 
"PPU11"}, + {"FU113_L", NULL, "XU12"}, + {"FU113_R", NULL, "XU12"}, + {"FU113_L", NULL, "CS113"}, + {"FU113_R", NULL, "CS113"}, + {"OT113", NULL, "FU113_L"}, + {"OT113", NULL, "FU113_R"}, + {"OT113", NULL, "CS113"}, + {"AIF3 Capture", NULL, "OT113"}, +}; + +static const struct snd_soc_dapm_route tac_uaj_routes[] = { + /* UAJ Playback routes */ + {"IT41", NULL, "AIF4 Playback"}, + {"IT41", NULL, "CS41"}, + {"FU41_L", NULL, "IT41"}, + {"FU41_R", NULL, "IT41"}, + {"XU42", NULL, "FU41_L"}, + {"XU42", NULL, "FU41_R"}, + {"OT45", NULL, "XU42"}, + {"OT45", NULL, "CS41"}, + {"HP_L", NULL, "OT45"}, + {"HP_R", NULL, "OT45"}, + + /* UAJ Capture routes */ + {"IT33", NULL, "UAJ_MIC"}, + {"IT33", NULL, "CS36"}, + {"FU36", NULL, "IT33"}, + {"OT36", NULL, "FU36"}, + {"OT36", NULL, "CS36"}, + {"AIF7 Capture", NULL, "OT36"}, +}; + +static s32 tac_set_sdw_stream(struct snd_soc_dai *dai, + void *sdw_stream, s32 direction) +{ + if (sdw_stream) + snd_soc_dai_dma_data_set(dai, direction, sdw_stream); + + return 0; +} + +static void tac_sdw_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + snd_soc_dai_set_dma_data(dai, substream, NULL); +} + +static int tac_clear_latch(struct tac5xx2_prv *priv) +{ + /* CLR_REG is a self-clearing bit */ + return regmap_update_bits(priv->regmap, TAC_INT_CFG, + TAC_INT_CFG_CLR_REG, TAC_INT_CFG_CLR_REG); +} + +static int tac_sdw_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct tac5xx2_prv *tac_dev = snd_soc_component_get_drvdata(component); + struct sdw_slave *sdw_peripheral = tac_dev->sdw_peripheral; + struct sdw_stream_runtime *sdw_stream; + struct sdw_stream_config stream_config = {0}; + struct sdw_port_config port_config = {0}; + u8 sample_rate_idx = 0; + int function_id; + int pde_entity; + int port_num; + int ret; + + if (!tac_dev->hw_init) { + dev_err(tac_dev->dev, + "error: operation without hw 
initialization"); + return -EINVAL; + } + + sdw_stream = snd_soc_dai_get_dma_data(dai, substream); + if (!sdw_stream) { + dev_err(tac_dev->dev, "failed to get dma data"); + return -EINVAL; + } + + ret = tac_clear_latch(tac_dev); + if (ret) + dev_warn(tac_dev->dev, "clear latch failed, err=%d", ret); + + switch (dai->id) { + case TAC5XX2_DMIC: + function_id = TAC_FUNCTION_ID_SM; + pde_entity = TAC_SDCA_ENT_PDE11; + port_num = TAC_SDW_PORT_NUM_DMIC; + break; + case TAC5XX2_UAJ: + function_id = TAC_FUNCTION_ID_UAJ; + pde_entity = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? + TAC_SDCA_ENT_PDE47 : TAC_SDCA_ENT_PDE34; + port_num = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? + TAC_SDW_PORT_NUM_UAJ_PLAYBACK : + TAC_SDW_PORT_NUM_UAJ_CAPTURE; + break; + case TAC5XX2_SPK: + function_id = TAC_FUNCTION_ID_SA; + pde_entity = TAC_SDCA_ENT_PDE23; + port_num = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? + TAC_SDW_PORT_NUM_SPK_PLAYBACK : + TAC_SDW_PORT_NUM_SPK_CAPTURE; + break; + default: + dev_err(tac_dev->dev, "Invalid dai id: %d for power up\n", dai->id); + return -EINVAL; + } + + snd_sdw_params_to_config(substream, params, &stream_config, &port_config); + port_config.num = port_num; + ret = sdw_stream_add_slave(sdw_peripheral, &stream_config, + &port_config, 1, sdw_stream); + if (ret) { + dev_err(dai->dev, + "Unable to configure port %d: %d\n", port_num, ret); + return ret; + } + + switch (params_rate(params)) { + case 48000: + sample_rate_idx = 0x01; + break; + case 44100: + sample_rate_idx = 0x02; + break; + case 96000: + sample_rate_idx = 0x03; + break; + case 88200: + sample_rate_idx = 0x04; + break; + default: + dev_dbg(tac_dev->dev, "Unsupported sample rate: %d Hz", + params_rate(params)); + return -EINVAL; + } + + switch (function_id) { + case TAC_FUNCTION_ID_SM: + ret = regmap_write(tac_dev->regmap, + SDW_SDCA_CTL(function_id, TAC_SDCA_ENT_CS113, + TAC_SDCA_CTL_CS_SAMP_RATE_IDX, 0), + sample_rate_idx); + if (ret) { + dev_err(tac_dev->dev, "Failed to set 
CS113 sample rate: %d", ret); + return ret; + } + + break; + case TAC_FUNCTION_ID_UAJ: + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + ret = regmap_write(tac_dev->regmap, + SDW_SDCA_CTL(function_id, TAC_SDCA_ENT_CS41, + TAC_SDCA_CTL_CS_SAMP_RATE_IDX, 0), + sample_rate_idx); + if (ret) { + dev_err(tac_dev->dev, "Failed to set CS41 sample rate: %d", ret); + return ret; + } + } else { + ret = regmap_write(tac_dev->regmap, + SDW_SDCA_CTL(function_id, TAC_SDCA_ENT_CS36, + TAC_SDCA_CTL_CS_SAMP_RATE_IDX, 0), + sample_rate_idx); + if (ret) { + dev_err(tac_dev->dev, "Failed to set CS36 sample rate: %d", ret); + return ret; + } + } + break; + case TAC_FUNCTION_ID_SA: + /* SmartAmp: no additional sample rate configuration needed */ + break; + } + + ret = regmap_write(tac_dev->regmap, SDW_SDCA_CTL(function_id, pde_entity, + TAC_SDCA_REQUESTED_PS, 0), 0); + if (ret) { + dev_err(tac_dev->dev, + "failed to set func %d, entity %d's requested PS to 0: %d\n", + function_id, pde_entity, ret); + return ret; + } + + ret = sdca_asoc_pde_poll_actual_ps(tac_dev->dev, tac_dev->regmap, function_id, pde_entity, + SDCA_PDE_PS3, SDCA_PDE_PS0, NULL, 0); + if (ret) + dev_err(tac_dev->dev, "failed to transition func %d, pde %d from PS3 -> PS0, err=%d\n", + function_id, pde_entity, ret); + return ret; +} + +static int tac_sdw_pcm_hw_free(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct sdw_stream_runtime *sdw_stream = snd_soc_dai_get_dma_data(dai, substream); + struct tac5xx2_prv *tac_dev = snd_soc_component_get_drvdata(dai->component); + int pde_entity, function_id; + int ret; + + sdw_stream_remove_slave(tac_dev->sdw_peripheral, sdw_stream); + + switch (dai->id) { + case TAC5XX2_DMIC: + pde_entity = TAC_SDCA_ENT_PDE11; + function_id = TAC_FUNCTION_ID_SM; + break; + case TAC5XX2_UAJ: + function_id = TAC_FUNCTION_ID_UAJ; + pde_entity = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 
+ TAC_SDCA_ENT_PDE47 : TAC_SDCA_ENT_PDE34; + break; + case TAC5XX2_SPK: + function_id = TAC_FUNCTION_ID_SA; + pde_entity = TAC_SDCA_ENT_PDE23; + break; + default: + dev_err(tac_dev->dev, "unhandled dai %d for power down\n", dai->id); + return -EINVAL; + } + + ret = regmap_write(tac_dev->regmap, SDW_SDCA_CTL(function_id, pde_entity, + TAC_SDCA_REQUESTED_PS, 0), + SDCA_PDE_PS3); + if (ret) { + dev_err(tac_dev->dev, + "failed to set func %d, entity %d's requested PS to 3: %d\n", + function_id, pde_entity, ret); + return ret; + } + + ret = sdca_asoc_pde_poll_actual_ps(tac_dev->dev, tac_dev->regmap, function_id, + pde_entity, SDCA_PDE_PS0, SDCA_PDE_PS3, + NULL, 0); + if (ret) + dev_err(tac_dev->dev, + "failed to transition func %d, pde %d from PS0 -> PS3, err=%d\n", + function_id, pde_entity, ret); + + return ret; +} + +static const struct snd_soc_dai_ops tac_dai_ops = { + .hw_params = tac_sdw_hw_params, + .hw_free = tac_sdw_pcm_hw_free, + .set_stream = tac_set_sdw_stream, + .shutdown = tac_sdw_shutdown, +}; + +static int tac5xx2_sdca_btn_type(unsigned char *buffer, struct tac5xx2_prv *tac_dev) +{ + switch (*buffer) { + case 1: /* play pause */ + return SND_JACK_BTN_0; + case 10: /* vol down */ + return SND_JACK_BTN_3; + case 8: /* vol up */ + return SND_JACK_BTN_2; + case 4: /* long press */ + return SND_JACK_BTN_1; + case 2: /* next song */ + case 32: /* next song */ + return SND_JACK_BTN_4; + default: + return 0; + } +} + +static int tac5xx2_sdca_button_detect(struct tac5xx2_prv *tac_dev) +{ + unsigned int btn_type, offset, idx; + int ret, value, owner; + u8 buf[2]; + + ret = regmap_read(tac_dev->regmap, + SDW_SDCA_CTL(TAC_FUNCTION_ID_HID, TAC_SDCA_ENT_HID1, + TAC_SDCA_CTL_HIDTX_CURRENT_OWNER, 0), &owner); + if (ret) { + dev_err(tac_dev->dev, + "Failed to read current UMP message owner 0x%x", ret); + return ret; + } + + if (owner == SDCA_UMP_OWNER_DEVICE) { + dev_dbg(tac_dev->dev, "skip button detect as current owner is not host\n"); + return 0; + } + + ret = 
regmap_read(tac_dev->regmap, + SDW_SDCA_CTL(TAC_FUNCTION_ID_HID, TAC_SDCA_ENT_HID1, + TAC_SDCA_CTL_HIDTX_MESSAGE_OFFSET, 0), &offset); + if (ret) { + dev_err(tac_dev->dev, + "Failed to read current UMP message offset: %d", ret); + goto end_btn_det; + } + + dev_dbg(tac_dev->dev, "button detect: message offset = %x", offset); + + for (idx = 0; idx < sizeof(buf); idx++) { + ret = regmap_read(tac_dev->regmap, + TAC_BUF_ADDR_HID1 + offset + idx, &value); + if (ret) { + dev_err(tac_dev->dev, + "Failed to read HID buffer: %d", ret); + goto end_btn_det; + } + buf[idx] = value & 0xff; + } + + if (buf[0] == 0x1) { + btn_type = tac5xx2_sdca_btn_type(&buf[1], tac_dev); + ret = btn_type; + } + +end_btn_det: + regmap_write(tac_dev->regmap, + SDW_SDCA_CTL(TAC_FUNCTION_ID_HID, TAC_SDCA_ENT_HID1, + TAC_SDCA_CTL_HIDTX_CURRENT_OWNER, 0), 0x01); + + return ret; +} + +static int tac5xx2_sdca_headset_detect(struct tac5xx2_prv *tac_dev) +{ + int val, ret; + + ret = regmap_read(tac_dev->regmap, + SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_GE35, + TAC_SDCA_CTL_DET_MODE, 0), &val); + if (ret) { + dev_err(tac_dev->dev, "Failed to read the detect mode"); + return ret; + } + + switch (val) { + case 4: + tac_dev->jack_type = SND_JACK_MICROPHONE; + break; + case 5: + tac_dev->jack_type = SND_JACK_HEADPHONE; + break; + case 6: + tac_dev->jack_type = SND_JACK_HEADSET; + break; + case 0: + default: + tac_dev->jack_type = 0; + break; + } + + ret = regmap_write(tac_dev->regmap, + SDW_SDCA_CTL(TAC_FUNCTION_ID_UAJ, TAC_SDCA_ENT_GE35, + TAC_SDCA_CTL_SEL_MODE, 0), val); + if (ret) + dev_err(tac_dev->dev, "Failed to update the jack type to device"); + + return 0; +} + +static int tac5xx2_jack_init(struct tac5xx2_prv *tac_dev) +{ + int ret = 0; + + if (!tac_dev->hs_jack) + goto disable_interrupts; + + ret = regmap_write(tac_dev->regmap, SDW_SCP_SDCA_INTMASK2, + SDW_SCP_SDCA_INTMASK_SDCA_11); + if (ret) { + dev_err(tac_dev->dev, + "Failed to register jack detection interrupt: %d\n", ret); + goto 
disable_interrupts; + } + + ret = regmap_write(tac_dev->regmap, SDW_SCP_SDCA_INTMASK3, + SDW_SCP_SDCA_INTMASK_SDCA_16); + if (ret) { + dev_err(tac_dev->dev, + "Failed to register for button detect interrupt: %d\n", ret); + goto disable_interrupts; + } + + return 0; + +disable_interrupts: + /* ignore errors while disabling interrupts */ + regmap_write(tac_dev->regmap, SDW_SCP_SDCA_INTMASK2, 0); + regmap_write(tac_dev->regmap, SDW_SCP_SDCA_INTMASK3, 0); + + return ret; +} + +static int tac5xx2_set_jack(struct snd_soc_component *component, + struct snd_soc_jack *hs_jack, void *data) +{ + struct tac5xx2_prv *tac_dev = snd_soc_component_get_drvdata(component); + int ret; + + tac_dev->hs_jack = hs_jack; + + /* resume can happen only after first hw_init */ + if (!tac_dev->first_hw_init_done) + return 0; + + ret = pm_runtime_resume_and_get(component->dev); + if (ret < 0) { + if (ret != -EACCES) { + dev_err(component->dev, + "%s: failed to resume %d\n", __func__, ret); + return ret; + } + + /* pm_runtime not enabled yet */ + dev_dbg(component->dev, + "%s: skipping jack init for now\n", __func__); + return 0; + } + + ret = tac5xx2_jack_init(tac_dev); + if (ret) + dev_err(tac_dev->dev, "jack init failed, err=%d\n", ret); + + pm_runtime_mark_last_busy(component->dev); + pm_runtime_put_autosuspend(component->dev); + + return ret; +} + +static int tac_interrupt_callback(struct sdw_slave *slave, + struct sdw_slave_intr_status *status) +{ + unsigned int sdca_int2, sdca_int3, jack_report_mask = 0; + struct tac5xx2_prv *tac_dev = dev_get_drvdata(&slave->dev); + struct device *dev = &slave->dev; + int btn_type = 0; + int ret = 0; + + if (status->control_port) { + if (status->control_port & SDW_SCP_INT1_PARITY) + dev_warn(dev, "SCP: Parity error interrupt"); + if (status->control_port & SDW_SCP_INT1_BUS_CLASH) + dev_warn(dev, "SCP: Bus clash interrupt"); + } + + if (!tac_has_uaj_support(tac_dev)) + return 0; + + ret = regmap_read(tac_dev->regmap, SDW_SCP_SDCA_INT2, &sdca_int2); + if 
(ret) { + dev_err(dev, "Failed to read UAJ Interrupt, reg:%#x err=%d\n", + SDW_SCP_SDCA_INT2, ret); + return ret; + } + + ret = regmap_read(tac_dev->regmap, SDW_SCP_SDCA_INT3, &sdca_int3); + if (ret) { + dev_err(dev, "Failed to read HID interrupt reg=%#x: err=%d", + SDW_SCP_SDCA_INT3, ret); + return ret; + } + + dev_dbg(dev, "SDCA_INT2: 0x%02x, SDCA_INT3: 0x%02x\n", + sdca_int2, sdca_int3); + + if (sdca_int2 & SDW_SCP_SDCA_INT_SDCA_11) { + ret = tac5xx2_sdca_headset_detect(tac_dev); + if (ret < 0) + goto clear; + jack_report_mask |= SND_JACK_HEADSET; + } + + if (sdca_int3 & SDW_SCP_SDCA_INT_SDCA_16) { + btn_type = tac5xx2_sdca_button_detect(tac_dev); + if (btn_type < 0) + btn_type = 0; + jack_report_mask |= SND_JACK_BTN_0 | SND_JACK_BTN_1 | + SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4; + } + + if (tac_dev->jack_type == 0) + btn_type = 0; + + dev_dbg(tac_dev->dev, "in %s, jack_type=%d\n", __func__, tac_dev->jack_type); + dev_dbg(tac_dev->dev, "in %s, btn_type=0x%x\n", __func__, btn_type); + + if (!tac_dev->hs_jack) + goto clear; + + snd_soc_jack_report(tac_dev->hs_jack, tac_dev->jack_type | btn_type, + jack_report_mask); + +clear: + if (sdca_int2) { + ret = regmap_write(tac_dev->regmap, SDW_SCP_SDCA_INT2, sdca_int2); + if (ret) + dev_dbg(tac_dev->dev, "Failed to clear jack interrupt\n"); + } + + if (sdca_int3) { + ret = regmap_write(tac_dev->regmap, SDW_SCP_SDCA_INT3, sdca_int3); + if (ret) + dev_dbg(tac_dev->dev, "failed to clear hid interrupt\n"); + } + + return 0; +} + +static struct snd_soc_dai_driver tac5572_dai_driver[] = { + { + .name = "tac5xx2-aif1", + .id = TAC5XX2_SPK, + .playback = { + .stream_name = "DP1 Speaker Playback", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + }, + { + .name = "tac5xx2-aif2", + .id = TAC5XX2_DMIC, + .capture = { + .stream_name = "DP3 Mic Capture", + .channels_min = 1, + .channels_max = 4, + .rates = TAC5XX2_DEVICE_RATES, + 
.formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + }, + { + .name = "tac5xx2-aif3", + .id = TAC5XX2_UAJ, + .playback = { + .stream_name = "DP4 UAJ Speaker Playback", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .capture = { + .stream_name = "DP7 UAJ Mic Capture", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + }, +}; + +static struct snd_soc_dai_driver tac5672_dai_driver[] = { + { + .name = "tac5xx2-aif1", + .id = TAC5XX2_SPK, + .playback = { + .stream_name = "DP1 Speaker Playback", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .capture = { + .stream_name = "DP8 IV Sense Capture", + .channels_min = 1, + .channels_max = 4, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + .symmetric_rate = 1, + }, + { + .name = "tac5xx2-aif2", + .id = TAC5XX2_DMIC, + .capture = { + .stream_name = "DP3 Mic Capture", + .channels_min = 1, + .channels_max = 4, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + }, + { + .name = "tac5xx2-aif3", + .id = TAC5XX2_UAJ, + .playback = { + .stream_name = "DP4 UAJ Speaker Playback", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .capture = { + .stream_name = "DP7 UAJ Mic Capture", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + }, +}; + +static struct snd_soc_dai_driver tac5682_dai_driver[] = { + { + .name = "tac5xx2-aif1", + .id = TAC5XX2_SPK, + .playback = { + .stream_name = "DP1 Speaker Playback", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .capture = { + 
.stream_name = "DP2 Echo Reference Capture", + .channels_min = 1, + .channels_max = 4, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + .symmetric_rate = 1, + }, + { + .name = "tac5xx2-aif2", + .id = TAC5XX2_DMIC, + .capture = { + .stream_name = "DP3 Mic Capture", + .channels_min = 1, + .channels_max = 4, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + }, + { + .name = "tac5xx2-aif3", + .id = TAC5XX2_UAJ, + .playback = { + .stream_name = "DP4 UAJ Speaker Playback", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .capture = { + .stream_name = "DP7 UAJ Mic Capture", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + }, +}; + +static struct snd_soc_dai_driver tas2883_dai_driver[] = { + { + .name = "tac5xx2-aif1", + .id = TAC5XX2_SPK, + .playback = { + .stream_name = "DP1 Speaker Playback", + .channels_min = 1, + .channels_max = 2, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + .symmetric_rate = 1, + }, + { + .name = "tac5xx2-aif2", + .id = TAC5XX2_DMIC, + .capture = { + .stream_name = "DP3 Mic Capture", + .channels_min = 1, + .channels_max = 4, + .rates = TAC5XX2_DEVICE_RATES, + .formats = TAC5XX2_DEVICE_FORMATS, + }, + .ops = &tac_dai_ops, + }, +}; + +static s32 tac_component_probe(struct snd_soc_component *component) +{ + struct tac5xx2_prv *tac_dev = snd_soc_component_get_drvdata(component); + int ret; + + ret = pm_runtime_resume(component->dev); + if (ret < 0 && ret != -EACCES) + return ret; + + if (!tac_has_uaj_support(tac_dev)) + goto done_comp_probe; + + ret = snd_soc_dapm_new_controls(snd_soc_component_to_dapm(component), + tac_uaj_widgets, + ARRAY_SIZE(tac_uaj_widgets)); + if (ret) { + dev_err(component->dev, "Failed to add UAJ widgets: %d\n", 
ret); + return ret; + } + + ret = snd_soc_dapm_add_routes(snd_soc_component_to_dapm(component), + tac_uaj_routes, ARRAY_SIZE(tac_uaj_routes)); + if (ret) { + dev_err(component->dev, "Failed to add UAJ routes: %d\n", ret); + return ret; + } + + ret = snd_soc_add_component_controls(component, tac_uaj_controls, + ARRAY_SIZE(tac_uaj_controls)); + if (ret) { + dev_err(component->dev, "Failed to add UAJ controls: %d\n", ret); + return ret; + } + +done_comp_probe: + tac_dev->component = component; + return 0; +} + +static void tac_component_remove(struct snd_soc_component *codec) +{ + struct tac5xx2_prv *tac_dev = snd_soc_component_get_drvdata(codec); + + tac_dev->component = NULL; +} + +static const struct snd_soc_component_driver soc_codec_driver_tacdevice = { + .probe = tac_component_probe, + .remove = tac_component_remove, + .controls = tac5xx2_snd_controls, + .num_controls = ARRAY_SIZE(tac5xx2_snd_controls), + .dapm_widgets = tac5xx2_common_widgets, + .num_dapm_widgets = ARRAY_SIZE(tac5xx2_common_widgets), + .dapm_routes = tac5xx2_common_routes, + .num_dapm_routes = ARRAY_SIZE(tac5xx2_common_routes), + .idle_bias_on = 0, + .endianness = 1, +}; + +static s32 tac_init(struct tac5xx2_prv *tac_dev) +{ + struct snd_soc_component_driver *component_driver; + struct snd_soc_dai_driver *dai_drv; + int num_dais; + s32 ret; + + dev_set_drvdata(tac_dev->dev, tac_dev); + + switch (tac_dev->part_id) { + case 0x5572: + dai_drv = tac5572_dai_driver; + num_dais = ARRAY_SIZE(tac5572_dai_driver); + break; + case 0x5672: + dai_drv = tac5672_dai_driver; + num_dais = ARRAY_SIZE(tac5672_dai_driver); + break; + case 0x5682: + dai_drv = tac5682_dai_driver; + num_dais = ARRAY_SIZE(tac5682_dai_driver); + break; + case 0x2883: + dai_drv = tas2883_dai_driver; + num_dais = ARRAY_SIZE(tas2883_dai_driver); + break; + default: + dev_err(tac_dev->dev, "Unsupported device: 0x%x\n", + tac_dev->part_id); + return -EINVAL; + } + + component_driver = devm_kzalloc(tac_dev->dev, sizeof(*component_driver), + 
GFP_KERNEL); + if (!component_driver) + return -ENOMEM; + + memcpy(component_driver, &soc_codec_driver_tacdevice, sizeof(*component_driver)); + if (tac_has_uaj_support(tac_dev)) + component_driver->set_jack = tac5xx2_set_jack; + + ret = devm_snd_soc_register_component(tac_dev->dev, component_driver, + dai_drv, num_dais); + if (ret) { + dev_err(tac_dev->dev, "%s: codec register error:%d.\n", + __func__, ret); + return ret; + } + + return 0; +} + +static s32 tac5xx2_sdca_dev_suspend(struct device *dev) +{ + struct tac5xx2_prv *tac_dev = dev_get_drvdata(dev); + + if (!tac_dev->hw_init) + return 0; + + regcache_cache_only(tac_dev->regmap, true); + return 0; +} + +static s32 tac5xx2_sdca_dev_system_suspend(struct device *dev) +{ + return tac5xx2_sdca_dev_suspend(dev); +} + +static s32 tac5xx2_sdca_dev_resume(struct device *dev) +{ + struct tac5xx2_prv *tac_dev = dev_get_drvdata(dev); + struct sdw_slave *slave = dev_to_sdw_dev(dev); + unsigned long t; + int ret; + + if (!tac_dev->first_hw_init_done) { + dev_dbg(dev, "Device not initialized yet, skipping resume sync\n"); + return 0; + } + + if (!slave->unattach_request) + goto regmap_sync; + + t = wait_for_completion_timeout(&slave->initialization_complete, + msecs_to_jiffies(TAC5XX2_PROBE_TIMEOUT_MS)); + if (!t) { + dev_err(&slave->dev, "resume: initialization timed out\n"); + sdw_show_ping_status(slave->bus, true); + return -ETIMEDOUT; + } + slave->unattach_request = 0; + +regmap_sync: + regcache_cache_only(tac_dev->regmap, false); + regcache_mark_dirty(tac_dev->regmap); + ret = regcache_sync(tac_dev->regmap); + if (ret < 0) + dev_warn(dev, "Failed to sync regcache: %d\n", ret); + + /* Detect and set jack type for UAJ path before playback. + * This is required as jack detection does not trigger interrupt + * when device is in runtime_pm suspend with bus in clock stop mode. 
+ */ + if (tac_has_uaj_support(tac_dev)) + tac5xx2_sdca_headset_detect(tac_dev); + + return 0; +} + +static const struct dev_pm_ops tac5xx2_sdca_pm = { + SYSTEM_SLEEP_PM_OPS(tac5xx2_sdca_dev_system_suspend, tac5xx2_sdca_dev_resume) + RUNTIME_PM_OPS(tac5xx2_sdca_dev_suspend, tac5xx2_sdca_dev_resume, NULL) +}; + +static s32 tac_fw_read_hdr(const u8 *data, struct tac_fw_hdr *hdr) +{ + hdr->size = get_unaligned_le32(data); + hdr->version_offset = get_unaligned_le32(data + 4); + hdr->plt_id = get_unaligned_le32(data + 8); + hdr->ppc3_ver = get_unaligned_le32(data + 12); + memcpy(hdr->ddc_name, data + 16, 64); + hdr->ddc_name[63] = 0; + hdr->timestamp = get_unaligned_le64(data + 80); + + return TAC_FW_HDR_SIZE; +} + +static s32 tac_fw_get_next_file(const u8 *data, size_t data_size, struct tac_fw_file *file) +{ + u32 file_length; + + /* Validate file header size */ + if (data_size < TAC_FW_FILE_HDR) + return -EINVAL; + + file->vendor_id = get_unaligned_le32(&data[0]); + file->file_id = get_unaligned_le32(&data[4]); + file->version = get_unaligned_le32(&data[8]); + file->length = get_unaligned_le32(&data[12]); + file->dest_addr = get_unaligned_le32(&data[16]); + file_length = file->length; + + /* Validate file payload exists */ + if (data_size < TAC_FW_FILE_HDR + file_length) + return -EINVAL; + + file->fw_data = (u8 *)&data[20]; + + return file_length + sizeof(u32) * 5; +} + +static void tac5xx2_fw_ready(const struct firmware *fmw, void *context) +{ + struct tac5xx2_prv *tac_dev = context; + struct tac_fw_file *files; + u32 fw_hdr_size; + u32 num_files = 0; + struct tac_fw_hdr hdr; + struct tm tm_time; + size_t img_sz; + u32 offset; + s32 ret = 0; + u8 *buf; + + if (!fmw || !fmw->data || fmw->size == 0 || fmw->size < TAC_FW_HDR_SIZE + TAC_FW_FILE_HDR) { + dev_err(tac_dev->dev, "fw file: %s is empty or invalid\n", + tac_dev->fw_binaryname); + goto out; + } + + /* Verify firmware size from header */ + fw_hdr_size = get_unaligned_le32(fmw->data); + if (fw_hdr_size != 
fmw->size) { + dev_err(tac_dev->dev, "firmware size mismatch: hdr=%u, actual=%zu\n", + fw_hdr_size, fmw->size); + goto out; + } + + files = devm_kzalloc(tac_dev->dev, sizeof(*files) * TAC_MAX_FW_CHUNKS, GFP_KERNEL); + buf = devm_kmemdup(tac_dev->dev, fmw->data, fmw->size, GFP_KERNEL); + if (!files || !buf) + goto out; + + /* validate the cache the firmware */ + img_sz = fmw->size; + offset = tac_fw_read_hdr(buf, &hdr); + while (offset < img_sz && num_files < TAC_MAX_FW_CHUNKS) { + u32 file_length; + + if (offset + TAC_FW_FILE_HDR > img_sz) { + dev_warn(tac_dev->dev, "Incomplete block header at offset %d\n", + offset); + goto out; + } + /* Validate that the file payload doesn't exceed buffer */ + file_length = get_unaligned_le32(&buf[offset + 12]); + /* Check for integer overflow and buffer bounds */ + if (file_length > img_sz || offset > img_sz - TAC_FW_FILE_HDR || + file_length > img_sz - offset - TAC_FW_FILE_HDR) { + dev_warn(tac_dev->dev, "File at offset %d exceeds buffer: length=%u, available=%zu\n", + offset, file_length, img_sz - offset - TAC_FW_FILE_HDR); + goto out; + } + ret = tac_fw_get_next_file(&buf[offset], img_sz - offset, &files[num_files]); + if (ret < 0) { + dev_err(tac_dev->dev, "Failed to parse file at offset %d\n", offset); + goto out; + } + offset += ret; + num_files++; + } + + if (num_files == 0) { + dev_err(tac_dev->dev, "firmware with no files\n"); + goto out; + } + + /* cache ready to use validated firmware */ + tac_dev->fw_file_cnt = num_files; + tac_dev->fw_files = files; + + time64_to_tm(hdr.timestamp, 0, &tm_time); + dev_dbg(tac_dev->dev, "fw file: %s, num_files=%u, ts:%04ld-%02d-%02d %02d:%02d\n", + tac_dev->fw_binaryname, tac_dev->fw_file_cnt, + tm_time.tm_year + 1900, tm_time.tm_mon + 1, tm_time.tm_mday, + tm_time.tm_hour, tm_time.tm_min); + dev_dbg(tac_dev->dev, "fw file: DDC Name: %s\n", hdr.ddc_name); + dev_dbg(tac_dev->dev, "fw file: PPC3 Version: 3.%ld.%ld.%ld\n", + FIELD_GET(GENMASK(31, 24), hdr.ppc3_ver), + 
FIELD_GET(GENMASK(23, 16), hdr.ppc3_ver), + FIELD_GET(GENMASK(15, 8), hdr.ppc3_ver) & 0x3f); + +out: + complete_all(&tac_dev->fw_caching_complete); + if (fmw) + release_firmware(fmw); +} + +static int tac_load_and_cache_firmware_async(struct tac5xx2_prv *tac_dev) +{ + tac_dev->fw_file_cnt = 0; + tac_dev->fw_files = NULL; /* ready to download files */ + + return request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, + tac_dev->fw_binaryname, tac_dev->dev, + GFP_KERNEL, tac_dev, tac5xx2_fw_ready); +} + +static int tac_download(struct tac5xx2_prv *tac_dev) +{ + struct tac_fw_file *files = tac_dev->fw_files; + u32 num_files = tac_dev->fw_file_cnt; + u32 i; + int ret = 0; + + for (i = 0; i < num_files; i++) { + ret = sdw_nwrite_no_pm(tac_dev->sdw_peripheral, files[i].dest_addr, + files[i].length, files[i].fw_data); + if (ret < 0) { + dev_dbg(tac_dev->dev, + "FW write failed at addr 0x%x: %d\n", + files[i].dest_addr, ret); + return ret; + } + } + + return 0; +} + +/* + * tac5xx2 uses custom firmware binary fw. + * This is not using UMP File Download. 
+ */ +static s32 tac_download_fw_to_hw(struct tac5xx2_prv *tac_dev) +{ + int ret; + + ret = tac_download(tac_dev); + if (ret < 0) { + dev_err(tac_dev->dev, "Firmware download failed: %d\n", ret); + return ret; + } + + dev_dbg(tac_dev->dev, "Firmware download complete: %d chunks\n", + tac_dev->fw_file_cnt); + tac_dev->fw_dl_success = true; + + return 0; +} + +static struct pci_dev *tac_get_pci_dev(struct sdw_slave *peripheral) +{ + struct device *dev = &peripheral->dev; + + for (; dev; dev = dev->parent) { + if (dev_is_pci(dev)) + return to_pci_dev(dev); + } + + return NULL; +} + +static void tac_generate_fw_name(struct sdw_slave *slave, char *name, size_t size) +{ + struct sdw_bus *bus = slave->bus; + u16 part_id = slave->id.part_id; + u8 unique_id = slave->id.unique_id; + struct pci_dev *pci = tac_get_pci_dev(slave); + + if (pci) + scnprintf(name, size, "%04X-%04X-%1X-%1X.bin", part_id, + pci->subsystem_device, bus->link_id, unique_id); + else + /* Default firmware name based on part ID */ + scnprintf(name, size, "%s%04x-%1X-%1X.bin", + part_id == 0x2883 ? 
"tas" : "tac", + part_id, bus->link_id, unique_id); +} + +static int tac_io_init(struct device *dev, struct sdw_slave *slave, bool first) +{ + struct tac5xx2_prv *tac_dev = dev_get_drvdata(dev); + u64 time; + int ret; + + if (tac_dev->hw_init) { + dev_dbg(dev, "early return hw_init already done.."); + return 0; + } + + time = wait_for_completion_timeout(&tac_dev->fw_caching_complete, + msecs_to_jiffies(TAC5XX2_FW_CACHE_TIMEOUT_MS)); + if (!time) { + ret = -ETIMEDOUT; + dev_warn(tac_dev->dev, "%s: fw caching timeout\n", __func__); + goto io_init_err; + } + + if (tac_dev->fw_files && tac_dev->fw_file_cnt > 0) { + ret = tac_download_fw_to_hw(tac_dev); + if (ret) { + dev_err(tac_dev->dev, "FW download failed, fw: %d\n", ret); + goto io_init_err; + } + } + + if (tac_dev->sa_func_data) { + ret = sdca_regmap_write_init(dev, tac_dev->regmap, + tac_dev->sa_func_data); + if (ret) { + dev_err(dev, "smartamp init table update failed\n"); + goto io_init_err; + } + dev_dbg(dev, "smartamp init done\n"); + + if (first) { + ret = regmap_multi_reg_write(tac_dev->regmap, tac_spk_seq, + ARRAY_SIZE(tac_spk_seq)); + if (ret) { + dev_err(dev, "init writes failed, err=%d", ret); + goto io_init_err; + } + } + } + + if (tac_dev->sm_func_data) { + ret = sdca_regmap_write_init(dev, tac_dev->regmap, + tac_dev->sm_func_data); + if (ret) { + dev_err(dev, "smartmic init table update failed\n"); + goto io_init_err; + } + dev_dbg(dev, "smartmic init done\n"); + + if (first) { + ret = regmap_multi_reg_write(tac_dev->regmap, tac_sm_seq, + ARRAY_SIZE(tac_sm_seq)); + if (ret) { + dev_err(tac_dev->dev, + "init writes failed, err=%d", ret); + goto io_init_err; + } + } + } + + if (tac_dev->uaj_func_data) { + ret = sdca_regmap_write_init(dev, tac_dev->regmap, + tac_dev->uaj_func_data); + if (ret) { + dev_err(dev, "uaj init table update failed\n"); + goto io_init_err; + } + dev_dbg(dev, "uaj init done\n"); + + if (first) { + ret = regmap_multi_reg_write(tac_dev->regmap, tac_uaj_seq, + 
ARRAY_SIZE(tac_uaj_seq)); + if (ret) { + dev_err(tac_dev->dev, + "init writes failed, err=%d", ret); + goto io_init_err; + } + + if (tac_dev->hs_jack) { + ret = tac5xx2_jack_init(tac_dev); + if (ret) { + dev_err(tac_dev->dev, "jack init failed"); + goto io_init_err; + } + } + } + } + + if (tac_dev->hid_func_data) { + ret = sdca_regmap_write_init(dev, tac_dev->regmap, + tac_dev->hid_func_data); + if (ret) { + dev_err(dev, "hid init table update failed\n"); + goto io_init_err; + } + dev_dbg(dev, "hid init done\n"); + } + + tac_dev->hw_init = true; + + return 0; + +io_init_err: + dev_err(dev, "init writes failed, err=%d", ret); + return ret; +} + +static int tac_update_status(struct sdw_slave *slave, + enum sdw_slave_status status) +{ + struct tac5xx2_prv *tac_dev = dev_get_drvdata(&slave->dev); + struct device *dev = &slave->dev; + bool first = false; + int ret; + + tac_dev->status = status; + if (status == SDW_SLAVE_UNATTACHED) { + tac_dev->hw_init = false; + tac_dev->fw_dl_success = false; + } + + if (tac_dev->hw_init || tac_dev->status != SDW_SLAVE_ATTACHED) { + dev_dbg(dev, "%s: early return, hw_init=%d, status=%d", + __func__, tac_dev->hw_init, tac_dev->status); + return 0; + } + + if (!tac_dev->first_hw_init_done) { + pm_runtime_set_active(tac_dev->dev); + tac_dev->first_hw_init_done = true; + first = true; + } + + pm_runtime_get_noresume(tac_dev->dev); + + regcache_mark_dirty(tac_dev->regmap); + regcache_cache_only(tac_dev->regmap, false); + ret = tac_io_init(&slave->dev, slave, first); + if (ret) { + dev_err(dev, "Device initialization failed: %d\n", ret); + goto err_out; + } + + ret = regcache_sync(tac_dev->regmap); + if (ret) + dev_warn(dev, "Failed to sync regcache after init: %d\n", ret); + +err_out: + pm_runtime_mark_last_busy(tac_dev->dev); + pm_runtime_put_autosuspend(tac_dev->dev); + + return ret; +} + +static int tac5xx2_sdw_read_prop(struct sdw_slave *peripheral) +{ + struct device *dev = &peripheral->dev; + int ret; + + ret = 
sdw_slave_read_prop(peripheral); + if (ret) { + dev_err(dev, "sdw_slave_read_prop failed: %d", ret); + return ret; + } + + return 0; +} + +static int tac_port_prep(struct sdw_slave *slave, struct sdw_prepare_ch *prep_ch, + enum sdw_port_prep_ops pre_ops) +{ + struct device *dev = &slave->dev; + struct tac5xx2_prv *tac_dev = dev_get_drvdata(dev); + unsigned int val; + int ret; + + if (pre_ops != SDW_OPS_PORT_POST_PREP) + return 0; + + if (!tac_dev->fw_dl_success) + return 0; + + ret = regmap_read(tac_dev->regmap, TAC_DSP_ALGO_STATUS, &val); + if (ret) { + dev_err(dev, "Failed to read algo status: %d\n", ret); + return ret; + } + + if (val != TAC_DSP_ALGO_STATUS_RUNNING) { + dev_dbg(dev, "Algo not running (0x%02x), re-enabling\n", val); + ret = regmap_write(tac_dev->regmap, TAC_DSP_ALGO_STATUS, + TAC_DSP_ALGO_STATUS_RUNNING); + if (ret) { + dev_err(dev, "Failed to re-enable algo: %d\n", ret); + return ret; + } + } + + return 0; +} + +static const struct sdw_slave_ops tac_sdw_ops = { + .read_prop = tac5xx2_sdw_read_prop, + .update_status = tac_update_status, + .interrupt_callback = tac_interrupt_callback, + .port_prep = tac_port_prep, +}; + +static s32 tac_sdw_probe(struct sdw_slave *peripheral, + const struct sdw_device_id *id) +{ + struct sdca_function_data *function_data = NULL; + struct device *dev = &peripheral->dev; + struct tac5xx2_prv *tac_dev; + struct regmap *regmap; + int ret, i; + + tac_dev = devm_kzalloc(dev, sizeof(*tac_dev), GFP_KERNEL); + if (!tac_dev) + return dev_err_probe(dev, -ENOMEM, + "Failed devm_kzalloc"); + + if (peripheral->sdca_data.num_functions > 0) { + dev_dbg(dev, "SDCA functions found: %d", + peripheral->sdca_data.num_functions); + + for (i = 0; i < peripheral->sdca_data.num_functions; i++) { + struct sdca_function_data **func_ptr; + const char *func_name; + + switch (peripheral->sdca_data.function[i].type) { + case SDCA_FUNCTION_TYPE_SMART_AMP: + func_ptr = &tac_dev->sa_func_data; + func_name = "smartamp"; + break; + case 
SDCA_FUNCTION_TYPE_SMART_MIC: + func_ptr = &tac_dev->sm_func_data; + func_name = "smartmic"; + break; + case SDCA_FUNCTION_TYPE_UAJ: + func_ptr = &tac_dev->uaj_func_data; + func_name = "uaj"; + break; + case SDCA_FUNCTION_TYPE_HID: + func_ptr = &tac_dev->hid_func_data; + func_name = "hid"; + break; + default: + continue; + } + + function_data = devm_kzalloc(dev, sizeof(*function_data), + GFP_KERNEL); + if (!function_data) + return dev_err_probe(dev, -ENOMEM, + "failed to allocate %s function data", + func_name); + function_data->desc = &peripheral->sdca_data.function[i]; + ret = sdca_parse_function(dev, peripheral, function_data); + if (!ret) + *func_ptr = function_data; + else + devm_kfree(dev, function_data); + } + } + + dev_dbg(dev, "SDCA functions enabled: SA=%s SM=%s UAJ=%s HID=%s", + tac_dev->sa_func_data ? "yes" : "no", + tac_dev->sm_func_data ? "yes" : "no", + tac_dev->uaj_func_data ? "yes" : "no", + tac_dev->hid_func_data ? "yes" : "no"); + + tac_dev->dev = dev; + tac_dev->sdw_peripheral = peripheral; + tac_dev->hw_init = false; + tac_dev->first_hw_init_done = false; + tac_dev->part_id = id->part_id; + dev_set_drvdata(dev, tac_dev); + + regmap = devm_regmap_init_sdw_mbq_cfg(&peripheral->dev, peripheral, + &tac_regmap, &tac_mbq_cfg); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "Failed devm_regmap_init_sdw\n"); + + regcache_cache_only(regmap, true); + tac_dev->regmap = regmap; + tac_dev->jack_type = 0; + init_completion(&tac_dev->fw_caching_complete); + + if (tac_has_dsp_algo(tac_dev)) { + tac_generate_fw_name(peripheral, tac_dev->fw_binaryname, + sizeof(tac_dev->fw_binaryname)); + + ret = tac_load_and_cache_firmware_async(tac_dev); + if (ret) { + complete_all(&tac_dev->fw_caching_complete); + dev_dbg(dev, "failed to load fw: %d, use rom mode\n", ret); + } + } else { + complete_all(&tac_dev->fw_caching_complete); + } + + ret = tac_init(tac_dev); + if (ret) + return dev_err_probe(dev, ret, + "failed to initialize tac device\n"); + + 
/* set autosuspend parameters */ + pm_runtime_set_autosuspend_delay(dev, 3000); + pm_runtime_use_autosuspend(dev); + + /* make sure the device does not suspend immediately */ + pm_runtime_mark_last_busy(dev); + + pm_runtime_enable(dev); + /* the device is still not in active */ + + return 0; +} + +static void tac_sdw_remove(struct sdw_slave *peripheral) +{ + struct tac5xx2_prv *tac_dev = dev_get_drvdata(&peripheral->dev); + + pm_runtime_disable(tac_dev->dev); + + dev_set_drvdata(&peripheral->dev, NULL); +} + +static const struct sdw_device_id tac_sdw_id[] = { + SDW_SLAVE_ENTRY(0x0102, 0x5572, 0), + SDW_SLAVE_ENTRY(0x0102, 0x5672, 0), + SDW_SLAVE_ENTRY(0x0102, 0x5682, 0), + SDW_SLAVE_ENTRY(0x0102, 0x2883, 0), + {}, +}; +MODULE_DEVICE_TABLE(sdw, tac_sdw_id); + +static struct sdw_driver tac_sdw_driver = { + .driver = { + .name = "slave-tac5xx2", + .pm = pm_ptr(&tac5xx2_sdca_pm), + }, + .probe = tac_sdw_probe, + .remove = tac_sdw_remove, + .ops = &tac_sdw_ops, + .id_table = tac_sdw_id, +}; +module_sdw_driver(tac_sdw_driver); + +MODULE_IMPORT_NS("SND_SOC_SDCA"); +MODULE_AUTHOR("Texas Instruments Inc."); +MODULE_DESCRIPTION("ASoC TAC5XX2 SoundWire Driver"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/tac5xx2.h b/sound/soc/codecs/tac5xx2.h new file mode 100644 index 00000000000000..eed8e6cf3498b5 --- /dev/null +++ b/sound/soc/codecs/tac5xx2.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * ALSA SoC Texas Instruments TAC5XX2 Audio Smart Amplifier + * + * Copyright (C) 2025 Texas Instruments Incorporated + * https://www.ti.com + * + * This the header file for TAC5XX2 family of devices + * which includes TAC5572, TAC5672, TAC5682 and TAS2883 + * + * Author: Niranjan H Y + */ +#ifndef __RGL_TAC5XX2_H__ +#define __RGL_TAC5XX2_H__ + +/* for soundwire */ +#define TAC_REG_SDW(book, page, reg) (((book) * 256 * 128) + \ + 0x3000000 + ((page) * 128) + (reg)) + +/* page 0 registers */ +#define TAC_SW_RESET TAC_REG_SDW(0, 0, 1) +#define TAC_SLEEP_MODEZ 
TAC_REG_SDW(0, 0, 2) +#define TAC_FEATURE_PDZ TAC_REG_SDW(0, 0, 3) +#define TAC_TX_CH_EN TAC_REG_SDW(0, 0, 4) +#define TAC_RX_CH_PD TAC_REG_SDW(0, 0, 5) +#define TAC_SHDNZ_CFG TAC_REG_SDW(0, 0, 6) +#define TAC_MISC_CFG0 TAC_REG_SDW(0, 0, 7) +#define TAC_MISC_CFG1 TAC_REG_SDW(0, 0, 8) +#define TAC_GPIO1_CFG0 TAC_REG_SDW(0, 0, 9) +#define TAC_GPIO2_CFG0 TAC_REG_SDW(0, 0, 10) +#define TAC_GPIO3_CFG0 TAC_REG_SDW(0, 0, 11) +#define TAC_GPIO4_CFG0 TAC_REG_SDW(0, 0, 12) +#define TAC_GPIO5_CFG0 TAC_REG_SDW(0, 0, 13) +#define TAC_GPIO6_CFG0 TAC_REG_SDW(0, 0, 14) +#define TAC_INTF_CFG1 TAC_REG_SDW(0, 0, 15) +#define TAC_INTF_CFG5 TAC_REG_SDW(0, 0, 16) +#define TAC_PASI_BCLK_CFG0 TAC_REG_SDW(0, 0, 17) +#define TAC_PASI_FSYNC_CFG0 TAC_REG_SDW(0, 0, 18) +#define TAC_PASI_DIN1_CFG0 TAC_REG_SDW(0, 0, 19) +#define TAC_PASI_DIN2_CFG0 TAC_REG_SDW(0, 0, 20) +#define TAC_PDM_DIN1_CFG0 TAC_REG_SDW(0, 0, 21) +#define TAC_PDM_DIN2_CFG0 TAC_REG_SDW(0, 0, 22) +#define TAC_MCLK_SEL TAC_REG_SDW(0, 0, 23) +#define TAC_I2C2_CFG0 TAC_REG_SDW(0, 0, 24) +#define TAC_SDW_IO_CFG0 TAC_REG_SDW(0, 0, 25) +#define TAC_SDW_CLK_CFG0 TAC_REG_SDW(0, 0, 26) +#define TAC_PASI_CFG0 TAC_REG_SDW(0, 0, 27) +#define TAC_PASI_CFG1 TAC_REG_SDW(0, 0, 28) +#define TAC_PASI_TX_CFG0 TAC_REG_SDW(0, 0, 29) +#define TAC_PASI_TX_CFG1 TAC_REG_SDW(0, 0, 30) +#define TAC_PASI_TX_CFG2 TAC_REG_SDW(0, 0, 31) +#define TAC_PASI_TX_CFG3 TAC_REG_SDW(0, 0, 32) +#define TAC_PASI_TX_CH1_CFG0 TAC_REG_SDW(0, 0, 33) +#define TAC_PASI_TX_CH2_CFG0 TAC_REG_SDW(0, 0, 34) +#define TAC_PASI_TX_CH3_CFG0 TAC_REG_SDW(0, 0, 35) +#define TAC_PASI_TX_CH4_CFG0 TAC_REG_SDW(0, 0, 36) +#define TAC_PASI_TX_CH5_CFG0 TAC_REG_SDW(0, 0, 37) +#define TAC_PASI_TX_CH6_CFG0 TAC_REG_SDW(0, 0, 38) +#define TAC_PASI_TX_CH7_CFG0 TAC_REG_SDW(0, 0, 39) +#define TAC_PASI_TX_CH8_CFG0 TAC_REG_SDW(0, 0, 40) +#define TAC_PASI_RX_CFG0 TAC_REG_SDW(0, 0, 41) +#define TAC_PASI_RX_CFG1 TAC_REG_SDW(0, 0, 42) +#define TAC_PASI_RX_CFG2 TAC_REG_SDW(0, 0, 43) +#define 
TAC_PASI_RX_CH1_CFG0 TAC_REG_SDW(0, 0, 44) +#define TAC_PASI_RX_CH2_CFG0 TAC_REG_SDW(0, 0, 45) +#define TAC_PASI_RX_CH3_CFG0 TAC_REG_SDW(0, 0, 46) +#define TAC_PASI_RX_CH4_CFG0 TAC_REG_SDW(0, 0, 47) +#define TAC_PASI_RX_CH5_CFG0 TAC_REG_SDW(0, 0, 48) +#define TAC_PASI_RX_CH6_CFG0 TAC_REG_SDW(0, 0, 49) +#define TAC_PASI_RX_CH7_CFG0 TAC_REG_SDW(0, 0, 50) +#define TAC_PASI_RX_CH8_CFG0 TAC_REG_SDW(0, 0, 51) +#define TAC_ADC_CH1_CFG0 TAC_REG_SDW(0, 0, 52) +#define TAC_ADC_DVOL_CFG0 TAC_REG_SDW(0, 0, 53) +#define TAC_ADC_CH1_FGAIN TAC_REG_SDW(0, 0, 54) +#define TAC_ADC_CH1_CFG1 TAC_REG_SDW(0, 0, 55) +#define TAC_ADC_CH2_CFG0 TAC_REG_SDW(0, 0, 57) +#define TAC_ADC_DVOL_CFG1 TAC_REG_SDW(0, 0, 58) +#define TAC_ADC_CH2_FGAIN TAC_REG_SDW(0, 0, 59) +#define TAC_ADC_CH2_CFG1 TAC_REG_SDW(0, 0, 60) +#define TAC_ADC_CFG1 TAC_REG_SDW(0, 0, 62) +#define TAC_PDM_CH1_DVOL TAC_REG_SDW(0, 0, 63) +#define TAC_PDM_CH1_FGAIN TAC_REG_SDW(0, 0, 64) +#define TAC_PDM_CH1_CFG0 TAC_REG_SDW(0, 0, 65) +#define TAC_PDM_CH2_DVOL TAC_REG_SDW(0, 0, 67) +#define TAC_PDM_CH2_FGAIN TAC_REG_SDW(0, 0, 68) +#define TAC_PDM_CH2_CFG2 TAC_REG_SDW(0, 0, 69) +#define TAC_PDM_CH3_DVOL TAC_REG_SDW(0, 0, 71) +#define TAC_PDM_CH3_FGAIN TAC_REG_SDW(0, 0, 72) +#define TAC_PDM_CH3_CFG0 TAC_REG_SDW(0, 0, 73) +#define TAC_PDM_CH4_DVOL TAC_REG_SDW(0, 0, 75) +#define TAC_PDM_CH4_FGAIN TAC_REG_SDW(0, 0, 76) +#define TAC_PDM_CH4_CFG0 TAC_REG_SDW(0, 0, 77) +#define TAC_MICBIAS_CFG0 TAC_REG_SDW(0, 0, 79) +#define TAC_MICPREAMP_CFG TAC_REG_SDW(0, 0, 80) +#define TAC_MICBIAS_CFG1 TAC_REG_SDW(0, 0, 81) +#define TAC_CLASSD_CH1_DVOL TAC_REG_SDW(0, 0, 82) +#define TAC_CLASSD_CH1_FGAIN TAC_REG_SDW(0, 0, 83) +#define TAC_CLASSD_CH2_DVOL TAC_REG_SDW(0, 0, 85) +#define TAC_CLASSD_CH2_FGAIN TAC_REG_SDW(0, 0, 86) +#define TAC_GCHP_CH1_DVOL TAC_REG_SDW(0, 0, 88) +#define TAC_GCHP_CH1_FGAIN TAC_REG_SDW(0, 0, 89) +#define TAC_GCHP_CH2_DVOL TAC_REG_SDW(0, 0, 91) +#define TAC_GCHP_CH2_FGAIN TAC_REG_SDW(0, 0, 92) +#define TAC_AMP_LVL_CFG0 
TAC_REG_SDW(0, 0, 94) +#define TAC_AMP_LVL_CFG1 TAC_REG_SDW(0, 0, 95) +#define TAC_AMP_LVL_CFG2 TAC_REG_SDW(0, 0, 96) +#define TAC_AMP_LVL_CFG3 TAC_REG_SDW(0, 0, 97) +#define TAC_EFF_MODE_CFG0 TAC_REG_SDW(0, 0, 98) +#define TAC_EFF_MODE_CFG1 TAC_REG_SDW(0, 0, 99) +#define TAC_CLASSD_CFG0 TAC_REG_SDW(0, 0, 100) +#define TAC_CLASSD_CFG1 TAC_REG_SDW(0, 0, 101) +#define TAC_CLASSD_CFG3 TAC_REG_SDW(0, 0, 102) +#define TAC_CLASSD_CFG4 TAC_REG_SDW(0, 0, 103) +#define TAC_CLASSD_CFG5 TAC_REG_SDW(0, 0, 104) +#define TAC_CLASSD_CFG6 TAC_REG_SDW(0, 0, 105) +#define TAC_CLASSD_CFG8 TAC_REG_SDW(0, 0, 106) +#define TAC_ISNS_CFG TAC_REG_SDW(0, 0, 107) +#define TAC_DSP_CFG0 TAC_REG_SDW(0, 0, 108) +#define TAC_DSP_CFG1 TAC_REG_SDW(0, 0, 109) +#define TAC_DSP_CFG2 TAC_REG_SDW(0, 0, 110) +#define TAC_DSP_CFG3 TAC_REG_SDW(0, 0, 111) +#define TAC_JACK_DET_CFG1 TAC_REG_SDW(0, 0, 112) +#define TAC_JACK_DET_CFG2 TAC_REG_SDW(0, 0, 113) +#define TAC_JACK_DET_CFG3 TAC_REG_SDW(0, 0, 114) +#define TAC_JACK_DET_CFG4 TAC_REG_SDW(0, 0, 115) +#define TAC_JACK_DET_CFG7 TAC_REG_SDW(0, 0, 116) +#define TAC_UJ_IMPEDANCE_L TAC_REG_SDW(0, 0, 117) +#define TAC_UJ_IMPEDANCE_R TAC_REG_SDW(0, 0, 118) +#define UJ_IMPEDANCE_L TAC_REG_SDW(0, 0, 119) +#define UJ_IMPEDANCE_R TAC_REG_SDW(0, 0, 120) +#define TAC_GP_ANA_STS TAC_REG_SDW(0, 0, 123) +#define TAC_DEV_ID TAC_REG_SDW(0, 0, 124) +#define TAC_REV_ID TAC_REG_SDW(0, 0, 125) +#define TAC_I2C_CKSUM TAC_REG_SDW(0, 0, 126) +#define TAC_BOOK TAC_REG_SDW(0, 0, 127) + +#define TAC_INT_CFG TAC_REG_SDW(0, 2, 1) +#define TAC_INT_CFG_CLR_REG BIT(3) + +/* smartamp function */ +#define TAC_FUNCTION_ID_SA 0x1 + +#define TAC_SDCA_ENT_ENT0 0x0 +#define TAC_SDCA_ENT_PPU21 0x1 +#define TAC_SDCA_ENT_FU21 0x2 +#define TAC_SDCA_ENT_FU26 0x3 +#define TAC_SDCA_ENT_XU22 0x4 +#define TAC_SDCA_ENT_CS24 0x5 +#define TAC_SDCA_ENT_CS21 0x6 +#define TAC_SDCA_ENT_CS25 0x7 +#define TAC_SDCA_ENT_CS26 0x8 +#define TAC_SDCA_ENT_CS28 0x9 +#define TAC_SDCA_ENT_PPU26 0xa +#define 
TAC_SDCA_ENT_FU23 0xb +#define TAC_SDCA_ENT_PDE23 0xc +#define TAC_SDCA_ENT_TG23 0x12 +#define TAC_SDCA_ENT_IT21 0x13 +#define TAC_SDCA_ENT_IT29 0x14 +#define TAC_SDCA_ENT_IT26 0x15 +#define TAC_SDCA_ENT_IT28 0x16 +#define TAC_SDCA_ENT_OT24 0x17 +#define TAC_SDCA_ENT_OT23 0x18 +#define TAC_SDCA_ENT_OT25 0x19 +#define TAC_SDCA_ENT_OT28 0x1a +#define TAC_SDCA_ENT_OT27 0x1c +#define TAC_SDCA_ENT_SPE199 0x21 +#define TAC_SDCA_ENT_OT20 0x24 +#define TAC_SDCA_ENT_FU27 0x26 +#define TAC_SDCA_ENT_FU20 0x27 +#define TAC_SDCA_ENT_PDE24 0x2e +#define TAC_SDCA_ENT_PDE27 0x2f +#define TAC_SDCA_ENT_PDE28 0x30 +#define TAC_SDCA_ENT_PDE20 0x31 +#define TAC_SDCA_ENT_SAPU29 0x35 + +/* Control selector definitions */ +#define TAC_SDCA_MASTER_GAIN 0x0B +#define TAC_SDCA_MASTER_MUTE 0x01 +#define TAC_SDCA_CHANNEL_MUTE 0x01 +#define TAC_SDCA_CHANNEL_GAIN 0x02 +#define TAC_SDCA_POSTURENUMBER 0x10 +#define TAC_SDCA_REQUESTED_PS 0x01 +#define TAC_SDCA_ACTUAL_PS 0x10 +#define TAC_SDCA_CHANNEL_VOLUME 0x02 + +/* 2. smart mic function */ +#define TAC_FUNCTION_ID_SM 0x2 + +#define TAC_SDCA_ENT_IT11 0x1 +#define TAC_SDCA_ENT_OT113 0x2 +#define TAC_SDCA_ENT_CS11 0x3 +#define TAC_SDCA_ENT_CS18 0x4 +#define TAC_SDCA_ENT_FU113 0x5 +#define TAC_SDCA_ENT_FU13 0x6 +#define TAC_SDCA_ENT_FU11 0x8 +#define TAC_SDCA_ENT_XU12 0xa +#define TAC_SDCA_ENT_CS113 0xc +#define TAC_SDCA_ENT_CX11 0xf +#define TAC_SDCA_ENT_PDE11 0x12 +#define TAC_SDCA_ENT_PPU11 0x9 + +/* controls */ +#define TAC_SDCA_CTL_USAGE 0x04 +#define TAC_SDCA_CTL_IT_CLUSTER 0x10 +#define TAC_SDCA_CTL_OT_DP_SEL 0x11 +#define TAC_SDCA_CTL_XU_BYPASS 0x01 +/* cx */ +#define TAC_SDCA_CTL_CX_CLK_SEL 0x01 +/* cs */ +#define TAC_SDCA_CTL_CS_CLKVLD 0x02 +#define TAC_SDCA_CTL_CS_SAMP_RATE_IDX 0x10 +/* cs113 end */ +/* ppu */ +#define TAC_SDCA_CTL_PPU_POSTURE_NUM 0x10 + +/* 3. 
UAJ function */ +#define TAC_FUNCTION_ID_UAJ 0x3 +#define TAC_SDCA_ENT_PDE47 0x35 +#define TAC_SDCA_ENT_PDE34 0x32 +#define TAC_SDCA_ENT_FU41 0x26 /* user */ +#define TAC_SDCA_ENT_IT41 0x07 +#define TAC_SDCA_ENT_XU42 0x2C +#define TAC_SDCA_ENT_CS41 0x30 +#define TAC_SDCA_ENT_OT45 0x0E +#define TAC_SDCA_ENT_IT33 0x03 +#define TAC_SDCA_ENT_OT36 0x0A +#define TAC_SDCA_ENT_FU36 0x28 +#define TAC_SDCA_ENT_CS36 0x2E +#define TAC_SDCA_ENT_GE35 0x3B /* 59 */ + +#define TAC_SDCA_CTL_SEL_MODE 0x1 +#define TAC_SDCA_CTL_DET_MODE 0x2 + +/* 4. HID function */ +#define TAC_FUNCTION_ID_HID 0x4 +#define TAC_SDCA_ENT_HID1 0x1 +/* HID Control Selectors */ +#define TAC_SDCA_CTL_HIDTX_CURRENT_OWNER 0x10 +#define TAC_SDCA_CTL_HIDTX_MESSAGE_OFFSET 0x12 +#define TAC_SDCA_CTL_HIDTX_MESSAGE_LENGTH 0x13 +#define TAC_SDCA_CTL_DETECTED_MODE 0x10 +#define TAC_SDCA_CTL_SELECTED_MODE 0x11 + +#define TAC_BUF_ADDR_HID1 0x44007F80 + +/* DAI interfaces */ +#define TAC5XX2_SPK 0 +#define TAC5XX2_DMIC 2 +#define TAC5XX2_UAJ 3 + +/* Port numbers for DAIs */ +#define TAC_SDW_PORT_NUM_SPK_PLAYBACK 1 +#define TAC_SDW_PORT_NUM_SPK_CAPTURE 2 +#define TAC_SDW_PORT_NUM_DMIC 3 +#define TAC_SDW_PORT_NUM_UAJ_PLAYBACK 4 +#define TAC_SDW_PORT_NUM_UAJ_CAPTURE 7 +#define TAC_SDW_PORT_NUM_IV_SENSE 8 + +#endif diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c index 6aab6d2b741910..55211266927d1a 100644 --- a/sound/soc/codecs/tas2764.c +++ b/sound/soc/codecs/tas2764.c @@ -684,18 +684,33 @@ static int tas2764_read_die_temp(struct tas2764_priv *tas2764, long *result) * As per datasheet, subtract 93 from raw value to get degrees * Celsius. hwmon wants millidegrees. * - * NOTE: The chip will initialise the TAS2764_TEMP register to - * 2.6 *C to avoid triggering temperature protection. Since the - * ADC is powered down during software shutdown, this value will - * persist until the chip is fully powered up (e.g. the PCM it's - * attached to is opened). 
The ADC will power down again when - * the chip is put back into software shutdown, with the last - * value sampled persisting in the ADC's register. + * NOTE: The TAS2764 datasheet mentions initialising TAS2764_TEMP + * such that the temperature is 2.6 *C, however the register + * is actually initialised to 0. The ADC is also powered down during + * software shutdown. The last sampled temperature will persist + * in the register while the amp is in this power state. */ + if (reg == 0) + return -ENODATA; + *result = (reg - 93) * 1000; return 0; } +static int tas2764_hwmon_is_fault(struct tas2764_priv *tas2764, long *result) +{ + int ret; + long temp; + + ret = tas2764_read_die_temp(tas2764, &temp); + if (ret == -ENODATA) { + *result = true; + return 0; + } + + return ret; +} + static umode_t tas2764_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) @@ -705,6 +720,7 @@ static umode_t tas2764_hwmon_is_visible(const void *data, switch (attr) { case hwmon_temp_input: + case hwmon_temp_fault: return 0444; default: break; @@ -724,6 +740,9 @@ static int tas2764_hwmon_read(struct device *dev, case hwmon_temp_input: ret = tas2764_read_die_temp(tas2764, val); break; + case hwmon_temp_fault: + ret = tas2764_hwmon_is_fault(tas2764, val); + break; default: ret = -EOPNOTSUPP; break; @@ -733,7 +752,7 @@ static int tas2764_hwmon_read(struct device *dev, } static const struct hwmon_channel_info *const tas2764_hwmon_info[] = { - HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_FAULT), NULL }; diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c index 50501bcbe91678..dbda9f3275351a 100644 --- a/sound/soc/codecs/tas2770.c +++ b/sound/soc/codecs/tas2770.c @@ -633,10 +633,27 @@ static int tas2770_read_die_temp(struct tas2770_priv *tas2770, long *result) * value read back from its registers will be the last value sampled * before entering software shutdown. 
*/ + if (reading == 0) + return -ENODATA; + *result = (reading - (93 * 16)) * 1000 / 16; return 0; } +static int tas2770_hwmon_is_fault(struct tas2770_priv *tas2770, long *result) +{ + int ret; + long temp; + + ret = tas2770_read_die_temp(tas2770, &temp); + if (ret == -ENODATA) { + *result = true; + return 0; + } + + return ret; +} + static umode_t tas2770_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) @@ -646,6 +663,7 @@ static umode_t tas2770_hwmon_is_visible(const void *data, switch (attr) { case hwmon_temp_input: + case hwmon_temp_fault: return 0444; default: break; @@ -665,6 +683,9 @@ static int tas2770_hwmon_read(struct device *dev, case hwmon_temp_input: ret = tas2770_read_die_temp(tas2770, val); break; + case hwmon_temp_fault: + ret = tas2770_hwmon_is_fault(tas2770, val); + break; default: ret = -EOPNOTSUPP; break; @@ -674,7 +695,7 @@ static int tas2770_hwmon_read(struct device *dev, } static const struct hwmon_channel_info *const tas2770_hwmon_info[] = { - HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_FAULT), NULL }; diff --git a/sound/soc/codecs/tas2783-sdw.c b/sound/soc/codecs/tas2783-sdw.c index 90008d2d06e2c7..38009168c5a114 100644 --- a/sound/soc/codecs/tas2783-sdw.c +++ b/sound/soc/codecs/tas2783-sdw.c @@ -1310,10 +1310,10 @@ static s32 tas_sdw_probe(struct sdw_slave *peripheral, return dev_err_probe(dev, -ENOMEM, "failed to parse sdca functions"); + function_data->desc = &peripheral->sdca_data.function[i]; + /* Parse the function */ - ret = sdca_parse_function(dev, peripheral, - &peripheral->sdca_data.function[i], - function_data); + ret = sdca_parse_function(dev, peripheral, function_data); if (!ret) tas_dev->sa_func_data = function_data; else diff --git a/sound/soc/codecs/wm_adsp_fw_find_test.c b/sound/soc/codecs/wm_adsp_fw_find_test.c index d0c7fb30a95dca..ae686dc4fa94b7 100644 --- a/sound/soc/codecs/wm_adsp_fw_find_test.c +++ 
b/sound/soc/codecs/wm_adsp_fw_find_test.c @@ -45,6 +45,34 @@ struct wm_adsp_fw_find_test_params { /* Dummy struct firmware to return from wm_adsp_request_firmware_files */ static const struct firmware wm_adsp_find_test_dummy_firmware; +static void wm_adsp_fw_find_test_release_firmware_files_stub(struct wm_adsp_fw_files *fw) +{ + /* + * fw->wmfw.firmware and fw->coeff.firmware allocated by this KUnit + * test are dummies not allocated by the real request_firmware() call + * so they must not be passed to release_firmware(). + * This function replaces wm_adsp_release_firmware_files(). + */ + + if (!fw) + return; + + kfree(fw->wmfw.filename); + kfree(fw->coeff.filename); + + fw->wmfw.firmware = NULL; + fw->coeff.firmware = NULL; + fw->wmfw.filename = NULL; + fw->coeff.filename = NULL; +} + +static void wm_adsp_free_found_fw(struct kunit *test) +{ + struct wm_adsp_fw_find_test *priv = test->priv; + + wm_adsp_fw_find_test_release_firmware_files_stub(&priv->found_fw); +} + /* Simple lookup of a filename in a list of names */ static int wm_adsp_fw_find_test_firmware_request_simple_stub(const struct firmware **firmware, const char *filename, @@ -97,9 +125,14 @@ static void wm_adsp_fw_find_test_pick_file(struct kunit *test) kunit_activate_static_stub(test, wm_adsp_firmware_request, wm_adsp_fw_find_test_firmware_request_simple_stub); + kunit_activate_static_stub(test, + wm_adsp_release_firmware_files, + wm_adsp_fw_find_test_release_firmware_files_stub); ret = wm_adsp_request_firmware_files(dsp, &priv->found_fw); kunit_deactivate_static_stub(test, wm_adsp_firmware_request); + kunit_deactivate_static_stub(test, wm_adsp_release_firmware_files); + KUNIT_EXPECT_EQ_MSG(test, ret, (params->expect_wmfw || params->expect_bin) ? 
0 : -ENOENT, "%s\n", priv->searched_fw_files); @@ -173,10 +206,13 @@ static void wm_adsp_fw_find_test_search_order(struct kunit *test) kunit_activate_static_stub(test, wm_adsp_firmware_request, wm_adsp_fw_find_test_firmware_request_stub); + kunit_activate_static_stub(test, + wm_adsp_release_firmware_files, + wm_adsp_fw_find_test_release_firmware_files_stub); wm_adsp_request_firmware_files(dsp, &priv->found_fw); - kunit_deactivate_static_stub(test, wm_adsp_firmware_request); + kunit_deactivate_static_stub(test, wm_adsp_release_firmware_files); KUNIT_EXPECT_STREQ(test, priv->searched_fw_files, params->expected_searches); @@ -201,6 +237,7 @@ static void wm_adsp_fw_find_test_find_firmware_byindex(struct kunit *test) dsp->cs_dsp.name = "cs1234"; dsp->part = "dsp1"; + for (dsp->fw = 0;; dsp->fw++) { fw_name = wm_adsp_get_fwf_name_by_index(dsp->fw); if (!fw_name) @@ -209,14 +246,22 @@ static void wm_adsp_fw_find_test_find_firmware_byindex(struct kunit *test) kunit_activate_static_stub(test, wm_adsp_firmware_request, wm_adsp_fw_find_test_firmware_request_stub); + kunit_activate_static_stub(test, + wm_adsp_release_firmware_files, + wm_adsp_fw_find_test_release_firmware_files_stub); wm_adsp_request_firmware_files(dsp, &priv->found_fw); + kunit_deactivate_static_stub(test, wm_adsp_firmware_request); + kunit_deactivate_static_stub(test, wm_adsp_release_firmware_files); KUNIT_EXPECT_NOT_NULL_MSG(test, strstr(priv->searched_fw_files, fw_name), "fw#%d Did not find '%s' in '%s'\n", dsp->fw, fw_name, priv->searched_fw_files); + + wm_adsp_free_found_fw(test); + memset(priv->searched_fw_files, 0, sizeof(priv->searched_fw_files)); } } @@ -255,15 +300,7 @@ static int wm_adsp_fw_find_test_case_init(struct kunit *test) static void wm_adsp_fw_find_test_case_exit(struct kunit *test) { - struct wm_adsp_fw_find_test *priv = test->priv; - - /* - * priv->found_wmfw_firmware and priv->found_bin_firmware are - * dummies not allocated by the real request_firmware() call they - * must not be 
passed to release_firmware(). - */ - kfree(priv->found_fw.wmfw.filename); - kfree(priv->found_fw.coeff.filename); + wm_adsp_free_found_fw(test); } static void wm_adsp_fw_find_test_param_desc(const struct wm_adsp_fw_find_test_params *param, diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c index 70a6159430ed39..709543308fe9e9 100644 --- a/sound/soc/fsl/fsl-asoc-card.c +++ b/sound/soc/fsl/fsl-asoc-card.c @@ -40,6 +40,33 @@ /* Default DAI format without Master and Slave flag */ #define DAI_FMT_BASE (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF) +static const u32 cs42888_rates_48k[] = { + 48000, 96000, 192000, +}; + +static const u32 cs42888_rates_44k[] = { + 44100, 88200, 176400, +}; + +static const u32 cs42888_channels[] = { + 1, 2, 4, 6, 8, +}; + +static const struct snd_pcm_hw_constraint_list cs42888_rate_48k_constraints = { + .list = cs42888_rates_48k, + .count = ARRAY_SIZE(cs42888_rates_48k), +}; + +static const struct snd_pcm_hw_constraint_list cs42888_rate_44k_constraints = { + .list = cs42888_rates_44k, + .count = ARRAY_SIZE(cs42888_rates_44k), +}; + +static const struct snd_pcm_hw_constraint_list cs42888_channel_constraints = { + .list = cs42888_channels, + .count = ARRAY_SIZE(cs42888_channels), +}; + /** * struct codec_priv - CODEC private data * @mclk: Main clock of the CODEC @@ -48,6 +75,9 @@ * @mclk_id: MCLK (or main clock) id for set_sysclk() * @fll_id: FLL (or secordary clock) id for set_sysclk() * @pll_id: PLL id for set_pll() + * @pll_ratio_s24: PLL output ratio for S24_LE format (PLL_freq = sample_rate × ratio) + * Default is 384, but some codecs (e.g., WM8904) require lower values + * to stay within PLL frequency limits */ struct codec_priv { struct clk *mclk; @@ -56,6 +86,7 @@ struct codec_priv { u32 mclk_id; int fll_id; int pll_id; + int pll_ratio_s24; }; /** @@ -87,12 +118,15 @@ struct cpu_priv { * @codec_priv: CODEC private data * @cpu_priv: CPU private data * @card: ASoC card structure + * @constraint_rates: array of 
supported rates + * @constraint_channels: array of supported channels * @streams: Mask of current active streams * @sample_rate: Current sample rate * @sample_format: Current sample format * @asrc_rate: ASRC sample rate used by Back-Ends * @asrc_format: ASRC sample format used by Back-Ends * @dai_fmt: DAI format between CPU and CODEC + * @exclude_format: excluded format; * @name: Card name */ @@ -104,12 +138,15 @@ struct fsl_asoc_card_priv { struct codec_priv codec_priv[2]; struct cpu_priv cpu_priv; struct snd_soc_card card; + const struct snd_pcm_hw_constraint_list *constraint_rates; + const struct snd_pcm_hw_constraint_list *constraint_channels; u8 streams; u32 sample_rate; snd_pcm_format_t sample_format; u32 asrc_rate; snd_pcm_format_t asrc_format; u32 dai_fmt; + u64 exclude_format; char name[32]; }; @@ -222,7 +259,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream, if (codec_priv->pll_id >= 0 && codec_priv->fll_id >= 0) { if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE) - pll_out = priv->sample_rate * 384; + pll_out = priv->sample_rate * codec_priv->pll_ratio_s24; else pll_out = priv->sample_rate * 256; @@ -291,7 +328,47 @@ static int fsl_asoc_card_hw_free(struct snd_pcm_substream *substream) return 0; } +static int fsl_asoc_card_startup(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card); + struct snd_pcm_runtime *runtime = substream->runtime; + int ret; + + if (priv->exclude_format && !rtd->dai_link->no_pcm) { + ret = snd_pcm_hw_constraint_mask64(runtime, + SNDRV_PCM_HW_PARAM_FORMAT, + ~priv->exclude_format); + if (ret) + return ret; + } + + if (priv->constraint_channels) { + ret = snd_pcm_hw_constraint_list(runtime, 0, + SNDRV_PCM_HW_PARAM_CHANNELS, + priv->constraint_channels); + if (ret) + return ret; + } + + /* + * Apply rate constraints only to frontend DAI links (no_pcm = 0). 
+ * Skip DPCM backend (no_pcm = 1) as rate is fixed by be_hw_params_fixup() + * and ASRC frontend handles rate conversion. + */ + if (priv->constraint_rates && !rtd->dai_link->no_pcm) { + ret = snd_pcm_hw_constraint_list(runtime, 0, + SNDRV_PCM_HW_PARAM_RATE, + priv->constraint_rates); + if (ret) + return ret; + } + + return 0; +} + static const struct snd_soc_ops fsl_asoc_card_ops = { + .startup = fsl_asoc_card_startup, .hw_params = fsl_asoc_card_hw_params, .hw_free = fsl_asoc_card_hw_free, }; @@ -742,6 +819,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) for (codec_idx = 0; codec_idx < 2; codec_idx++) { priv->codec_priv[codec_idx].fll_id = -1; priv->codec_priv[codec_idx].pll_id = -1; + priv->codec_priv[codec_idx].pll_ratio_s24 = 384; } /* Diversify the card configurations */ @@ -753,6 +831,14 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) priv->cpu_priv.sysclk_dir[RX] = SND_SOC_CLOCK_OUT; priv->cpu_priv.slot_width = 32; priv->dai_fmt |= SND_SOC_DAIFMT_CBC_CFC; + priv->constraint_channels = &cs42888_channel_constraints; + if (priv->codec_priv[0].mclk_freq % 12288000 == 0) + priv->constraint_rates = &cs42888_rate_48k_constraints; + else if (priv->codec_priv[0].mclk_freq % 11289600 == 0) + priv->constraint_rates = &cs42888_rate_44k_constraints; + else + dev_warn(&pdev->dev, "Unknown MCLK frequency %lu, no rate constraints\n", + priv->codec_priv[0].mclk_freq); } else if (of_device_is_compatible(np, "fsl,imx-audio-cs427x")) { codec_dai_name[0] = "cs4271-hifi"; priv->codec_priv[0].mclk_id = CS427x_SYSCLK_MCLK; @@ -779,11 +865,30 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) priv->codec_priv[0].fll_id = WM8962_SYSCLK_FLL; priv->codec_priv[0].pll_id = WM8962_FLL; priv->dai_fmt |= SND_SOC_DAIFMT_CBP_CFP; + /* + * WM8962 has same BCLK generation limitations as WM8960. + * See WM8960 section for detailed explanation. 
+ */ + if (of_node_name_eq(cpu_np, "sai")) + priv->exclude_format = SNDRV_PCM_FMTBIT_S20_3LE; } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960")) { codec_dai_name[0] = "wm8960-hifi"; priv->codec_priv[0].fll_id = WM8960_SYSCLK_AUTO; priv->codec_priv[0].pll_id = WM8960_SYSCLK_AUTO; priv->dai_fmt |= SND_SOC_DAIFMT_CBP_CFP; + /* + * WM8960 in master mode cannot generate exact 1.92 MHz BCLK + * required for S20_3LE (48kHz × 2ch × 20bit). Closest available + * is 2.048 MHz (SYSCLK/6), which causes right channel corruption. + * + * In SAI master mode, SAI derive BCLK from MCLK using integer + * dividers only. S20_3LE requires non-integer divider ratios + * with standard MCLK frequencies. For example, 48kHz stereo + * needs 1.920 MHz BCLK, which requires a divider of 6.4 from + * 12.288 MHz MCLK (not an integer). + */ + if (of_node_name_eq(cpu_np, "sai")) + priv->exclude_format = SNDRV_PCM_FMTBIT_S20_3LE; } else if (of_device_is_compatible(np, "fsl,imx-audio-ac97")) { codec_dai_name[0] = "ac97-hifi"; priv->dai_fmt = SND_SOC_DAIFMT_AC97; @@ -835,6 +940,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) priv->codec_priv[0].mclk_id = WM8904_FLL_MCLK; priv->codec_priv[0].fll_id = WM8904_CLK_FLL; priv->codec_priv[0].pll_id = WM8904_FLL_MCLK; + priv->codec_priv[0].pll_ratio_s24 = 192; priv->dai_fmt |= SND_SOC_DAIFMT_CBP_CFP; } else if (of_device_is_compatible(np, "fsl,imx-audio-spdif")) { ret = fsl_asoc_card_spdif_init(codec_np, cpu_np, codec_dai_name, priv); @@ -989,6 +1095,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) if (asrc_pdev) { /* DPCM DAI Links only if ASRC exists */ + priv->dai_link[1].dpcm_merged_chan = 1; + priv->dai_link[1].ignore_pmdown_time = 1; priv->dai_link[1].cpus->of_node = asrc_np; priv->dai_link[1].platforms->of_node = asrc_np; for_each_link_codecs((&(priv->dai_link[2])), codec_idx, codec_comp) { @@ -998,6 +1106,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) } priv->dai_link[2].cpus->of_node 
= cpu_np; priv->dai_link[2].dai_fmt = priv->dai_fmt; + priv->dai_link[2].ignore_pmdown_time = 1; priv->card.num_links = 3; ret = of_property_read_u32(asrc_np, "fsl,asrc-rate", diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c index ee16cf681488c7..6677d3bf36ec27 100644 --- a/sound/soc/fsl/fsl_xcvr.c +++ b/sound/soc/fsl/fsl_xcvr.c @@ -228,10 +228,14 @@ static int fsl_xcvr_capds_put(struct snd_kcontrol *kcontrol, { struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol); struct fsl_xcvr *xcvr = snd_soc_dai_get_drvdata(dai); + int changed; - memcpy(xcvr->cap_ds, ucontrol->value.bytes.data, FSL_XCVR_CAPDS_SIZE); + changed = memcmp(xcvr->cap_ds, ucontrol->value.bytes.data, + sizeof(xcvr->cap_ds)) != 0; + memcpy(xcvr->cap_ds, ucontrol->value.bytes.data, + sizeof(xcvr->cap_ds)); - return 0; + return changed; } static struct snd_kcontrol_new fsl_xcvr_earc_capds_kctl = { @@ -1040,10 +1044,15 @@ static int fsl_xcvr_tx_cs_put(struct snd_kcontrol *kcontrol, { struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol); struct fsl_xcvr *xcvr = snd_soc_dai_get_drvdata(dai); + int changed; - memcpy(xcvr->tx_iec958.status, ucontrol->value.iec958.status, 24); + changed = memcmp(xcvr->tx_iec958.status, + ucontrol->value.iec958.status, + sizeof(xcvr->tx_iec958.status)) != 0; + memcpy(xcvr->tx_iec958.status, ucontrol->value.iec958.status, + sizeof(xcvr->tx_iec958.status)); - return 0; + return changed; } static struct snd_kcontrol_new fsl_xcvr_rx_ctls[] = { diff --git a/sound/soc/intel/boards/bytcr_wm5102.c b/sound/soc/intel/boards/bytcr_wm5102.c index 4879f79aef292b..4aa0cf49b03359 100644 --- a/sound/soc/intel/boards/bytcr_wm5102.c +++ b/sound/soc/intel/boards/bytcr_wm5102.c @@ -170,6 +170,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w, ret = byt_wm5102_prepare_and_enable_pll1(codec_dai, 48000); if (ret) { dev_err(card->dev, "Error setting codec sysclk: %d\n", ret); + clk_disable_unprepare(priv->mclk); return ret; } } else { diff --git 
a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c index 8e5670e590ed75..eddb179eaa210a 100644 --- a/sound/soc/intel/boards/cht_bsw_rt5672.c +++ b/sound/soc/intel/boards/cht_bsw_rt5672.c @@ -63,13 +63,11 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w, } if (SND_SOC_DAPM_EVENT_ON(event)) { - if (ctx->mclk) { - ret = clk_prepare_enable(ctx->mclk); - if (ret < 0) { - dev_err(card->dev, - "could not configure MCLK state: %d\n", ret); - return ret; - } + ret = clk_prepare_enable(ctx->mclk); + if (ret < 0) { + dev_err(card->dev, + "could not configure MCLK state: %d\n", ret); + return ret; } /* set codec PLL source to the 19.2MHz platform clock (MCLK) */ @@ -77,8 +75,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w, CHT_PLAT_CLK_3_HZ, 48000 * 512); if (ret < 0) { dev_err(card->dev, "can't set codec pll: %d\n", ret); - if (ctx->mclk) - clk_disable_unprepare(ctx->mclk); + clk_disable_unprepare(ctx->mclk); return ret; } @@ -87,8 +84,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w, 48000 * 512, SND_SOC_CLOCK_IN); if (ret < 0) { dev_err(card->dev, "can't set codec sysclk: %d\n", ret); - if (ctx->mclk) - clk_disable_unprepare(ctx->mclk); + clk_disable_unprepare(ctx->mclk); return ret; } } else { @@ -104,8 +100,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w, return ret; } - if (ctx->mclk) - clk_disable_unprepare(ctx->mclk); + clk_disable_unprepare(ctx->mclk); } return 0; } @@ -244,28 +239,25 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime) snd_jack_set_key(ctx->headset.jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN); rt5670_set_jack_detect(component, &ctx->headset); - if (ctx->mclk) { - /* - * The firmware might enable the clock at - * boot (this information may or may not - * be reflected in the enable clock register). - * To change the rate we must disable the clock - * first to cover these cases. 
Due to common - * clock framework restrictions that do not allow - * to disable a clock that has not been enabled, - * we need to enable the clock first. - */ - ret = clk_prepare_enable(ctx->mclk); - if (!ret) - clk_disable_unprepare(ctx->mclk); - ret = clk_set_rate(ctx->mclk, CHT_PLAT_CLK_3_HZ); + /* + * The firmware might enable the clock at boot (this information + * may or may not be reflected in the enable clock register). + * To change the rate we must disable the clock first to cover + * these cases. Due to Common Clock Framework restrictions that + * do not allow to disable a clock that has not been enabled, we + * need to enable the clock first. + */ + ret = clk_prepare_enable(ctx->mclk); + if (!ret) + clk_disable_unprepare(ctx->mclk); - if (ret) { - dev_err(runtime->dev, "unable to set MCLK rate\n"); - return ret; - } + ret = clk_set_rate(ctx->mclk, CHT_PLAT_CLK_3_HZ); + if (ret) { + dev_err(runtime->dev, "unable to set MCLK rate\n"); + return ret; } + return 0; } diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c index 72c35e73078e3f..2e4222456f27f5 100644 --- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c @@ -122,6 +122,42 @@ static const struct snd_soc_acpi_endpoint spk_r_endpoint = { .group_id = 1, }; +static const struct snd_soc_acpi_endpoint tac5xx2_endpoints[] = { + { /* Playback Endpoint */ + .num = 0, + .aggregated = 0, + .group_position = 0, + .group_id = 0, + }, + { /* Mic Capture Endpoint */ + .num = 1, + .aggregated = 0, + .group_position = 0, + .group_id = 0, + }, + { /* UAJ-HP with Mic Endpoint */ + .num = 2, + .aggregated = 0, + .group_position = 0, + .group_id = 0, + }, +}; + +static const struct snd_soc_acpi_endpoint tas2883_endpoints[] = { + { /* Playback Endpoint */ + .num = 0, + .aggregated = 0, + .group_position = 0, + .group_id = 0, + }, + { /* Mic Capture Endpoint */ + .num = 1, + .aggregated = 0, + 
.group_position = 0, + .group_id = 0, + }, +}; + static const struct snd_soc_acpi_endpoint rt712_endpoints[] = { { .num = 0, @@ -1011,6 +1047,33 @@ static const struct snd_soc_acpi_adr_device cs42l42_0_adr[] = { } }; +static const struct snd_soc_acpi_adr_device tac5572_0_adr[] = { + { + .adr = 0x0000300102557201ull, + .num_endpoints = ARRAY_SIZE(tac5xx2_endpoints), + .endpoints = tac5xx2_endpoints, + .name_prefix = "tac5572" + } +}; + +static const struct snd_soc_acpi_adr_device tac5672_0_adr[] = { + { + .adr = 0x0000300102567201ull, + .num_endpoints = ARRAY_SIZE(tac5xx2_endpoints), + .endpoints = tac5xx2_endpoints, + .name_prefix = "tac5672" + } +}; + +static const struct snd_soc_acpi_adr_device tac5682_0_adr[] = { + { + .adr = 0x0000300102568201ull, + .num_endpoints = ARRAY_SIZE(tac5xx2_endpoints), + .endpoints = tac5xx2_endpoints, + .name_prefix = "tac5682" + } +}; + static const struct snd_soc_acpi_adr_device tas2783_0_adr[] = { { .adr = 0x00003c0102000001ull, @@ -1035,9 +1098,45 @@ static const struct snd_soc_acpi_adr_device tas2783_0_adr[] = { .num_endpoints = 1, .endpoints = &spk_r_endpoint, .name_prefix = "tas2783-4" + }, +}; + +static const struct snd_soc_acpi_adr_device tas2883_0_adr[] = { + { + .adr = 0x0000300102288301ull, + .num_endpoints = ARRAY_SIZE(tas2883_endpoints), + .endpoints = tas2883_endpoints, + .name_prefix = "tas2883" } }; +static const struct snd_soc_acpi_link_adr tac5572_l0[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(tac5572_0_adr), + .adr_d = tac5572_0_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr tac5672_l0[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(tac5672_0_adr), + .adr_d = tac5672_0_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr tac5682_l0[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(tac5682_0_adr), + .adr_d = tac5682_0_adr, + }, + {} +}; + static const struct snd_soc_acpi_link_adr tas2783_link0[] = { { .mask = BIT(0), @@ -1047,6 +1146,15 @@ static const struct 
snd_soc_acpi_link_adr tas2783_link0[] = { {} }; +static const struct snd_soc_acpi_link_adr tas2883_l0[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(tas2883_0_adr), + .adr_d = tas2883_0_adr, + }, + {} +}; + static const struct snd_soc_acpi_link_adr cs42l42_link0_max98363_link2[] = { /* Expected order: jack -> amp */ { @@ -1208,12 +1316,36 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[] = { .drv_name = "sof_sdw", .sof_tplg_filename = "sof-mtl-rt715-rt711-rt1308-mono.tplg", }, + { + .link_mask = BIT(0), + .links = tac5572_l0, + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-mtl-tac5572.tplg", + }, + { + .link_mask = BIT(0), + .links = tac5672_l0, + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-mtl-tac5672.tplg", + }, + { + .link_mask = BIT(0), + .links = tac5682_l0, + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-mtl-tac5682.tplg", + }, { .link_mask = BIT(0), .links = tas2783_link0, .drv_name = "sof_sdw", .sof_tplg_filename = "sof-mtl-tas2783.tplg", }, + { + .link_mask = BIT(0), + .links = tas2883_l0, + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-mtl-tas2883.tplg", + }, { .link_mask = GENMASK(3, 0), .links = mtl_rt713_l0_rt1316_l12_rt1713_l3, diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig index 3a1e1fa3fe5cc7..4af7bbb58010d8 100644 --- a/sound/soc/mediatek/Kconfig +++ b/sound/soc/mediatek/Kconfig @@ -26,6 +26,16 @@ config SND_SOC_MT2701_CS42448 Select Y if you have such device. If unsure select "N". +config SND_SOC_MT2701_HDMI + tristate "ASoC Audio driver for MT2701 with on-chip HDMI codec" + depends on SND_SOC_MT2701 + select SND_SOC_HDMI_CODEC + help + This adds the ASoC machine driver for MediaTek MT2701 and + MT7623N boards routing the AFE I2S back-end to the on-chip + HDMI transmitter via the generic HDMI codec. + If unsure select "N". 
+ config SND_SOC_MT2701_WM8960 tristate "ASoc Audio driver for MT2701 with WM8960 codec" depends on SND_SOC_MT2701 && I2C @@ -353,4 +363,34 @@ config SND_SOC_MT8365_MT6357 Select Y if you have such device. If unsure select "N". +config SND_SOC_MT8196 + tristate "ASoC support for Mediatek MT8196 chip" + depends on ARCH_MEDIATEK + select SND_SOC_MEDIATEK + help + This adds ASoC driver for Mediatek MT8196 boards + that can be used with other codecs. + Select Y if you have such device. + If unsure select "N". + +config SND_SOC_MT8196_NAU8825 + tristate "ASoc Audio driver for MT8196 with NAU8825 and I2S codec" + depends on SND_SOC_MT8196 + depends on I2C + select SND_SOC_HDMI_CODEC + select SND_SOC_DMIC + select SND_SOC_NAU8315 + select SND_SOC_NAU8825 + select SND_SOC_RT5645 + select SND_SOC_RT5682_I2C + select SND_SOC_RT5682S + select SND_SOC_TAS2781_COMLIB + select SND_SOC_TAS2781_FMWLIB + select SND_SOC_TAS2781_I2C + help + This adds support for ASoC machine driver for MediaTek MT8196 + boards with the NAU8825 and other I2S audio codecs. + Select Y if you have such device. + If unsure select "N". 
+ endmenu diff --git a/sound/soc/mediatek/Makefile b/sound/soc/mediatek/Makefile index 7cd67bce92e9a4..a6815a3c598858 100644 --- a/sound/soc/mediatek/Makefile +++ b/sound/soc/mediatek/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_SND_SOC_MT8192) += mt8192/ obj-$(CONFIG_SND_SOC_MT8195) += mt8195/ obj-$(CONFIG_SND_SOC_MT8365) += mt8365/ obj-$(CONFIG_SND_SOC_MT8189) += mt8189/ +obj-$(CONFIG_SND_SOC_MT8196) += mt8196/ diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c index f2b39fc9ec81e5..477284e12396d8 100644 --- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c +++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c @@ -87,29 +87,49 @@ snd_pcm_uframes_t mtk_afe_pcm_pointer(struct snd_soc_component *component, const struct mtk_base_memif_data *memif_data = memif->data; struct regmap *regmap = afe->regmap; struct device *dev = afe->dev; - int reg_ofs_base = memif_data->reg_ofs_base; - int reg_ofs_cur = memif_data->reg_ofs_cur; - unsigned int hw_ptr = 0, hw_base = 0; - int ret, pcm_ptr_bytes; - - ret = regmap_read(regmap, reg_ofs_cur, &hw_ptr); - if (ret || hw_ptr == 0) { - dev_err(dev, "%s hw_ptr err\n", __func__); - pcm_ptr_bytes = 0; - goto POINTER_RETURN_FRAMES; + unsigned int hw_ptr_lower32 = 0, hw_ptr_upper32 = 0; + unsigned int hw_base_lower32 = 0, hw_base_upper32 = 0; + unsigned long long hw_ptr = 0, hw_base = 0; + int ret; + unsigned long long pcm_ptr_bytes = 0; + + ret = regmap_read(regmap, memif_data->reg_ofs_cur, &hw_ptr_lower32); + if (ret) { + dev_err(dev, "%s hw_ptr_lower32 err\n", __func__); + return 0; } - ret = regmap_read(regmap, reg_ofs_base, &hw_base); - if (ret || hw_base == 0) { - dev_err(dev, "%s hw_ptr err\n", __func__); - pcm_ptr_bytes = 0; - goto POINTER_RETURN_FRAMES; + if (memif_data->reg_ofs_cur_msb) { + ret = regmap_read(regmap, memif_data->reg_ofs_cur_msb, &hw_ptr_upper32); + if (ret) { + dev_err(dev, "%s hw_ptr_upper32 err\n", __func__); + return 0; + } } - 
pcm_ptr_bytes = hw_ptr - hw_base; + ret = regmap_read(regmap, memif_data->reg_ofs_base, &hw_base_lower32); + if (ret) { + dev_err(dev, "%s hw_base_lower32 err\n", __func__); + return 0; + } + if (memif_data->reg_ofs_base_msb) { + ret = regmap_read(regmap, memif_data->reg_ofs_base_msb, &hw_base_upper32); + if (ret) { + dev_err(dev, "%s hw_base_upper32 err\n", __func__); + return 0; + } + } + + hw_ptr = ((unsigned long long)hw_ptr_upper32 << 32) | hw_ptr_lower32; + hw_base = ((unsigned long long)hw_base_upper32 << 32) | hw_base_lower32; + + if (!hw_ptr || !hw_base) { + dev_err(dev, "hw_ptr or hw_base = 0 err\n"); + return 0; + } -POINTER_RETURN_FRAMES: - return bytes_to_frames(substream->runtime, pcm_ptr_bytes); + pcm_ptr_bytes = MTK_ALIGN_16BYTES(hw_ptr - hw_base); + return bytes_to_frames(substream->runtime, (ssize_t)pcm_ptr_bytes); } EXPORT_SYMBOL_GPL(mtk_afe_pcm_pointer); diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.h b/sound/soc/mediatek/common/mtk-afe-platform-driver.h index fcc923b88f124f..71070b26f8f892 100644 --- a/sound/soc/mediatek/common/mtk-afe-platform-driver.h +++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.h @@ -12,6 +12,8 @@ #define AFE_PCM_NAME "mtk-afe-pcm" extern const struct snd_soc_component_driver mtk_afe_pcm_platform; +#define MTK_ALIGN_16BYTES(x) ((x) & GENMASK_ULL(39, 4)) + struct mtk_base_afe; struct snd_pcm; struct snd_soc_component; diff --git a/sound/soc/mediatek/mt2701/Makefile b/sound/soc/mediatek/mt2701/Makefile index 507fa26c394528..59623d3d3a0387 100644 --- a/sound/soc/mediatek/mt2701/Makefile +++ b/sound/soc/mediatek/mt2701/Makefile @@ -5,4 +5,5 @@ obj-$(CONFIG_SND_SOC_MT2701) += snd-soc-mt2701-afe.o # machine driver obj-$(CONFIG_SND_SOC_MT2701_CS42448) += mt2701-cs42448.o +obj-$(CONFIG_SND_SOC_MT2701_HDMI) += mt2701-hdmi.o obj-$(CONFIG_SND_SOC_MT2701_WM8960) += mt2701-wm8960.o diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c index 
ae620890bb3ac9..5a2bcf027b4fbb 100644 --- a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c +++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c @@ -95,6 +95,28 @@ int mt2701_init_clock(struct mtk_base_afe *afe) afe_priv->mrgif_ck = NULL; } + /* + * Optional HDMI audio clocks. Platforms that do not wire up the + * HDMI output (e.g. MT2701 devkits using only the I2S BE DAIs) + * may omit these; in that case the HDMI BE DAI simply cannot be + * enabled, but the rest of the AFE still probes. + */ + afe_priv->hadds2pll_ck = devm_clk_get_optional(afe->dev, "hadds2pll_294m"); + if (IS_ERR(afe_priv->hadds2pll_ck)) + return PTR_ERR(afe_priv->hadds2pll_ck); + + afe_priv->audio_hdmi_ck = devm_clk_get_optional(afe->dev, "audio_hdmi_pd"); + if (IS_ERR(afe_priv->audio_hdmi_ck)) + return PTR_ERR(afe_priv->audio_hdmi_ck); + + afe_priv->audio_spdf_ck = devm_clk_get_optional(afe->dev, "audio_spdf_pd"); + if (IS_ERR(afe_priv->audio_spdf_ck)) + return PTR_ERR(afe_priv->audio_spdf_ck); + + afe_priv->audio_apll_ck = devm_clk_get_optional(afe->dev, "audio_apll_pd"); + if (IS_ERR(afe_priv->audio_apll_ck)) + return PTR_ERR(afe_priv->audio_apll_ck); + return 0; } diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-common.h b/sound/soc/mediatek/mt2701/mt2701-afe-common.h index 32bef5e2a56d94..8b6f3a200048a0 100644 --- a/sound/soc/mediatek/mt2701/mt2701-afe-common.h +++ b/sound/soc/mediatek/mt2701/mt2701-afe-common.h @@ -33,6 +33,7 @@ enum { MT2701_MEMIF_UL5, MT2701_MEMIF_DLBT, MT2701_MEMIF_ULBT, + MT2701_MEMIF_HDMI, MT2701_MEMIF_NUM, MT2701_IO_I2S = MT2701_MEMIF_NUM, MT2701_IO_2ND_I2S, @@ -41,6 +42,7 @@ enum { MT2701_IO_5TH_I2S, MT2701_IO_6TH_I2S, MT2701_IO_MRG, + MT2701_IO_HDMI, }; enum { @@ -90,6 +92,10 @@ struct mt2701_afe_private { struct mt2701_i2s_path *i2s_path; struct clk *base_ck[MT2701_BASE_CLK_NUM]; struct clk *mrgif_ck; + struct clk *hadds2pll_ck; + struct clk *audio_hdmi_ck; + struct clk *audio_spdf_ck; + struct clk *audio_apll_ck; bool mrg_enable[MTK_STREAM_NUM]; const 
struct mt2701_soc_variants *soc; diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c index fcae38135d93fe..bb459faa6e0543 100644 --- a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c +++ b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "mt2701-afe-common.h" #include "mt2701-afe-clock-ctrl.h" @@ -60,6 +61,7 @@ static const struct mt2701_afe_rate mt2701_afe_i2s_rates[] = { static const unsigned int mt2701_afe_backup_list[] = { AUDIO_TOP_CON0, + AUDIO_TOP_CON3, AUDIO_TOP_CON4, AUDIO_TOP_CON5, ASYS_TOP_CON, @@ -77,6 +79,9 @@ static const unsigned int mt2701_afe_backup_list[] = { AFE_CONN22, AFE_DAC_CON0, AFE_MEMIF_PBUF_SIZE, + AFE_HDMI_OUT_CON0, + AFE_HDMI_CONN0, + AFE_8CH_I2S_OUT_CON, }; static int mt2701_dai_num_to_i2s(struct mtk_base_afe *afe, int num) @@ -542,6 +547,220 @@ static const struct snd_soc_dai_ops mt2701_btmrg_ops = { .hw_params = mt2701_btmrg_hw_params, }; +/* + * HDMI BE DAI -- drives the on-SoC 8-channel I2S engine whose output + * feeds the HDMI transmitter audio port. + * + * The HDMI audio hardware path is: + * HDMI memif DMA (AFE_HDMI_OUT_*) -> interconnect mux (AFE_HDMI_CONN0) + * -> 8-channel I2S engine (AFE_8CH_I2S_OUT_CON) -> HDMI TX audio port + * + * The I2S3 clock tree provides the bit/master clocks; we set its + * mclk_rate to 128*fs (matching HDMI_AUD_MCLK_128FS) and let + * mt2701_mclk_configuration program the PLL/divider path. 
+ */ +#define MT2701_HDMI_I2S_PATH 3 + +static int mt2701_afe_hdmi_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct mt2701_afe_private *afe_priv = afe->platform_priv; + int ret; + + if (!afe_priv->hadds2pll_ck || !afe_priv->audio_hdmi_ck) { + dev_err(afe->dev, "HDMI audio clocks not available\n"); + return -ENODEV; + } + + ret = clk_prepare_enable(afe_priv->hadds2pll_ck); + if (ret) + return ret; + + ret = clk_prepare_enable(afe_priv->audio_hdmi_ck); + if (ret) + goto err_hdmi; + + if (afe_priv->audio_spdf_ck) { + ret = clk_prepare_enable(afe_priv->audio_spdf_ck); + if (ret) + goto err_spdf; + } + + if (afe_priv->audio_apll_ck) { + ret = clk_prepare_enable(afe_priv->audio_apll_ck); + if (ret) + goto err_apll; + } + + ret = mt2701_afe_enable_mclk(afe, MT2701_HDMI_I2S_PATH); + if (ret) + goto err_mclk; + + return 0; + +err_mclk: + if (afe_priv->audio_apll_ck) + clk_disable_unprepare(afe_priv->audio_apll_ck); +err_apll: + if (afe_priv->audio_spdf_ck) + clk_disable_unprepare(afe_priv->audio_spdf_ck); +err_spdf: + clk_disable_unprepare(afe_priv->audio_hdmi_ck); +err_hdmi: + clk_disable_unprepare(afe_priv->hadds2pll_ck); + return ret; +} + +static void mt2701_afe_hdmi_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct mt2701_afe_private *afe_priv = afe->platform_priv; + + mt2701_afe_disable_mclk(afe, MT2701_HDMI_I2S_PATH); + if (afe_priv->audio_apll_ck) + clk_disable_unprepare(afe_priv->audio_apll_ck); + if (afe_priv->audio_spdf_ck) + clk_disable_unprepare(afe_priv->audio_spdf_ck); + clk_disable_unprepare(afe_priv->audio_hdmi_ck); + clk_disable_unprepare(afe_priv->hadds2pll_ck); +} + +static int mt2701_afe_hdmi_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + 
struct mt2701_afe_private *afe_priv = afe->platform_priv; + unsigned int channels = params_channels(params); + unsigned int rate = params_rate(params); + unsigned int divp1; + unsigned int val; + unsigned int i; + int ret; + + /* + * Compute AUDIO_TOP_CON3.HDMI_BCK_DIV up front. The divider + * drives an internal reference for the HDMI transmitter's + * audio packet engine; it must scale with the sample rate so + * that the packet engine's timing matches the data flowing in + * from the AFE memif/I2S3 side. Empirically, with audpll_sel + * parented to hadds2pll_98m (98.304 MHz), the correct value at + * 48 kHz is div = 44 (i.e. (div+1) = 45), giving 1.0923 MHz. + * Scaling inversely with rate: (div + 1) = 45 * 48000 / rate. + * Integer rounding introduces small (<1%) errors at 32 kHz; + * 44.1 kHz is nearly exact via round-to-nearest. Reject rates + * that fall outside the 6-bit divider range before touching + * any hardware so no side effects are left behind on error. + */ + divp1 = (45U * 48000U + rate / 2) / rate; + if (divp1 == 0 || divp1 > 64) + return -EINVAL; + + /* + * Park the I2S3 clock tree at 128*fs -- this is the MCLK that + * the ASYS I2S3 engine uses to derive its BCK/LRCK. The engine + * outputs BCK = 64*fs (stereo, 32-bit word length). + */ + afe_priv->i2s_path[MT2701_HDMI_I2S_PATH].mclk_rate = rate * 128; + ret = mt2701_mclk_configuration(afe, MT2701_HDMI_I2S_PATH); + if (ret) + return ret; + + /* Program and start the ASYS I2S3 engine (FS, I2S mode, enable). */ + mt2701_i2s_path_enable(afe, + &afe_priv->i2s_path[MT2701_HDMI_I2S_PATH], + SNDRV_PCM_STREAM_PLAYBACK, rate); + + regmap_update_bits(afe->regmap, AUDIO_TOP_CON3, + AUDIO_TOP_CON3_HDMI_BCK_DIV_MASK, + AUDIO_TOP_CON3_HDMI_BCK_DIV(divp1 - 1)); + + /* + * HDMI output memif: set channel count and confirm 16-bit + * sample width. Both fields must be written together so that + * stale reset-default or prior-stream values in BIT_WIDTH + * cannot persist. 
+ */ + regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, + AFE_HDMI_OUT_CON0_CH_NUM_MASK | + AFE_HDMI_OUT_CON0_BIT_WIDTH_MASK, + AFE_HDMI_OUT_CON0_CH_NUM(channels) | + AFE_HDMI_OUT_CON0_BIT_WIDTH_16); + + /* + * Interconnect mux -- map DMA input slots to HDMI output slots. + * Each output takes a 3-bit field at shift (i*3). Swap the first + * two inputs so that the DMA's interleaved L/R pair lands on the + * correct HDMI L/R output slots. Remaining slots are identity. + */ + val = (1 << 0) | (0 << 3); /* O20 <- I21, O21 <- I20 */ + for (i = 2; i < 8; i++) + val |= ((i & 0x7) << (i * 3)); + regmap_write(afe->regmap, AFE_HDMI_CONN0, val); + + /* + * 8-channel I2S framing: standard I2S, 32-bit slots, + * LRCK/BCK inverted. The wire protocol is fixed. + */ + regmap_update_bits(afe->regmap, AFE_8CH_I2S_OUT_CON, + AFE_8CH_I2S_OUT_CON_WLEN_MASK | + AFE_8CH_I2S_OUT_CON_I2S_DELAY | + AFE_8CH_I2S_OUT_CON_LRCK_INV | + AFE_8CH_I2S_OUT_CON_BCK_INV, + AFE_8CH_I2S_OUT_CON_WLEN_32BIT | + AFE_8CH_I2S_OUT_CON_I2S_DELAY | + AFE_8CH_I2S_OUT_CON_LRCK_INV | + AFE_8CH_I2S_OUT_CON_BCK_INV); + return 0; +} + +static int mt2701_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + /* Enable HDMI output memif. */ + regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0x1); + /* Enable 8-channel I2S engine. 
*/ + regmap_update_bits(afe->regmap, AFE_8CH_I2S_OUT_CON, + AFE_8CH_I2S_OUT_CON_EN, + AFE_8CH_I2S_OUT_CON_EN); + return 0; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + regmap_update_bits(afe->regmap, AFE_8CH_I2S_OUT_CON, + AFE_8CH_I2S_OUT_CON_EN, 0); + regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0); + return 0; + } + return -EINVAL; +} + +static int mt2701_afe_hdmi_hw_free(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct mt2701_afe_private *afe_priv = afe->platform_priv; + + mt2701_afe_i2s_path_disable(afe, + &afe_priv->i2s_path[MT2701_HDMI_I2S_PATH], + SNDRV_PCM_STREAM_PLAYBACK); + return 0; +} + +static const struct snd_soc_dai_ops mt2701_afe_hdmi_ops = { + .startup = mt2701_afe_hdmi_startup, + .shutdown = mt2701_afe_hdmi_shutdown, + .hw_params = mt2701_afe_hdmi_hw_params, + .hw_free = mt2701_afe_hdmi_hw_free, + .trigger = mt2701_afe_hdmi_trigger, +}; + static struct snd_soc_dai_driver mt2701_afe_pcm_dais[] = { /* FE DAIs: memory intefaces to CPU */ { @@ -628,6 +847,19 @@ static struct snd_soc_dai_driver mt2701_afe_pcm_dais[] = { }, .ops = &mt2701_single_memif_dai_ops, }, + { + .name = "PCM_HDMI", + .id = MT2701_MEMIF_HDMI, + .playback = { + .stream_name = "HDMI Multich", + .channels_min = 2, + .channels_max = 8, + .rates = (SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000), + .formats = SNDRV_PCM_FMTBIT_S16_LE, + }, + .ops = &mt2701_single_memif_dai_ops, + }, /* BE DAIs */ { .name = "I2S0", @@ -748,7 +980,20 @@ static struct snd_soc_dai_driver mt2701_afe_pcm_dais[] = { }, .ops = &mt2701_btmrg_ops, .symmetric_rate = 1, - } + }, + { + .name = "HDMI I2S", + .id = MT2701_IO_HDMI, + .playback = { + .stream_name = "HDMI 8CH I2S Playback", + .channels_min = 2, + .channels_max = 8, + .rates = (SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000), + .formats = SNDRV_PCM_FMTBIT_S16_LE, + }, + .ops = &mt2701_afe_hdmi_ops, + }, }; static const struct 
snd_kcontrol_new mt2701_afe_o00_mix[] = { @@ -927,6 +1172,14 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = { {"I16I17", "Multich I2S2 Out Switch", "DLM"}, {"I18I19", "Multich I2S3 Out Switch", "DLM"}, + /* + * HDMI FE -> BE direct route. The HDMI memif has its own DMA + * path that feeds the 8-channel internal I2S straight into the + * HDMI transmitter; no mixer/interconnect selection is exposed + * to the user. + */ + {"HDMI 8CH I2S Playback", NULL, "HDMI Multich"}, + { "I12", NULL, "I12I13" }, { "I13", NULL, "I12I13" }, { "I14", NULL, "I14I15" }, @@ -1207,6 +1460,35 @@ static const struct mtk_base_memif_data memif_data_array[MT2701_MEMIF_NUM] = { .agent_disable_shift = 16, .msb_reg = -1, }, + { + /* + * HDMI memif feeds the on-SoC 8-channel internal I2S that + * drives the HDMI transmitter audio port. Unlike the + * standard memifs, the enable bit, channel count and bit + * width all live in AFE_HDMI_OUT_CON0, so mono/fs/hd/agent + * fields are left at -1 and programmed from the BE DAI ops + * instead. 
+ */ + .name = "HDMI", + .id = MT2701_MEMIF_HDMI, + .reg_ofs_base = AFE_HDMI_OUT_BASE, + .reg_ofs_cur = AFE_HDMI_OUT_CUR, + .reg_ofs_end = AFE_HDMI_OUT_END, + .fs_reg = -1, + .fs_shift = -1, + .fs_maskbit = 0, + .mono_reg = -1, + .mono_shift = -1, + .enable_reg = AFE_HDMI_OUT_CON0, + .enable_shift = 0, + .hd_reg = -1, + .hd_shift = -1, + .hd_align_reg = -1, + .hd_align_mshift = 0, + .agent_disable_reg = -1, + .agent_disable_shift = 0, + .msb_reg = -1, + }, }; static const struct mtk_base_irq_data irq_data[MT2701_IRQ_ASYS_END] = { diff --git a/sound/soc/mediatek/mt2701/mt2701-hdmi.c b/sound/soc/mediatek/mt2701/mt2701-hdmi.c new file mode 100644 index 00000000000000..a84907879c04e8 --- /dev/null +++ b/sound/soc/mediatek/mt2701/mt2701-hdmi.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mt2701-hdmi.c -- MT2701 HDMI ALSA SoC machine driver + * + * Copyright (c) 2026 Daniel Golle + * + * Based on mt2701-cs42448.c + */ + +#include +#include +#include +#include + +enum { + DAI_LINK_FE_HDMI_OUT, + DAI_LINK_BE_HDMI_I2S, +}; + +SND_SOC_DAILINK_DEFS(fe_hdmi_out, + DAILINK_COMP_ARRAY(COMP_CPU("PCM_HDMI")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); + +SND_SOC_DAILINK_DEFS(be_hdmi_i2s, + DAILINK_COMP_ARRAY(COMP_CPU("HDMI I2S")), + DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "i2s-hifi")), + DAILINK_COMP_ARRAY(COMP_EMPTY())); + +static struct snd_soc_dai_link mt2701_hdmi_dai_links[] = { + [DAI_LINK_FE_HDMI_OUT] = { + .name = "HDMI Playback", + .stream_name = "HDMI Playback", + .trigger = { SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST }, + .dynamic = 1, + .playback_only = 1, + SND_SOC_DAILINK_REG(fe_hdmi_out), + }, + [DAI_LINK_BE_HDMI_I2S] = { + .name = "HDMI BE", + .no_pcm = 1, + .playback_only = 1, + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_CBC_CFC, + SND_SOC_DAILINK_REG(be_hdmi_i2s), + }, +}; + +static struct snd_soc_card mt2701_hdmi_soc_card = { + .name = "mt2701-hdmi", + .owner = THIS_MODULE, + 
.dai_link = mt2701_hdmi_dai_links, + .num_links = ARRAY_SIZE(mt2701_hdmi_dai_links), +}; + +static int mt2701_hdmi_machine_probe(struct platform_device *pdev) +{ + struct snd_soc_card *card = &mt2701_hdmi_soc_card; + struct device *dev = &pdev->dev; + struct device_node *platform_node; + struct device_node *codec_node; + struct snd_soc_dai_link *dai_link; + int ret; + int i; + + platform_node = of_parse_phandle(dev->of_node, "mediatek,platform", 0); + if (!platform_node) + return dev_err_probe(dev, -EINVAL, + "Property 'mediatek,platform' missing\n"); + + for_each_card_prelinks(card, i, dai_link) { + if (dai_link->platforms->name) + continue; + dai_link->platforms->of_node = platform_node; + } + + codec_node = of_parse_phandle(dev->of_node, "mediatek,audio-codec", 0); + if (!codec_node) { + of_node_put(platform_node); + return dev_err_probe(dev, -EINVAL, + "Property 'mediatek,audio-codec' missing\n"); + } + mt2701_hdmi_dai_links[DAI_LINK_BE_HDMI_I2S].codecs->of_node = codec_node; + + card->dev = dev; + + ret = devm_snd_soc_register_card(dev, card); + + of_node_put(platform_node); + of_node_put(codec_node); + return ret; +} + +static const struct of_device_id mt2701_hdmi_machine_dt_match[] = { + { .compatible = "mediatek,mt2701-hdmi-audio" }, + { .compatible = "mediatek,mt7623n-hdmi-audio" }, + {} +}; +MODULE_DEVICE_TABLE(of, mt2701_hdmi_machine_dt_match); + +static struct platform_driver mt2701_hdmi_machine = { + .driver = { + .name = "mt2701-hdmi", + .of_match_table = mt2701_hdmi_machine_dt_match, + }, + .probe = mt2701_hdmi_machine_probe, +}; +module_platform_driver(mt2701_hdmi_machine); + +MODULE_DESCRIPTION("MT2701 HDMI ALSA SoC machine driver"); +MODULE_AUTHOR("Daniel Golle "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mt2701-hdmi"); diff --git a/sound/soc/mediatek/mt2701/mt2701-reg.h b/sound/soc/mediatek/mt2701/mt2701-reg.h index c84d14cdd7ae8c..ca914df02c960e 100644 --- a/sound/soc/mediatek/mt2701/mt2701-reg.h +++ 
b/sound/soc/mediatek/mt2701/mt2701-reg.h @@ -10,10 +10,17 @@ #define _MT2701_REG_H_ #define AUDIO_TOP_CON0 0x0000 +#define AUDIO_TOP_CON3 0x000c #define AUDIO_TOP_CON4 0x0010 #define AUDIO_TOP_CON5 0x0014 #define AFE_DAIBT_CON0 0x001c #define AFE_MRGIF_CON 0x003c +#define AFE_HDMI_OUT_CON0 0x0370 +#define AFE_HDMI_OUT_BASE 0x0374 +#define AFE_HDMI_OUT_CUR 0x0378 +#define AFE_HDMI_OUT_END 0x037c +#define AFE_HDMI_CONN0 0x0390 +#define AFE_8CH_I2S_OUT_CON 0x0394 #define ASMI_TIMING_CON1 0x0100 #define ASMO_TIMING_CON1 0x0104 #define PWR1_ASM_CON1 0x0108 @@ -125,6 +132,28 @@ #define AFE_MEMIF_PBUF_SIZE_DLM_BYTE_MASK (0x3 << 12) #define AFE_MEMIF_PBUF_SIZE_DLM_32BYTES (0x1 << 12) +/* AUDIO_TOP_CON3 (0x000c) -- HDMI BCK divider */ +#define AUDIO_TOP_CON3_HDMI_BCK_DIV_MASK (0x3f << 8) +#define AUDIO_TOP_CON3_HDMI_BCK_DIV(x) (((x) & 0x3f) << 8) + +/* AFE_HDMI_OUT_CON0 (0x0370) */ +#define AFE_HDMI_OUT_CON0_OUT_ON (0x1 << 0) +#define AFE_HDMI_OUT_CON0_BIT_WIDTH_MASK (0x1 << 1) +#define AFE_HDMI_OUT_CON0_BIT_WIDTH_16 (0x0 << 1) +#define AFE_HDMI_OUT_CON0_BIT_WIDTH_32 (0x1 << 1) +#define AFE_HDMI_OUT_CON0_CH_NUM_MASK (0xf << 4) +#define AFE_HDMI_OUT_CON0_CH_NUM(x) (((x) & 0xf) << 4) + +/* AFE_8CH_I2S_OUT_CON (0x0394) -- on-SoC 8-channel I2S that feeds HDMI TX */ +#define AFE_8CH_I2S_OUT_CON_EN (0x1 << 0) +#define AFE_8CH_I2S_OUT_CON_BCK_INV (0x1 << 1) +#define AFE_8CH_I2S_OUT_CON_LRCK_INV (0x1 << 2) +#define AFE_8CH_I2S_OUT_CON_I2S_DELAY (0x1 << 3) +#define AFE_8CH_I2S_OUT_CON_WLEN_MASK (0x3 << 4) +#define AFE_8CH_I2S_OUT_CON_WLEN_16BIT (0x1 << 4) +#define AFE_8CH_I2S_OUT_CON_WLEN_24BIT (0x2 << 4) +#define AFE_8CH_I2S_OUT_CON_WLEN_32BIT (0x3 << 4) + /* I2S in/out register bit control */ #define ASYS_I2S_CON_FS (0x1f << 8) #define ASYS_I2S_CON_FS_SET(x) ((x) << 8) diff --git a/sound/soc/mediatek/mt8196/Makefile b/sound/soc/mediatek/mt8196/Makefile new file mode 100644 index 00000000000000..91de200071d78f --- /dev/null +++ b/sound/soc/mediatek/mt8196/Makefile @@ -0,0 +1,14 @@ 
+# SPDX-License-Identifier: GPL-2.0 + +# platform driver +snd-soc-mt8196-afe-objs += \ + mt8196-afe-pcm.o \ + mt8196-afe-clk.o \ + mt8196-dai-adda.o \ + mt8196-dai-i2s.o \ + mt8196-dai-tdm.o + +obj-$(CONFIG_SND_SOC_MT8196) += snd-soc-mt8196-afe.o + +# machine driver +obj-$(CONFIG_SND_SOC_MT8196_NAU8825) += mt8196-nau8825.o diff --git a/sound/soc/mediatek/mt8196/mt8196-afe-clk.c b/sound/soc/mediatek/mt8196/mt8196-afe-clk.c new file mode 100644 index 00000000000000..286e39f53ae050 --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-afe-clk.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mt8196-afe-clk.c -- Mediatek 8196 afe clock ctrl + * + * Copyright (c) 2025 MediaTek Inc. + * Author: Darren Ye + */ + +#include +#include +#include + +#include "mt8196-afe-clk.h" +#include "mt8196-afe-common.h" + +static const char *aud_clks[MT8196_CLK_NUM] = { + /* vlp clk */ + [MT8196_CLK_VLP_MUX_AUDIOINTBUS] = "top_aud_intbus", + [MT8196_CLK_VLP_MUX_AUD_ENG1] = "top_aud_eng1", + [MT8196_CLK_VLP_MUX_AUD_ENG2] = "top_aud_eng2", + [MT8196_CLK_VLP_MUX_AUDIO_H] = "top_aud_h", + /* pll */ + [MT8196_CLK_TOP_APLL1_CK] = "apll1", + [MT8196_CLK_TOP_APLL2_CK] = "apll2", + /* divider */ + [MT8196_CLK_TOP_APLL12_DIV_I2SIN0] = "apll12_div_i2sin0", + [MT8196_CLK_TOP_APLL12_DIV_I2SIN1] = "apll12_div_i2sin1", + [MT8196_CLK_TOP_APLL12_DIV_FMI2S] = "apll12_div_fmi2s", + [MT8196_CLK_TOP_APLL12_DIV_TDMOUT_M] = "apll12_div_tdmout_m", + [MT8196_CLK_TOP_APLL12_DIV_TDMOUT_B] = "apll12_div_tdmout_b", + /* mux */ + [MT8196_CLK_TOP_ADSP_SEL] = "top_adsp", +}; + +int mt8196_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk) +{ + int ret; + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(afe->dev, "failed to enable clk\n"); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt8196_afe_enable_clk); + +void mt8196_afe_disable_clk(struct mtk_base_afe *afe, struct clk *clk) +{ + if (clk) + clk_disable_unprepare(clk); + else + dev_err(afe->dev, "NULL clk\n"); +} 
+EXPORT_SYMBOL_GPL(mt8196_afe_disable_clk); + +static int mt8196_afe_set_clk_rate(struct mtk_base_afe *afe, struct clk *clk, + unsigned int rate) +{ + int ret; + + if (clk) { + ret = clk_set_rate(clk, rate); + if (ret) { + dev_err(afe->dev, "failed to set clk rate\n"); + return ret; + } + } + + return 0; +} + +static unsigned int get_top_cg_reg(unsigned int cg_type) +{ + switch (cg_type) { + case MT8196_AUDIO_26M_EN_ON: + case MT8196_AUDIO_F3P25M_EN_ON: + case MT8196_AUDIO_APLL1_EN_ON: + case MT8196_AUDIO_APLL2_EN_ON: + return AUDIO_ENGEN_CON0; + case MT8196_CG_AUDIO_HOPPING_CK: + case MT8196_CG_AUDIO_F26M_CK: + case MT8196_CG_APLL1_CK: + case MT8196_CG_APLL2_CK: + case MT8196_PDN_APLL_TUNER2: + case MT8196_PDN_APLL_TUNER1: + return AUDIO_TOP_CON4; + default: + return 0; + } +} + +static unsigned int get_top_cg_mask(unsigned int cg_type) +{ + switch (cg_type) { + case MT8196_AUDIO_26M_EN_ON: + return AUDIO_26M_EN_ON_MASK_SFT; + case MT8196_AUDIO_F3P25M_EN_ON: + return AUDIO_F3P25M_EN_ON_MASK_SFT; + case MT8196_AUDIO_APLL1_EN_ON: + return AUDIO_APLL1_EN_ON_MASK_SFT; + case MT8196_AUDIO_APLL2_EN_ON: + return AUDIO_APLL2_EN_ON_MASK_SFT; + case MT8196_CG_AUDIO_HOPPING_CK: + return CG_AUDIO_HOPPING_CK_MASK_SFT; + case MT8196_CG_AUDIO_F26M_CK: + return CG_AUDIO_F26M_CK_MASK_SFT; + case MT8196_CG_APLL1_CK: + return CG_APLL1_CK_MASK_SFT; + case MT8196_CG_APLL2_CK: + return CG_APLL2_CK_MASK_SFT; + case MT8196_PDN_APLL_TUNER2: + return PDN_APLL_TUNER2_MASK_SFT; + case MT8196_PDN_APLL_TUNER1: + return PDN_APLL_TUNER1_MASK_SFT; + default: + return 0; + } +} + +static unsigned int get_top_cg_on_val(unsigned int cg_type) +{ + switch (cg_type) { + case MT8196_AUDIO_26M_EN_ON: + case MT8196_AUDIO_F3P25M_EN_ON: + case MT8196_AUDIO_APLL1_EN_ON: + case MT8196_AUDIO_APLL2_EN_ON: + return get_top_cg_mask(cg_type); + case MT8196_CG_AUDIO_HOPPING_CK: + case MT8196_CG_AUDIO_F26M_CK: + case MT8196_CG_APLL1_CK: + case MT8196_CG_APLL2_CK: + case MT8196_PDN_APLL_TUNER2: + case 
MT8196_PDN_APLL_TUNER1: + return 0; + default: + return 0; + } +} + +static unsigned int get_top_cg_off_val(unsigned int cg_type) +{ + switch (cg_type) { + case MT8196_AUDIO_26M_EN_ON: + case MT8196_AUDIO_F3P25M_EN_ON: + case MT8196_AUDIO_APLL1_EN_ON: + case MT8196_AUDIO_APLL2_EN_ON: + return 0; + case MT8196_CG_AUDIO_HOPPING_CK: + case MT8196_CG_AUDIO_F26M_CK: + case MT8196_CG_APLL1_CK: + case MT8196_CG_APLL2_CK: + case MT8196_PDN_APLL_TUNER2: + case MT8196_PDN_APLL_TUNER1: + return get_top_cg_mask(cg_type); + default: + return get_top_cg_mask(cg_type); + } +} + +static int mt8196_afe_enable_top_cg(struct mtk_base_afe *afe, unsigned int cg_type) +{ + int ret; + unsigned int reg = get_top_cg_reg(cg_type); + unsigned int mask = get_top_cg_mask(cg_type); + unsigned int val = get_top_cg_on_val(cg_type); + + if (!afe->regmap) { + dev_err(afe->dev, "afe regmap is null !!!\n"); + return 0; + } + + dev_dbg(afe->dev, "reg: 0x%x, mask: 0x%x, val: 0x%x\n", reg, mask, val); + + ret = regmap_update_bits(afe->regmap, reg, mask, val); + if (ret) + dev_err(afe->dev, "regmap_update_bits failed: %d\n", ret); + + return ret; +} + +static int mt8196_afe_disable_top_cg(struct mtk_base_afe *afe, unsigned int cg_type) +{ + int ret; + unsigned int reg = get_top_cg_reg(cg_type); + unsigned int mask = get_top_cg_mask(cg_type); + unsigned int val = get_top_cg_off_val(cg_type); + + if (!afe->regmap) { + dev_err(afe->dev, "afe regmap is null !!!\n"); + return 0; + } + + dev_dbg(afe->dev, "reg: 0x%x, mask: 0x%x, val: 0x%x\n", reg, mask, val); + + ret = regmap_update_bits(afe->regmap, reg, mask, val); + if (ret) + dev_err(afe->dev, "regmap_update_bits failed: %d\n", ret); + + return ret; +} + +static int apll1_mux_setting(struct mtk_base_afe *afe, bool enable) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int apll_rate; + int ret; + + dev_dbg(afe->dev, "enable: %d\n", enable); + + if (enable) { + apll_rate = mt8196_get_apll_rate(afe, MT8196_APLL1); + + /* 180.6336 / 4 = 
45.1584MHz */ + ret = mt8196_afe_enable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUD_ENG1]); + if (ret) + return ret; + + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUD_ENG1], + MT8196_AUD_ENG1_CLK); + if (ret) + return ret; + + ret = mt8196_afe_enable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H]); + if (ret) + return ret; + + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H], + apll_rate); + if (ret) + return ret; + } else { + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUD_ENG1], + MT8196_AFE_26M); + if (ret) + return ret; + + mt8196_afe_disable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUD_ENG1]); + + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H], + MT8196_AFE_26M); + if (ret) + return ret; + + mt8196_afe_disable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H]); + } + + return 0; +} + +static int apll2_mux_setting(struct mtk_base_afe *afe, bool enable) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int apll_rate; + int ret; + + dev_dbg(afe->dev, "enable: %d\n", enable); + + if (enable) { + apll_rate = mt8196_get_apll_rate(afe, MT8196_APLL2); + + /* 196.608 / 4 = 49.152MHz */ + ret = mt8196_afe_enable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUD_ENG2]); + if (ret) + return ret; + + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUD_ENG2], + MT8196_AUD_ENG2_CLK); + if (ret) + return ret; + + ret = mt8196_afe_enable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H]); + if (ret) + return ret; + + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H], + apll_rate); + if (ret) + return ret; + } else { + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUD_ENG2], + MT8196_AFE_26M); + if (ret) + return ret; + + mt8196_afe_disable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUD_ENG2]); + + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H], + 
MT8196_AFE_26M); + if (ret) + return ret; + + mt8196_afe_disable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H]); + } + + return 0; +} + +int mt8196_apll1_enable(struct mtk_base_afe *afe) +{ + int ret; + + /* setting for APLL */ + apll1_mux_setting(afe, true); + + ret = mt8196_afe_enable_top_cg(afe, MT8196_CG_APLL1_CK); + if (ret) + goto err_clk_apll1; + + ret = mt8196_afe_enable_top_cg(afe, MT8196_PDN_APLL_TUNER1); + if (ret) + goto err_clk_apll1_tuner; + + /* sel 44.1kHz:1, apll_div:7, upper bound:3 */ + regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, + XTAL_EN_128FS_SEL_MASK_SFT | APLL_DIV_MASK_SFT | UPPER_BOUND_MASK_SFT, + (0x1 << XTAL_EN_128FS_SEL_SFT) | (7 << APLL_DIV_SFT) | + (3 << UPPER_BOUND_SFT)); + + /* apll1 freq tuner enable */ + regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, + FREQ_TUNER_EN_MASK_SFT, + 0x1 << FREQ_TUNER_EN_SFT); + + /* audio apll1 on */ + mt8196_afe_enable_top_cg(afe, MT8196_AUDIO_APLL1_EN_ON); + + return 0; + +err_clk_apll1_tuner: + mt8196_afe_disable_top_cg(afe, MT8196_PDN_APLL_TUNER1); +err_clk_apll1: + mt8196_afe_disable_top_cg(afe, MT8196_CG_APLL1_CK); + return ret; +} + +void mt8196_apll1_disable(struct mtk_base_afe *afe) +{ + /* audio apll1 off */ + mt8196_afe_disable_top_cg(afe, MT8196_AUDIO_APLL1_EN_ON); + + /* apll1 freq tuner disable */ + regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, + FREQ_TUNER_EN_MASK_SFT, + 0x0); + + mt8196_afe_disable_top_cg(afe, MT8196_PDN_APLL_TUNER1); + mt8196_afe_disable_top_cg(afe, MT8196_CG_APLL1_CK); + apll1_mux_setting(afe, false); +} + +int mt8196_apll2_enable(struct mtk_base_afe *afe) +{ + int ret; + + /* setting for APLL */ + apll2_mux_setting(afe, true); + + ret = mt8196_afe_enable_top_cg(afe, MT8196_CG_APLL2_CK); + if (ret) + goto err_clk_apll2; + + ret = mt8196_afe_enable_top_cg(afe, MT8196_PDN_APLL_TUNER2); + if (ret) + goto err_clk_apll2_tuner; + + /* sel 48kHz: 2, apll_div: 7, upper bound: 3*/ + regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, + 
XTAL_EN_128FS_SEL_MASK_SFT | APLL_DIV_MASK_SFT | UPPER_BOUND_MASK_SFT, + (0x2 << XTAL_EN_128FS_SEL_SFT) | (7 << APLL_DIV_SFT) | + (3 << UPPER_BOUND_SFT)); + + /* apll2 freq tuner enable */ + regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, + FREQ_TUNER_EN_MASK_SFT, + 0x1 << FREQ_TUNER_EN_SFT); + + /* audio apll2 on */ + mt8196_afe_enable_top_cg(afe, MT8196_AUDIO_APLL2_EN_ON); + return 0; + +err_clk_apll2_tuner: + mt8196_afe_disable_top_cg(afe, MT8196_PDN_APLL_TUNER2); +err_clk_apll2: + mt8196_afe_disable_top_cg(afe, MT8196_CG_APLL2_CK); + return ret; +} + +void mt8196_apll2_disable(struct mtk_base_afe *afe) +{ + /* audio apll2 off */ + mt8196_afe_disable_top_cg(afe, MT8196_AUDIO_APLL2_EN_ON); + + /* apll2 freq tuner disable */ + regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, + FREQ_TUNER_EN_MASK_SFT, + 0x0); + + mt8196_afe_disable_top_cg(afe, MT8196_PDN_APLL_TUNER2); + mt8196_afe_disable_top_cg(afe, MT8196_CG_APLL2_CK); + apll2_mux_setting(afe, false); +} + +int mt8196_get_apll_rate(struct mtk_base_afe *afe, int apll) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int clk_id = 0; + + if (apll < MT8196_APLL1 || apll > MT8196_APLL2) { + dev_warn(afe->dev, "invalid clk id %d\n", apll); + return 0; + } + + if (apll == MT8196_APLL1) + clk_id = MT8196_CLK_TOP_APLL1_CK; + else + clk_id = MT8196_CLK_TOP_APLL2_CK; + + return clk_get_rate(afe_priv->clk[clk_id]); +} + +/* 48K: select APLL2; 44.1k: select APLL1 */ +int mt8196_get_apll_by_rate(struct mtk_base_afe *afe, int rate) +{ + return (rate % 8000) ?
MT8196_APLL1 : MT8196_APLL2; +} + +int mt8196_get_apll_by_name(struct mtk_base_afe *afe, const char *name) +{ + if (strcmp(name, APLL1_W_NAME) == 0) + return MT8196_APLL1; + + return MT8196_APLL2; +} + +static const int mck_div[MT8196_MCK_NUM] = { + [MT8196_I2SIN0_MCK] = MT8196_CLK_TOP_APLL12_DIV_I2SIN0, + [MT8196_I2SIN1_MCK] = MT8196_CLK_TOP_APLL12_DIV_I2SIN1, + [MT8196_FMI2S_MCK] = MT8196_CLK_TOP_APLL12_DIV_FMI2S, + [MT8196_TDMOUT_MCK] = MT8196_CLK_TOP_APLL12_DIV_TDMOUT_M, + [MT8196_TDMOUT_BCK] = MT8196_CLK_TOP_APLL12_DIV_TDMOUT_B, +}; + +int mt8196_mck_enable(struct mtk_base_afe *afe, int mck_id, int rate) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int div_clk_id; + int ret; + + dev_dbg(afe->dev, "mck_id: %d, rate: %d\n", mck_id, rate); + + if (mck_id >= MT8196_MCK_NUM || mck_id < 0) + return -EINVAL; + + div_clk_id = mck_div[mck_id]; + + /* enable div, set rate */ + if (div_clk_id < 0) { + dev_err(afe->dev, "invalid div_clk_id %d\n", div_clk_id); + return -EINVAL; + } + + if (div_clk_id == MT8196_CLK_TOP_APLL12_DIV_TDMOUT_B) + rate *= 16; + + ret = mt8196_afe_enable_clk(afe, afe_priv->clk[div_clk_id]); + if (ret) + return ret; + + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[div_clk_id], rate); + if (ret) + return ret; + + return 0; +} + +int mt8196_mck_disable(struct mtk_base_afe *afe, int mck_id) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int div_clk_id; + int ret; + + dev_dbg(afe->dev, "mck_id: %d.\n", mck_id); + + if (mck_id < 0) { + dev_err(afe->dev, "mck_id = %d < 0\n", mck_id); + return -EINVAL; + } + + div_clk_id = mck_div[mck_id]; + + if (div_clk_id < 0) { + dev_err(afe->dev, "div_clk_id = %d < 0\n", + div_clk_id); + return -EINVAL; + } + + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[div_clk_id], MT8196_AFE_26M); + if (ret) + return ret; + + mt8196_afe_disable_clk(afe, afe_priv->clk[div_clk_id]); + + return 0; +} + +int mt8196_afe_enable_reg_rw_clk(struct mtk_base_afe *afe) +{ + struct 
mt8196_afe_private *afe_priv = afe->platform_priv; + int ret; + + /* bus clock for AFE external access, like DRAM */ + mt8196_afe_enable_clk(afe, afe_priv->clk[MT8196_CLK_TOP_ADSP_SEL]); + + /* bus clock for AFE internal access, like AFE SRAM */ + mt8196_afe_enable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIOINTBUS]); + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIOINTBUS], + MT8196_AFE_26M); + if (ret) + return ret; + + /* enable audio h clock */ + mt8196_afe_enable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H]); + ret = mt8196_afe_set_clk_rate(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H], + MT8196_AFE_26M); + if (ret) + return ret; + + /* AFE hw clock */ + /* IPM2.0: USE HOPPING & 26M */ + /* set in the regmap_register_patch */ + return 0; +} + +int mt8196_afe_disable_reg_rw_clk(struct mtk_base_afe *afe) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + + /* IPM2.0: Use HOPPING & 26M */ + /* set in the regmap_register_patch */ + + mt8196_afe_disable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIO_H]); + mt8196_afe_disable_clk(afe, afe_priv->clk[MT8196_CLK_VLP_MUX_AUDIOINTBUS]); + mt8196_afe_disable_clk(afe, afe_priv->clk[MT8196_CLK_TOP_ADSP_SEL]); + return 0; +} + +int mt8196_afe_enable_main_clock(struct mtk_base_afe *afe) +{ + mt8196_afe_enable_top_cg(afe, MT8196_AUDIO_26M_EN_ON); + return 0; +} + +int mt8196_afe_disable_main_clock(struct mtk_base_afe *afe) +{ + mt8196_afe_disable_top_cg(afe, MT8196_AUDIO_26M_EN_ON); + return 0; +} + +int mt8196_init_clock(struct mtk_base_afe *afe) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int i; + + afe_priv->clk = devm_kcalloc(afe->dev, MT8196_CLK_NUM, sizeof(*afe_priv->clk), + GFP_KERNEL); + if (!afe_priv->clk) + return -ENOMEM; + + for (i = 0; i < MT8196_CLK_NUM; i++) { + afe_priv->clk[i] = devm_clk_get(afe->dev, aud_clks[i]); + if (IS_ERR(afe_priv->clk[i])) { + dev_err(afe->dev, "devm_clk_get %s fail\n", aud_clks[i]); + return 
PTR_ERR(afe_priv->clk[i]); + } + } + + return 0; +} + diff --git a/sound/soc/mediatek/mt8196/mt8196-afe-clk.h b/sound/soc/mediatek/mt8196/mt8196-afe-clk.h new file mode 100644 index 00000000000000..7d47dcff768b29 --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-afe-clk.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mt8196-afe-clk.h -- Mediatek MT8196 AFE Clock Control definitions + * + * Copyright (c) 2025 MediaTek Inc. + * Author: Darren Ye + */ + +#ifndef _MT8196_AFE_CLOCK_CTRL_H_ +#define _MT8196_AFE_CLOCK_CTRL_H_ + +#define MT8196_AFE_26M 26000000 +#define MT8196_AUD_ENG1_CLK 45158400 +#define MT8196_AUD_ENG2_CLK 49152000 + +/* APLL */ +#define APLL1_W_NAME "APLL1" +#define APLL2_W_NAME "APLL2" + +enum { + MT8196_APLL1 = 0, + MT8196_APLL2, +}; + +enum { + /* vlp clk */ + MT8196_CLK_VLP_MUX_AUDIOINTBUS, + MT8196_CLK_VLP_MUX_AUD_ENG1, + MT8196_CLK_VLP_MUX_AUD_ENG2, + MT8196_CLK_VLP_MUX_AUDIO_H, + /* pll */ + MT8196_CLK_TOP_APLL1_CK, + MT8196_CLK_TOP_APLL2_CK, + /* divider */ + MT8196_CLK_TOP_APLL12_DIV_I2SIN0, + MT8196_CLK_TOP_APLL12_DIV_I2SIN1, + MT8196_CLK_TOP_APLL12_DIV_FMI2S, + MT8196_CLK_TOP_APLL12_DIV_TDMOUT_M, + MT8196_CLK_TOP_APLL12_DIV_TDMOUT_B, + /* mux */ + MT8196_CLK_TOP_ADSP_SEL, + MT8196_CLK_NUM, +}; + +struct mtk_base_afe; + +int mt8196_mck_enable(struct mtk_base_afe *afe, int mck_id, int rate); +int mt8196_mck_disable(struct mtk_base_afe *afe, int mck_id); +int mt8196_get_apll_rate(struct mtk_base_afe *afe, int apll); +int mt8196_get_apll_by_rate(struct mtk_base_afe *afe, int rate); +int mt8196_get_apll_by_name(struct mtk_base_afe *afe, const char *name); +int mt8196_init_clock(struct mtk_base_afe *afe); +int mt8196_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk); +void mt8196_afe_disable_clk(struct mtk_base_afe *afe, struct clk *clk); +int mt8196_apll1_enable(struct mtk_base_afe *afe); +void mt8196_apll1_disable(struct mtk_base_afe *afe); +int mt8196_apll2_enable(struct mtk_base_afe *afe); +void 
mt8196_apll2_disable(struct mtk_base_afe *afe); +int mt8196_afe_enable_main_clock(struct mtk_base_afe *afe); +int mt8196_afe_disable_main_clock(struct mtk_base_afe *afe); +int mt8196_afe_enable_reg_rw_clk(struct mtk_base_afe *afe); +int mt8196_afe_disable_reg_rw_clk(struct mtk_base_afe *afe); + +#endif diff --git a/sound/soc/mediatek/mt8196/mt8196-afe-common.h b/sound/soc/mediatek/mt8196/mt8196-afe-common.h new file mode 100644 index 00000000000000..4951a6f32abbf3 --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-afe-common.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mt8196-afe-common.h -- Mediatek 8196 audio driver definitions + * + * Copyright (c) 2025 MediaTek Inc. + * Author: Darren Ye + */ + +#ifndef _MT_8196_AFE_COMMON_H_ +#define _MT_8196_AFE_COMMON_H_ +#include +#include + +#include "mt8196-reg.h" +#include "../common/mtk-base-afe.h" + +/* HW IPM 2.0 */ +enum { + MTK_AFE_IPM2P0_RATE_8K = 0x0, + MTK_AFE_IPM2P0_RATE_11K = 0x1, + MTK_AFE_IPM2P0_RATE_12K = 0x2, + MTK_AFE_IPM2P0_RATE_16K = 0x4, + MTK_AFE_IPM2P0_RATE_22K = 0x5, + MTK_AFE_IPM2P0_RATE_24K = 0x6, + MTK_AFE_IPM2P0_RATE_32K = 0x8, + MTK_AFE_IPM2P0_RATE_44K = 0x9, + MTK_AFE_IPM2P0_RATE_48K = 0xa, + MTK_AFE_IPM2P0_RATE_88K = 0xd, + MTK_AFE_IPM2P0_RATE_96K = 0xe, + MTK_AFE_IPM2P0_RATE_176K = 0x11, + MTK_AFE_IPM2P0_RATE_192K = 0x12, + MTK_AFE_IPM2P0_RATE_352K = 0x15, + MTK_AFE_IPM2P0_RATE_384K = 0x16, +}; + +enum { + MTKAIF_PROTOCOL_1, + MTKAIF_PROTOCOL_2, + MTKAIF_PROTOCOL_2_CLK_P2, +}; + +enum { + MT8196_MEMIF_DL0, + MT8196_MEMIF_DL1, + MT8196_MEMIF_DL2, + MT8196_MEMIF_DL3, + MT8196_MEMIF_DL4, + MT8196_MEMIF_DL5, + MT8196_MEMIF_DL6, + MT8196_MEMIF_DL7, + MT8196_MEMIF_DL8, + MT8196_MEMIF_DL23, + MT8196_MEMIF_DL24, + MT8196_MEMIF_DL25, + MT8196_MEMIF_DL26, + MT8196_MEMIF_DL_4CH, + MT8196_MEMIF_DL_24CH, + MT8196_MEMIF_VUL0, + MT8196_MEMIF_VUL1, + MT8196_MEMIF_VUL2, + MT8196_MEMIF_VUL3, + MT8196_MEMIF_VUL4, + MT8196_MEMIF_VUL5, + MT8196_MEMIF_VUL6, + MT8196_MEMIF_VUL7, + 
MT8196_MEMIF_VUL8, + MT8196_MEMIF_VUL9, + MT8196_MEMIF_VUL10, + MT8196_MEMIF_VUL24, + MT8196_MEMIF_VUL25, + MT8196_MEMIF_VUL26, + MT8196_MEMIF_VUL_CM0, + MT8196_MEMIF_VUL_CM1, + MT8196_MEMIF_VUL_CM2, + MT8196_MEMIF_ETDM_IN0, + MT8196_MEMIF_ETDM_IN1, + MT8196_MEMIF_ETDM_IN2, + MT8196_MEMIF_ETDM_IN3, + MT8196_MEMIF_ETDM_IN4, + MT8196_MEMIF_ETDM_IN6, + MT8196_MEMIF_HDMI, + MT8196_MEMIF_NUM, + MT8196_DAI_ADDA = MT8196_MEMIF_NUM, + MT8196_DAI_ADDA_CH34, + MT8196_DAI_ADDA_CH56, + MT8196_DAI_AP_DMIC, + MT8196_DAI_AP_DMIC_CH34, + MT8196_DAI_AP_DMIC_MULTICH, + MT8196_DAI_I2S_IN0, + MT8196_DAI_I2S_IN1, + MT8196_DAI_I2S_IN2, + MT8196_DAI_I2S_IN3, + MT8196_DAI_I2S_IN4, + MT8196_DAI_I2S_IN6, + MT8196_DAI_I2S_OUT0, + MT8196_DAI_I2S_OUT1, + MT8196_DAI_I2S_OUT2, + MT8196_DAI_I2S_OUT3, + MT8196_DAI_I2S_OUT4, + MT8196_DAI_I2S_OUT6, + MT8196_DAI_FM_I2S_MASTER, + MT8196_DAI_TDM, + MT8196_DAI_TDM_DPTX, + MT8196_DAI_NUM, +}; + +#define MT8196_DAI_I2S_MAX_NUM 13 //depends each platform's max i2s num + +/* update irq ID (= enum) from AFE_IRQ_MCU_STATUS */ +enum { + MT8196_IRQ_0, + MT8196_IRQ_1, + MT8196_IRQ_2, + MT8196_IRQ_3, + MT8196_IRQ_4, + MT8196_IRQ_5, + MT8196_IRQ_6, + MT8196_IRQ_7, + MT8196_IRQ_8, + MT8196_IRQ_9, + MT8196_IRQ_10, + MT8196_IRQ_11, + MT8196_IRQ_12, + MT8196_IRQ_13, + MT8196_IRQ_14, + MT8196_IRQ_15, + MT8196_IRQ_16, + MT8196_IRQ_17, + MT8196_IRQ_18, + MT8196_IRQ_19, + MT8196_IRQ_20, + MT8196_IRQ_21, + MT8196_IRQ_22, + MT8196_IRQ_23, + MT8196_IRQ_24, + MT8196_IRQ_25, + MT8196_IRQ_26, + MT8196_IRQ_31, /* used only for TDM */ + MT8196_IRQ_NUM, +}; + +/* update irq ID (= enum) from AFE_IRQ_MCU_STATUS */ +enum { + MT8196_CUS_IRQ_TDM, /* used only for TDM */ + MT8196_CUS_IRQ_NUM, +}; + +enum { + /* AUDIO_ENGEN_CON0 */ + MT8196_AUDIO_26M_EN_ON, + MT8196_AUDIO_F3P25M_EN_ON, + MT8196_AUDIO_APLL1_EN_ON, + MT8196_AUDIO_APLL2_EN_ON, + MT8196_AUDIO_F26M_EN_RST, + MT8196_MULTI_USER_RST, + MT8196_MULTI_USER_BYPASS, + /* AUDIO_TOP_CON4 */ + MT8196_CG_AUDIO_HOPPING_CK, + 
MT8196_CG_AUDIO_F26M_CK, + MT8196_CG_APLL1_CK, + MT8196_CG_APLL2_CK, + MT8196_PDN_APLL_TUNER2, + MT8196_PDN_APLL_TUNER1, + MT8196_AUDIO_CG_NUM, +}; + +/* MCLK */ +enum { + MT8196_I2SIN0_MCK, + MT8196_I2SIN1_MCK, + MT8196_FMI2S_MCK, + MT8196_TDMOUT_MCK, + MT8196_TDMOUT_BCK, + MT8196_MCK_NUM, +}; + +/* CM*/ +enum { + CM0, + CM1, + CM2, + CM_NUM, +}; + +struct clk; +struct mtk_base_afe; + +struct mt8196_afe_private { + struct clk **clk; + /* dai */ + void *dai_priv[MT8196_DAI_NUM]; + /* mck */ + int mck_rate[MT8196_MCK_NUM]; + /* channel merge */ + u32 cm_rate[CM_NUM]; + u32 cm_channels; +}; + +int mt8196_dai_adda_register(struct mtk_base_afe *afe); +int mt8196_dai_i2s_register(struct mtk_base_afe *afe); +int mt8196_dai_tdm_register(struct mtk_base_afe *afe); +int mt8196_dai_set_priv(struct mtk_base_afe *afe, int id, + int priv_size, const void *priv_data); + +#endif diff --git a/sound/soc/mediatek/mt8196/mt8196-afe-pcm.c b/sound/soc/mediatek/mt8196/mt8196-afe-pcm.c new file mode 100644 index 00000000000000..511e888567be87 --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-afe-pcm.c @@ -0,0 +1,2503 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Mediatek ALSA SoC AFE platform driver for 8196 + * + * Copyright (c) 2025 MediaTek Inc. 
+ * Author: Darren Ye + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mt8196-afe-clk.h" +#include "mt8196-afe-common.h" +#include "mt8196-interconnection.h" + +#include "../common/mtk-afe-fe-dai.h" +#include "../common/mtk-afe-platform-driver.h" + +static const struct snd_pcm_hardware mt8196_afe_hardware = { + .info = (SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_NO_PERIOD_WAKEUP | + SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_MMAP_VALID), + .formats = (SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE), + .period_bytes_min = 96, + .period_bytes_max = 4 * 48 * 1024, + .periods_min = 2, + .periods_max = 256, + .buffer_bytes_max = 256 * 1024, + .fifo_size = 0, +}; + +static unsigned int mt8196_rate_transform(struct device *dev, + unsigned int rate) +{ + switch (rate) { + case 8000: + return MTK_AFE_IPM2P0_RATE_8K; + case 11025: + return MTK_AFE_IPM2P0_RATE_11K; + case 12000: + return MTK_AFE_IPM2P0_RATE_12K; + case 16000: + return MTK_AFE_IPM2P0_RATE_16K; + case 22050: + return MTK_AFE_IPM2P0_RATE_22K; + case 24000: + return MTK_AFE_IPM2P0_RATE_24K; + case 32000: + return MTK_AFE_IPM2P0_RATE_32K; + case 44100: + return MTK_AFE_IPM2P0_RATE_44K; + case 48000: + return MTK_AFE_IPM2P0_RATE_48K; + case 88200: + return MTK_AFE_IPM2P0_RATE_88K; + case 96000: + return MTK_AFE_IPM2P0_RATE_96K; + case 176400: + return MTK_AFE_IPM2P0_RATE_176K; + case 192000: + return MTK_AFE_IPM2P0_RATE_192K; + /* not support 260K */ + case 352800: + return MTK_AFE_IPM2P0_RATE_352K; + case 384000: + return MTK_AFE_IPM2P0_RATE_384K; + default: + dev_err(dev, "rate %u invalid, use %d!!!\n", + rate, MTK_AFE_IPM2P0_RATE_48K); + return MTK_AFE_IPM2P0_RATE_48K; + } +} + +static int mt8196_set_cm(struct mtk_base_afe *afe, int id, + bool update, bool swap, unsigned int ch) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + unsigned int rate = afe_priv->cm_rate[id]; + unsigned int 
rate_val = mt8196_rate_transform(afe->dev, rate); + unsigned int ch_pair = ch / 2; + unsigned int update_val; + int reg = AFE_CM0_CON0 + 0x10 * id; + + if (update && ch_pair) + update_val = (26000000 / rate - 10) / ch_pair - 1; + else + update_val = 0x64; + + dev_dbg(afe->dev, "CM%d, rate %d, update %d, swap %d, ch %d, update_val: %d\n", + id, rate, update, swap, ch, update_val); + + /* update cnt */ + regmap_update_bits(afe->regmap, reg, + AFE_CM_UPDATE_CNT_MASK << AFE_CM_UPDATE_CNT_SFT, + update_val << AFE_CM_UPDATE_CNT_SFT); + + /* rate */ + regmap_update_bits(afe->regmap, reg, + AFE_CM_1X_EN_SEL_FS_MASK << AFE_CM_1X_EN_SEL_FS_SFT, + rate_val << AFE_CM_1X_EN_SEL_FS_SFT); + + /* ch num */ + ch = ch - 1; + regmap_update_bits(afe->regmap, reg, + AFE_CM_CH_NUM_MASK << AFE_CM_CH_NUM_SFT, + ch << AFE_CM_CH_NUM_SFT); + + /* swap */ + regmap_update_bits(afe->regmap, reg, + AFE_CM_BYTE_SWAP_MASK << AFE_CM_BYTE_SWAP_SFT, + swap << AFE_CM_BYTE_SWAP_SFT); + + return 0; +} + +static int mt8196_enable_cm_bypass(struct mtk_base_afe *afe, int id, bool en) +{ + return regmap_update_bits(afe->regmap, + AFE_CM0_CON0 + 0x10 * id, + AFE_CM_BYPASS_MODE_MASK << AFE_CM_BYPASS_MODE_SFT, + en << AFE_CM_BYPASS_MODE_SFT); +} + +static int mt8196_fe_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + int memif_num = cpu_dai->id; + struct mtk_base_afe_memif *memif = &afe->memif[memif_num]; + const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware; + int ret; + + dev_dbg(afe->dev, "memif_num: %d.\n", memif_num); + + memif->substream = substream; + + snd_pcm_hw_constraint_step(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16); + + if (memif_num == MT8196_MEMIF_VUL_CM0) + 
snd_pcm_hw_constraint_step(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16); + + snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware); + + ret = snd_pcm_hw_constraint_integer(runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + dev_warn(afe->dev, "snd_pcm_hw_constraint_integer failed\n"); + + /* dynamic allocate irq to memif */ + if (memif->irq_usage < 0) { + int irq_id = mtk_dynamic_irq_acquire(afe); + + if (irq_id != afe->irqs_size) { + /* link */ + memif->irq_usage = irq_id; + } else { + dev_err(afe->dev, "no more asys irq\n"); + ret = -EBUSY; + } + } + return ret; +} + +static void mt8196_fe_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + int memif_num = cpu_dai->id; + struct mtk_base_afe_memif *memif = &afe->memif[memif_num]; + int irq_id = memif->irq_usage; + + dev_dbg(afe->dev, "memif_num: %d.\n", memif_num); + + memif->substream = NULL; + + if (!memif->const_irq) { + mtk_dynamic_irq_release(afe, irq_id); + memif->irq_usage = -1; + memif->substream = NULL; + } +} + +static int mt8196_fe_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + unsigned int channels = params_channels(params); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int id = snd_soc_rtd_to_cpu(rtd, 0)->id; + struct mtk_base_afe_memif *memif = &afe->memif[id]; + const struct mtk_base_memif_data *data = memif->data; + int cm; + + afe_priv->cm_channels = channels; + + /* set channels */ + if (data->ch_num_shift >= 0) { + regmap_update_bits(afe->regmap, data->ch_num_reg, + data->ch_num_maskbit << data->ch_num_shift, + channels << data->ch_num_shift); 
+ } + + switch (id) { + case MT8196_MEMIF_VUL8: + case MT8196_MEMIF_VUL_CM0: + cm = CM0; + break; + case MT8196_MEMIF_VUL9: + case MT8196_MEMIF_VUL_CM1: + cm = CM1; + break; + case MT8196_MEMIF_VUL10: + case MT8196_MEMIF_VUL_CM2: + cm = CM2; + break; + default: + cm = CM0; + break; + } + + afe_priv->cm_rate[cm] = params_rate(params); + + return mtk_afe_fe_hw_params(substream, params, dai); +} + +static int mt8196_fe_trigger(struct snd_pcm_substream *substream, int cmd, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct snd_pcm_runtime *const runtime = substream->runtime; + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + int id = snd_soc_rtd_to_cpu(rtd, 0)->id; + struct mtk_base_afe_memif *memif = &afe->memif[id]; + int irq_id = memif->irq_usage; + struct mtk_base_afe_irq *irqs = &afe->irqs[irq_id]; + const struct mtk_base_irq_data *irq_data = irqs->irq_data; + unsigned int counter = runtime->period_size; + unsigned int rate = runtime->rate; + unsigned int tmp_reg; + int fs; + int ret; + + dev_dbg(afe->dev, "%s cmd %d, irq_id %d\n", memif->data->name, cmd, irq_id); + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + dev_dbg(afe->dev, "%s cmd %d, id %d\n", memif->data->name, cmd, id); + + ret = mtk_memif_set_enable(afe, id); + if (ret) { + dev_err(afe->dev, "id %d, memif enable fail.\n", id); + return ret; + } + + /* + * for small latency record + * ul memif need read some data before irq enable. + * the context of this ops triger is atmoic, so it cannot sleep. 
+ */ + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) + if ((runtime->period_size * 1000) / rate <= 10) + udelay(300); + + regmap_update_bits(afe->regmap, + irq_data->irq_cnt_reg, + irq_data->irq_cnt_maskbit << irq_data->irq_cnt_shift, + counter << irq_data->irq_cnt_shift); + + /* set irq fs */ + fs = afe->irq_fs(substream, rate); + if (fs < 0) + return -EINVAL; + + if (irq_data->irq_fs_reg >= 0) + regmap_update_bits(afe->regmap, + irq_data->irq_fs_reg, + irq_data->irq_fs_maskbit << irq_data->irq_fs_shift, + fs << irq_data->irq_fs_shift); + + /* enable interrupt */ + regmap_update_bits(afe->regmap, + irq_data->irq_en_reg, + 1 << irq_data->irq_en_shift, + 1 << irq_data->irq_en_shift); + + return 0; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + ret = mtk_memif_set_disable(afe, id); + if (ret) + dev_warn(afe->dev, "id %d, memif disable fail\n", id); + + /* disable interrupt */ + regmap_update_bits(afe->regmap, + irq_data->irq_en_reg, + 1 << irq_data->irq_en_shift, + 0 << irq_data->irq_en_shift); + + /* + * clear pending IRQ, if the register read as one, there is no need to write + * one to clear operaton. 
+ */ + regmap_read(afe->regmap, irq_data->irq_clr_reg, &tmp_reg); + regmap_update_bits(afe->regmap, irq_data->irq_clr_reg, + AFE_IRQ_CLR_CFG_MASK_SFT | AFE_IRQ_MISS_FLAG_CLR_CFG_MASK_SFT, + tmp_reg ^ (AFE_IRQ_CLR_CFG_MASK_SFT | + AFE_IRQ_MISS_FLAG_CLR_CFG_MASK_SFT)); + + return ret; + default: + return -EINVAL; + } +} + +static int mt8196_memif_fs(struct snd_pcm_substream *substream, + unsigned int rate) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct snd_soc_component *component = + snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME); + struct mtk_base_afe *afe = NULL; + unsigned int rate_reg; + + if (!component) + return -EINVAL; + + afe = snd_soc_component_get_drvdata(component); + if (!afe) + return -EINVAL; + + rate_reg = mt8196_rate_transform(afe->dev, rate); + + return rate_reg; +} + +static int mt8196_get_dai_fs(struct mtk_base_afe *afe, + int dai_id, unsigned int rate) +{ + return mt8196_rate_transform(afe->dev, rate); +} + +static int mt8196_irq_fs(struct snd_pcm_substream *substream, unsigned int rate) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct snd_soc_component *component = + snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME); + struct mtk_base_afe *afe = NULL; + + if (!component) + return -EINVAL; + afe = snd_soc_component_get_drvdata(component); + return mt8196_rate_transform(afe->dev, rate); +} + +static int mt8196_get_memif_pbuf_size(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + + if ((runtime->period_size * 1000) / runtime->rate > 10) + return MT8196_MEMIF_PBUF_SIZE_256_BYTES; + else + return MT8196_MEMIF_PBUF_SIZE_32_BYTES; +} + +/* FE DAIs */ +static const struct snd_soc_dai_ops mt8196_memif_dai_ops = { + .startup = mt8196_fe_startup, + .shutdown = mt8196_fe_shutdown, + .hw_params = mt8196_fe_hw_params, + .hw_free = mtk_afe_fe_hw_free, + .prepare = mtk_afe_fe_prepare, + .trigger = mt8196_fe_trigger, +}; + +#define MTK_PCM_RATES 
(SNDRV_PCM_RATE_8000_48000 |\ + SNDRV_PCM_RATE_88200 |\ + SNDRV_PCM_RATE_96000 |\ + SNDRV_PCM_RATE_176400 |\ + SNDRV_PCM_RATE_192000) + +#define MTK_PCM_DAI_RATES (SNDRV_PCM_RATE_8000 |\ + SNDRV_PCM_RATE_16000 |\ + SNDRV_PCM_RATE_32000 |\ + SNDRV_PCM_RATE_48000) + +#define MTK_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S24_LE |\ + SNDRV_PCM_FMTBIT_S32_LE) + +#define MT8196_FE_DAI(_name, _id, max_ch, dir) \ +{ \ + .name = #_name, \ + .id = _id, \ + .dir = { \ + .stream_name = #_name, \ + .channels_min = 1, \ + .channels_max = max_ch, \ + .rates = MTK_PCM_RATES, \ + .formats = MTK_PCM_FORMATS, \ + }, \ + .ops = &mt8196_memif_dai_ops, \ +} + +static struct snd_soc_dai_driver mt8196_memif_dai_driver[] = { + /* FE DAIs: memory intefaces to CPU */ + /* Playback */ + MT8196_FE_DAI(DL0, MT8196_MEMIF_DL0, 2, playback), + MT8196_FE_DAI(DL1, MT8196_MEMIF_DL1, 2, playback), + MT8196_FE_DAI(DL2, MT8196_MEMIF_DL2, 2, playback), + MT8196_FE_DAI(DL3, MT8196_MEMIF_DL3, 2, playback), + MT8196_FE_DAI(DL4, MT8196_MEMIF_DL4, 2, playback), + MT8196_FE_DAI(DL5, MT8196_MEMIF_DL5, 2, playback), + MT8196_FE_DAI(DL6, MT8196_MEMIF_DL6, 2, playback), + MT8196_FE_DAI(DL7, MT8196_MEMIF_DL7, 2, playback), + MT8196_FE_DAI(DL8, MT8196_MEMIF_DL8, 2, playback), + MT8196_FE_DAI(DL23, MT8196_MEMIF_DL23, 2, playback), + MT8196_FE_DAI(DL24, MT8196_MEMIF_DL24, 2, playback), + MT8196_FE_DAI(DL25, MT8196_MEMIF_DL25, 2, playback), + MT8196_FE_DAI(DL26, MT8196_MEMIF_DL26, 2, playback), + MT8196_FE_DAI(DL_4CH, MT8196_MEMIF_DL_4CH, 4, playback), + MT8196_FE_DAI(DL_24CH, MT8196_MEMIF_DL_24CH, 8, playback), + MT8196_FE_DAI(HDMI, MT8196_MEMIF_HDMI, 8, playback), + /* Capture */ + MT8196_FE_DAI(UL0, MT8196_MEMIF_VUL0, 2, capture), + MT8196_FE_DAI(UL1, MT8196_MEMIF_VUL1, 2, capture), + MT8196_FE_DAI(UL2, MT8196_MEMIF_VUL2, 2, capture), + MT8196_FE_DAI(UL3, MT8196_MEMIF_VUL3, 2, capture), + MT8196_FE_DAI(UL4, MT8196_MEMIF_VUL4, 2, capture), + MT8196_FE_DAI(UL5, MT8196_MEMIF_VUL5, 2, capture), + 
MT8196_FE_DAI(UL6, MT8196_MEMIF_VUL6, 2, capture), + MT8196_FE_DAI(UL7, MT8196_MEMIF_VUL7, 2, capture), + MT8196_FE_DAI(UL8, MT8196_MEMIF_VUL8, 2, capture), + MT8196_FE_DAI(UL9, MT8196_MEMIF_VUL9, 16, capture), + MT8196_FE_DAI(UL10, MT8196_MEMIF_VUL10, 2, capture), + MT8196_FE_DAI(UL24, MT8196_MEMIF_VUL24, 2, capture), + MT8196_FE_DAI(UL25, MT8196_MEMIF_VUL25, 2, capture), + MT8196_FE_DAI(UL26, MT8196_MEMIF_VUL26, 2, capture), + MT8196_FE_DAI(UL_CM0, MT8196_MEMIF_VUL_CM0, 8, capture), + MT8196_FE_DAI(UL_CM1, MT8196_MEMIF_VUL_CM1, 16, capture), + MT8196_FE_DAI(UL_CM2, MT8196_MEMIF_VUL_CM2, 32, capture), + MT8196_FE_DAI(UL_ETDM_IN0, MT8196_MEMIF_ETDM_IN0, 2, capture), + MT8196_FE_DAI(UL_ETDM_IN1, MT8196_MEMIF_ETDM_IN1, 2, capture), + MT8196_FE_DAI(UL_ETDM_IN2, MT8196_MEMIF_ETDM_IN2, 2, capture), + MT8196_FE_DAI(UL_ETDM_IN3, MT8196_MEMIF_ETDM_IN3, 2, capture), + MT8196_FE_DAI(UL_ETDM_IN4, MT8196_MEMIF_ETDM_IN4, 2, capture), + MT8196_FE_DAI(UL_ETDM_IN6, MT8196_MEMIF_ETDM_IN6, 2, capture), +}; + +static int ul_cm0_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + unsigned int channels = afe_priv->cm_channels; + + dev_dbg(afe->dev, "event 0x%x, name %s, channels %u\n", + event, w->name, channels); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + mt8196_enable_cm_bypass(afe, CM0, false); + mt8196_set_cm(afe, CM0, true, false, channels); + regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, + PDN_CM0_MASK_SFT, 0 << PDN_CM0_SFT); + break; + case SND_SOC_DAPM_POST_PMD: + mt8196_enable_cm_bypass(afe, CM0, true); + regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, + PDN_CM0_MASK_SFT, 1 << PDN_CM0_SFT); + break; + default: + break; + } + + return 0; +} + +static int ul_cm1_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int 
event) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + unsigned int channels = afe_priv->cm_channels; + + dev_dbg(afe->dev, "event 0x%x, name %s, channels %u\n", + event, w->name, channels); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + mt8196_enable_cm_bypass(afe, CM1, false); + mt8196_set_cm(afe, CM1, true, false, channels); + regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, + PDN_CM1_MASK_SFT, 0 << PDN_CM1_SFT); + break; + case SND_SOC_DAPM_POST_PMD: + mt8196_enable_cm_bypass(afe, CM1, true); + regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, + PDN_CM1_MASK_SFT, 1 << PDN_CM1_SFT); + break; + default: + break; + } + + return 0; +} + +static int ul_cm2_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + unsigned int channels = afe_priv->cm_channels; + + dev_dbg(afe->dev, "event 0x%x, name %s, channels %u\n", + event, w->name, channels); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + mt8196_enable_cm_bypass(afe, CM2, false); + mt8196_set_cm(afe, CM2, true, false, channels); + regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, + PDN_CM2_MASK_SFT, 0 << PDN_CM2_SFT); + break; + case SND_SOC_DAPM_POST_PMD: + mt8196_enable_cm_bypass(afe, CM2, true); + regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, + PDN_CM2_MASK_SFT, 1 << PDN_CM2_SFT); + break; + default: + break; + } + + return 0; +} + +/* + * dma widget & routes + * The mixer controls and routes are by no means fully implemented, + * only the ones that are intended to be used are, as other wise a fully + * interconnected switch bar mixer would introduce way too many unused + * controls. 
+ */ +static const struct snd_kcontrol_new memif_ul0_ch1_mix[] = { + /* Normal record */ + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN018_0, + I_ADDA_UL_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul0_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN019_0, + I_ADDA_UL_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul1_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN4_CH1", AFE_CONN020_4, + I_I2SIN4_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN6_CH1", AFE_CONN020_5, + I_I2SIN6_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul1_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN4_CH2", AFE_CONN021_4, + I_I2SIN4_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN6_CH2", AFE_CONN021_5, + I_I2SIN6_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul2_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN022_0, + I_ADDA_UL_CH3, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul2_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN023_0, + I_ADDA_UL_CH4, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul3_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH1", AFE_CONN024_4, + I_I2SIN0_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN1_CH1", AFE_CONN024_4, + I_I2SIN1_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN3_CH1", AFE_CONN024_4, + I_I2SIN3_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN4_CH1", AFE_CONN024_4, + I_I2SIN4_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul3_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH2", AFE_CONN025_4, + I_I2SIN0_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN1_CH2", AFE_CONN025_4, + I_I2SIN1_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN3_CH2", AFE_CONN025_4, + I_I2SIN3_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN4_CH2", AFE_CONN025_4, + I_I2SIN4_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul4_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", 
AFE_CONN026_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH1", AFE_CONN026_1, + I_DL0_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN026_1, + I_DL1_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1", AFE_CONN026_1, + I_DL6_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN026_1, + I_DL2_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN026_1, + I_DL3_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH1", AFE_CONN026_1, + I_DL_24CH_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH1", AFE_CONN026_4, + I_I2SIN0_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul4_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN027_0, + I_ADDA_UL_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH2", AFE_CONN027_1, + I_DL0_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN027_1, + I_DL1_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2", AFE_CONN027_1, + I_DL6_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN027_1, + I_DL2_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN027_1, + I_DL3_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH2", AFE_CONN027_1, + I_DL_24CH_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH2", AFE_CONN027_4, + I_I2SIN0_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul5_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN3_CH1", AFE_CONN028_4, + I_I2SIN3_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul5_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("I2SIN3_CH2", AFE_CONN029_4, + I_I2SIN3_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul6_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN030_0, + I_ADDA_UL_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul6_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN031_0, + I_ADDA_UL_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new memif_ul7_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", 
/*
 * Tail of memif_ul7_ch1_mix — the opening of this array precedes this chunk.
 */
		AFE_CONN032_0,
		I_ADDA_UL_CH1, 1, 0),
};

/*
 * Capture (UL) memif input mixers.
 *
 * Each memif_*_mix[] table lists the AFE interconnect inputs that may be
 * routed into one channel of a capture memory interface.  The
 * SOC_DAPM_SINGLE_AUTODISABLE(xname, reg, shift, max, invert) arguments are
 * the AFE_CONNxxx interconnect register and the input's bit position in it.
 */
static const struct snd_kcontrol_new memif_ul7_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN033_0, I_ADDA_UL_CH2, 1, 0),
};

static const struct snd_kcontrol_new memif_ul8_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN034_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul8_ch2_mix[] = {
	/*
	 * NOTE(review): this is the CH2 slot but both the control name and
	 * the input bit are ADDA_UL_CH1 (compare memif_ul7_ch2_mix /
	 * memif_ul9_ch2_mix, which use CH2).  Presumably intentional mono
	 * duplication — confirm against the AFE interconnect map.
	 */
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN035_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul9_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN036_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul9_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN037_0, I_ADDA_UL_CH2, 1, 0),
};

static const struct snd_kcontrol_new memif_ul10_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN038_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul10_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN039_0, I_ADDA_UL_CH2, 1, 0),
};

/* UL24..UL26 capture from the I2S inputs rather than the ADDA path. */
static const struct snd_kcontrol_new memif_ul24_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH1", AFE_CONN066_4, I_I2SIN0_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN6_CH1", AFE_CONN066_5, I_I2SIN6_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul24_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH2", AFE_CONN067_4, I_I2SIN0_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN6_CH2", AFE_CONN067_5, I_I2SIN6_CH2, 1, 0),
};

static const struct snd_kcontrol_new memif_ul25_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH1", AFE_CONN068_4, I_I2SIN0_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN6_CH1", AFE_CONN068_5, I_I2SIN6_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul25_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH2", AFE_CONN069_4, I_I2SIN0_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN6_CH2", AFE_CONN069_5, I_I2SIN6_CH2, 1, 0),
};

static const struct snd_kcontrol_new memif_ul26_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH1", AFE_CONN070_4, I_I2SIN0_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN6_CH1", AFE_CONN070_5, I_I2SIN6_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul26_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN0_CH2", AFE_CONN071_4, I_I2SIN0_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("I2SIN6_CH2", AFE_CONN071_5, I_I2SIN6_CH2, 1, 0),
};

/*
 * CM0 channel-merge input mixers (8 channels feeding UL8 when the CM0 path
 * is selected, see cm0_mux_map / CM0_UL_MUX below).
 */
static const struct snd_kcontrol_new memif_ul_cm0_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN040_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm0_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN041_0, I_ADDA_UL_CH2, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm0_ch3_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN042_0, I_ADDA_UL_CH3, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm0_ch4_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN043_0, I_ADDA_UL_CH4, 1, 0),
};

/*
 * NOTE(review): CM0 channels 5..8 all take ADDA_UL_CH1 only, unlike
 * channels 1..4 which map to their own ADDA channel — verify this is the
 * intended interconnect wiring for the upper CM0 slots.
 */
static const struct snd_kcontrol_new memif_ul_cm0_ch5_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN044_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm0_ch6_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN045_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm0_ch7_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN046_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm0_ch8_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN047_0, I_ADDA_UL_CH1, 1, 0),
};

/*
 * CM1 channel-merge input mixers (16 channels feeding UL9 via CM1_UL_MUX).
 * Channels 1..4 expose a single fixed ADDA input; channels 5..16 expose
 * all four ADDA inputs as selectable sources on one interconnect register.
 */
static const struct snd_kcontrol_new memif_ul_cm1_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN048_0, I_ADDA_UL_CH1, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN049_0, I_ADDA_UL_CH2, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch3_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN050_0, I_ADDA_UL_CH3, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch4_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN051_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch5_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN052_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN052_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN052_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN052_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch6_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN053_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN053_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN053_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN053_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch7_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN054_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN054_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN054_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN054_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch8_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN055_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN055_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN055_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN055_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch9_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN056_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN056_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN056_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN056_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch10_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN057_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN057_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN057_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN057_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch11_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN058_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN058_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN058_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN058_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch12_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN059_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN059_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN059_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN059_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch13_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN060_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN060_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN060_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN060_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch14_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN061_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN061_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN061_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN061_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch15_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN062_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN062_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN062_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN062_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm1_ch16_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN063_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN063_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN063_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN063_0, I_ADDA_UL_CH4, 1, 0),
};

/*
 * CM2 channel-merge input mixers (32 channels feeding UL10 via CM2_UL_MUX).
 * Channels 1..6 expose six ADDA inputs; channels 7..32 expose only four.
 * NOTE(review): the drop from six to four inputs at ch7 looks deliberate
 * but is worth confirming against the interconnect documentation.
 */
static const struct snd_kcontrol_new memif_ul_cm2_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN064_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN064_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN064_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN064_0, I_ADDA_UL_CH4, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH5", AFE_CONN064_0, I_ADDA_UL_CH5, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH6", AFE_CONN064_0, I_ADDA_UL_CH6, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch2_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN065_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN065_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN065_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN065_0, I_ADDA_UL_CH4, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH5", AFE_CONN065_0, I_ADDA_UL_CH5, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH6", AFE_CONN065_0, I_ADDA_UL_CH6, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch3_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN066_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN066_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN066_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN066_0, I_ADDA_UL_CH4, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH5", AFE_CONN066_0, I_ADDA_UL_CH5, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH6", AFE_CONN066_0, I_ADDA_UL_CH6, 1, 0)
};

static const struct snd_kcontrol_new memif_ul_cm2_ch4_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN067_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN067_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN067_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN067_0, I_ADDA_UL_CH4, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH5", AFE_CONN067_0, I_ADDA_UL_CH5, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH6", AFE_CONN067_0, I_ADDA_UL_CH6, 1, 0)
};

static const struct snd_kcontrol_new memif_ul_cm2_ch5_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN068_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN068_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN068_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN068_0, I_ADDA_UL_CH4, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH5", AFE_CONN068_0, I_ADDA_UL_CH5, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH6", AFE_CONN068_0, I_ADDA_UL_CH6, 1, 0)
};

static const struct snd_kcontrol_new memif_ul_cm2_ch6_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN069_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN069_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN069_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN069_0, I_ADDA_UL_CH4, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH5", AFE_CONN069_0, I_ADDA_UL_CH5, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH6", AFE_CONN069_0, I_ADDA_UL_CH6, 1, 0)
};

static const struct snd_kcontrol_new memif_ul_cm2_ch7_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN070_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN070_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN070_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN070_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch8_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN071_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN071_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN071_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN071_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch9_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN072_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN072_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN072_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN072_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch10_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN073_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN073_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN073_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN073_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch11_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN074_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN074_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN074_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN074_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch12_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN075_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN075_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN075_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN075_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch13_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN076_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN076_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN076_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN076_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch14_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN077_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN077_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN077_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN077_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch15_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN078_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN078_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN078_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN078_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch16_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN079_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN079_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN079_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN079_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch17_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN080_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN080_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN080_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN080_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch18_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN081_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN081_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN081_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN081_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch19_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN082_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN082_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN082_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN082_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch20_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN083_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN083_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN083_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN083_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch21_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN084_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN084_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN084_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN084_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch22_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN085_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN085_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN085_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN085_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch23_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN086_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN086_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN086_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN086_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch24_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN087_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN087_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN087_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN087_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch25_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN088_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN088_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN088_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN088_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch26_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN089_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN089_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN089_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN089_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch27_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN090_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN090_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN090_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN090_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch28_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN091_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN091_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN091_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN091_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch29_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN092_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN092_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN092_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN092_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch30_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN093_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN093_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN093_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN093_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch31_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN094_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN094_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN094_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN094_0, I_ADDA_UL_CH4, 1, 0),
};

static const struct snd_kcontrol_new memif_ul_cm2_ch32_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN095_0, I_ADDA_UL_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN095_0, I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN095_0, I_ADDA_UL_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4", AFE_CONN095_0, I_ADDA_UL_CH4, 1, 0),
};

/*
 * Output selectors for the channel-merge blocks: each UL memif either takes
 * its plain 2-channel path or the merged multi-channel path.
 */
static const char * const cm0_mux_map[] = {
	"UL8_2CH_PATH",
	"CM0_8CH_PATH",
};

static const char * const cm1_mux_map[] = {
	"UL9_2CH_PATH",
	"CM1_16CH_PATH",
};

static const char * const cm2_mux_map[] = {
	"UL10_2CH_PATH",
	"CM2_32CH_PATH",
};

static SOC_ENUM_SINGLE_DECL(ul_cm0_mux_map_enum, AFE_CM0_CON0,
			    AFE_CM0_OUTPUT_MUX_SFT, cm0_mux_map);

static SOC_ENUM_SINGLE_DECL(ul_cm1_mux_map_enum, AFE_CM1_CON0,
			    AFE_CM1_OUTPUT_MUX_SFT, cm1_mux_map);

static SOC_ENUM_SINGLE_DECL(ul_cm2_mux_map_enum, AFE_CM2_CON0,
			    AFE_CM2_OUTPUT_MUX_SFT, cm2_mux_map);

static const struct snd_kcontrol_new ul_cm0_mux_control =
	SOC_DAPM_ENUM("CM0_UL_MUX Route", ul_cm0_mux_map_enum);

static const struct snd_kcontrol_new ul_cm1_mux_control =
	SOC_DAPM_ENUM("CM1_UL_MUX Route", ul_cm1_mux_map_enum);

static const struct snd_kcontrol_new ul_cm2_mux_control =
	SOC_DAPM_ENUM("CM2_UL_MUX Route", ul_cm2_mux_map_enum);

/*
 * DAPM widgets for the capture memifs: one mixer widget per memif channel
 * (backed by the tables above), the CM0/CM1/CM2 path muxes, supply widgets
 * that toggle the channel-merge enable bits (with ul_cm*_event callbacks
 * defined elsewhere in this file), and dynamic pinctrl widgets.
 */
static const struct snd_soc_dapm_widget mt8196_memif_widgets[] = {
	/* inter-connections */
	SND_SOC_DAPM_MIXER("UL0_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul0_ch1_mix, ARRAY_SIZE(memif_ul0_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL0_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul0_ch2_mix, ARRAY_SIZE(memif_ul0_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL1_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul1_ch1_mix, ARRAY_SIZE(memif_ul1_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL1_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul1_ch2_mix, ARRAY_SIZE(memif_ul1_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL2_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul2_ch1_mix, ARRAY_SIZE(memif_ul2_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL2_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul2_ch2_mix, ARRAY_SIZE(memif_ul2_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL3_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul3_ch1_mix, ARRAY_SIZE(memif_ul3_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL3_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul3_ch2_mix, ARRAY_SIZE(memif_ul3_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL4_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul4_ch1_mix, ARRAY_SIZE(memif_ul4_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL4_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul4_ch2_mix, ARRAY_SIZE(memif_ul4_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL5_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul5_ch1_mix, ARRAY_SIZE(memif_ul5_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL5_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul5_ch2_mix, ARRAY_SIZE(memif_ul5_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL6_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul6_ch1_mix, ARRAY_SIZE(memif_ul6_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL6_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul6_ch2_mix, ARRAY_SIZE(memif_ul6_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL7_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul7_ch1_mix, ARRAY_SIZE(memif_ul7_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL7_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul7_ch2_mix, ARRAY_SIZE(memif_ul7_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL8_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul8_ch1_mix, ARRAY_SIZE(memif_ul8_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL8_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul8_ch2_mix, ARRAY_SIZE(memif_ul8_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL9_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul9_ch1_mix, ARRAY_SIZE(memif_ul9_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL9_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul9_ch2_mix, ARRAY_SIZE(memif_ul9_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL10_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul10_ch1_mix, ARRAY_SIZE(memif_ul10_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL10_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul10_ch2_mix, ARRAY_SIZE(memif_ul10_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL24_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul24_ch1_mix, ARRAY_SIZE(memif_ul24_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL24_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul24_ch2_mix, ARRAY_SIZE(memif_ul24_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL25_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul25_ch1_mix, ARRAY_SIZE(memif_ul25_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL25_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul25_ch2_mix, ARRAY_SIZE(memif_ul25_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL26_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul26_ch1_mix, ARRAY_SIZE(memif_ul26_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL26_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul26_ch2_mix, ARRAY_SIZE(memif_ul26_ch2_mix)),

	SND_SOC_DAPM_MIXER("UL_CM0_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm0_ch1_mix, ARRAY_SIZE(memif_ul_cm0_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL_CM0_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm0_ch2_mix, ARRAY_SIZE(memif_ul_cm0_ch2_mix)),
	SND_SOC_DAPM_MIXER("UL_CM0_CH3", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm0_ch3_mix, ARRAY_SIZE(memif_ul_cm0_ch3_mix)),
	SND_SOC_DAPM_MIXER("UL_CM0_CH4", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm0_ch4_mix, ARRAY_SIZE(memif_ul_cm0_ch4_mix)),
	SND_SOC_DAPM_MIXER("UL_CM0_CH5", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm0_ch5_mix, ARRAY_SIZE(memif_ul_cm0_ch5_mix)),
	SND_SOC_DAPM_MIXER("UL_CM0_CH6", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm0_ch6_mix, ARRAY_SIZE(memif_ul_cm0_ch6_mix)),
	SND_SOC_DAPM_MIXER("UL_CM0_CH7", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm0_ch7_mix, ARRAY_SIZE(memif_ul_cm0_ch7_mix)),
	SND_SOC_DAPM_MIXER("UL_CM0_CH8", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm0_ch8_mix, ARRAY_SIZE(memif_ul_cm0_ch8_mix)),
	SND_SOC_DAPM_MUX("CM0_UL_MUX", SND_SOC_NOPM, 0, 0,
			 &ul_cm0_mux_control),

	SND_SOC_DAPM_MIXER("UL_CM1_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch1_mix, ARRAY_SIZE(memif_ul_cm1_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch2_mix, ARRAY_SIZE(memif_ul_cm1_ch2_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH3", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch3_mix, ARRAY_SIZE(memif_ul_cm1_ch3_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH4", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch4_mix, ARRAY_SIZE(memif_ul_cm1_ch4_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH5", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch5_mix, ARRAY_SIZE(memif_ul_cm1_ch5_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH6", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch6_mix, ARRAY_SIZE(memif_ul_cm1_ch6_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH7", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch7_mix, ARRAY_SIZE(memif_ul_cm1_ch7_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH8", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch8_mix, ARRAY_SIZE(memif_ul_cm1_ch8_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH9", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch9_mix, ARRAY_SIZE(memif_ul_cm1_ch9_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH10", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch10_mix, ARRAY_SIZE(memif_ul_cm1_ch10_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH11", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch11_mix, ARRAY_SIZE(memif_ul_cm1_ch11_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH12", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch12_mix, ARRAY_SIZE(memif_ul_cm1_ch12_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH13", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch13_mix, ARRAY_SIZE(memif_ul_cm1_ch13_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH14", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch14_mix, ARRAY_SIZE(memif_ul_cm1_ch14_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH15", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch15_mix, ARRAY_SIZE(memif_ul_cm1_ch15_mix)),
	SND_SOC_DAPM_MIXER("UL_CM1_CH16", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm1_ch16_mix, ARRAY_SIZE(memif_ul_cm1_ch16_mix)),
	SND_SOC_DAPM_MUX("CM1_UL_MUX", SND_SOC_NOPM, 0, 0,
			 &ul_cm1_mux_control),

	SND_SOC_DAPM_MIXER("UL_CM2_CH1", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch1_mix, ARRAY_SIZE(memif_ul_cm2_ch1_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH2", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch2_mix, ARRAY_SIZE(memif_ul_cm2_ch2_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH3", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch3_mix, ARRAY_SIZE(memif_ul_cm2_ch3_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH4", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch4_mix, ARRAY_SIZE(memif_ul_cm2_ch4_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH5", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch5_mix, ARRAY_SIZE(memif_ul_cm2_ch5_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH6", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch6_mix, ARRAY_SIZE(memif_ul_cm2_ch6_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH7", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch7_mix, ARRAY_SIZE(memif_ul_cm2_ch7_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH8", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch8_mix, ARRAY_SIZE(memif_ul_cm2_ch8_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH9", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch9_mix, ARRAY_SIZE(memif_ul_cm2_ch9_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH10", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch10_mix, ARRAY_SIZE(memif_ul_cm2_ch10_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH11", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch11_mix, ARRAY_SIZE(memif_ul_cm2_ch11_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH12", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch12_mix, ARRAY_SIZE(memif_ul_cm2_ch12_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH13", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch13_mix, ARRAY_SIZE(memif_ul_cm2_ch13_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH14", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch14_mix, ARRAY_SIZE(memif_ul_cm2_ch14_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH15", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch15_mix, ARRAY_SIZE(memif_ul_cm2_ch15_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH16", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch16_mix, ARRAY_SIZE(memif_ul_cm2_ch16_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH17", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch17_mix, ARRAY_SIZE(memif_ul_cm2_ch17_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH18", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch18_mix, ARRAY_SIZE(memif_ul_cm2_ch18_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH19", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch19_mix, ARRAY_SIZE(memif_ul_cm2_ch19_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH20", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch20_mix, ARRAY_SIZE(memif_ul_cm2_ch20_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH21", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch21_mix, ARRAY_SIZE(memif_ul_cm2_ch21_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH22", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch22_mix, ARRAY_SIZE(memif_ul_cm2_ch22_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH23", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch23_mix, ARRAY_SIZE(memif_ul_cm2_ch23_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH24", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch24_mix, ARRAY_SIZE(memif_ul_cm2_ch24_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH25", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch25_mix, ARRAY_SIZE(memif_ul_cm2_ch25_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH26", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch26_mix, ARRAY_SIZE(memif_ul_cm2_ch26_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH27", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch27_mix, ARRAY_SIZE(memif_ul_cm2_ch27_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH28", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch28_mix, ARRAY_SIZE(memif_ul_cm2_ch28_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH29", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch29_mix, ARRAY_SIZE(memif_ul_cm2_ch29_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH30", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch30_mix, ARRAY_SIZE(memif_ul_cm2_ch30_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH31", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch31_mix, ARRAY_SIZE(memif_ul_cm2_ch31_mix)),
	SND_SOC_DAPM_MIXER("UL_CM2_CH32", SND_SOC_NOPM, 0, 0,
			   memif_ul_cm2_ch32_mix, ARRAY_SIZE(memif_ul_cm2_ch32_mix)),
	SND_SOC_DAPM_MUX("CM2_UL_MUX", SND_SOC_NOPM, 0, 0,
			 &ul_cm2_mux_control),

	/* ul_cm*_event handlers are defined elsewhere in this file. */
	SND_SOC_DAPM_SUPPLY("CM0_Enable",
			    AFE_CM0_CON0, AFE_CM0_ON_SFT, 0,
			    ul_cm0_event,
			    SND_SOC_DAPM_PRE_PMU |
			    SND_SOC_DAPM_PRE_PMD),

	SND_SOC_DAPM_SUPPLY("CM1_Enable",
			    AFE_CM1_CON0, AFE_CM1_ON_SFT, 0,
			    ul_cm1_event,
			    SND_SOC_DAPM_PRE_PMU |
			    SND_SOC_DAPM_PRE_PMD),

	SND_SOC_DAPM_SUPPLY("CM2_Enable",
			    AFE_CM2_CON0, AFE_CM2_ON_SFT, 0,
			    ul_cm2_event,
			    SND_SOC_DAPM_PRE_PMU |
			    SND_SOC_DAPM_PRE_PMD),

	/* dynamic pinctrl */
	SND_SOC_DAPM_PINCTRL("I2S3_PIN", "aud-gpio-i2s3-on", "aud-gpio-i2s3-off"),
	SND_SOC_DAPM_PINCTRL("I2S4_PIN", "aud-gpio-i2s4-on", "aud-gpio-i2s4-off"),
	SND_SOC_DAPM_PINCTRL("I2S6_PIN", "aud-gpio-i2s6-on", "aud-gpio-i2s6-off"),
	SND_SOC_DAPM_PINCTRL("AP_DMIC0_PIN", "aud-gpio-ap-dmic-on", "aud-gpio-ap-dmic-off"),
	SND_SOC_DAPM_PINCTRL("AP_DMIC1_PIN", "aud-gpio-ap-dmic1-on", "aud-gpio-ap-dmic1-off"),
};

/*
 * DAPM routes: {sink, control, source}.  This array continues past the end
 * of this chunk.
 */
static const struct snd_soc_dapm_route mt8196_memif_routes[] = {
	{"UL0", NULL, "UL0_CH1"},
	{"UL0", NULL, "UL0_CH2"},
	/* Normal record */
	{"UL0_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"},
	{"UL0_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"},

	{"UL1", NULL, "UL1_CH1"},
	{"UL1", NULL, "UL1_CH2"},
	{"UL1_CH1", "I2SIN4_CH1", "I2SIN4"},
	{"UL1_CH2", "I2SIN4_CH2", "I2SIN4"},
	{"UL1_CH1", "I2SIN6_CH1", "I2SIN6"},
	{"UL1_CH2", "I2SIN6_CH2", "I2SIN6"},

	{"UL2", NULL, "UL2_CH1"},
	{"UL2", NULL, "UL2_CH2"},
	{"UL2_CH1", "ADDA_UL_CH3", "ADDA_CH34_UL_Mux"},
	{"UL2_CH2", "ADDA_UL_CH4", "ADDA_CH34_UL_Mux"},

	{"UL3", NULL, "UL3_CH1"},
	{"UL3", NULL, "UL3_CH2"},
	{"UL3_CH1", "I2SIN0_CH1", "I2SIN0"},
	{"UL3_CH2", "I2SIN0_CH2", "I2SIN0"},
	{"UL3_CH1", "I2SIN1_CH1", "I2SIN1"},
	{"UL3_CH2", "I2SIN1_CH2", "I2SIN1"},
	{"UL3_CH1", "I2SIN3_CH1", "I2SIN3"},
	{"UL3_CH2", "I2SIN3_CH2", "I2SIN3"},
	{"UL3_CH1", "I2SIN4_CH1", "I2SIN4"},
	{"UL3_CH2", "I2SIN4_CH2", "I2SIN4"},

	{"UL4", NULL, "UL4_CH1"},
	{"UL4", NULL, "UL4_CH2"},
	{"UL4_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"},
	{"UL4_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"},
	{"UL4_CH1", "I2SIN0_CH1", "I2SIN0"},
	{"UL4_CH2", "I2SIN0_CH2", "I2SIN0"},

	{"UL5", NULL, "UL5_CH1"},
	{"UL5", NULL, "UL5_CH2"},
+ {"UL5_CH1", "I2SIN3_CH1", "I2SIN3"}, + {"UL5_CH2", "I2SIN3_CH2", "I2SIN3"}, + + {"UL6", NULL, "UL6_CH1"}, + {"UL6", NULL, "UL6_CH2"}, + {"UL6_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"}, + {"UL6_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"}, + + {"UL7", NULL, "UL7_CH1"}, + {"UL7", NULL, "UL7_CH2"}, + {"UL7_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"}, + {"UL7_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"}, + + {"UL8", NULL, "CM0_UL_MUX"}, + {"CM0_UL_MUX", "UL8_2CH_PATH", "UL8_CH1"}, + {"CM0_UL_MUX", "UL8_2CH_PATH", "UL8_CH2"}, + {"CM0_UL_MUX", "CM0_8CH_PATH", "UL_CM0_CH1"}, + {"CM0_UL_MUX", "CM0_8CH_PATH", "UL_CM0_CH2"}, + {"CM0_UL_MUX", "CM0_8CH_PATH", "UL_CM0_CH3"}, + {"CM0_UL_MUX", "CM0_8CH_PATH", "UL_CM0_CH4"}, + {"CM0_UL_MUX", "CM0_8CH_PATH", "UL_CM0_CH5"}, + {"CM0_UL_MUX", "CM0_8CH_PATH", "UL_CM0_CH6"}, + {"CM0_UL_MUX", "CM0_8CH_PATH", "UL_CM0_CH7"}, + {"CM0_UL_MUX", "CM0_8CH_PATH", "UL_CM0_CH8"}, + + {"UL_CM0", NULL, "CM0_Enable"}, + + /* UL9 */ + {"UL9", NULL, "CM1_UL_MUX"}, + {"CM1_UL_MUX", "UL9_2CH_PATH", "UL9_CH1"}, + {"CM1_UL_MUX", "UL9_2CH_PATH", "UL9_CH2"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH1"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH2"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH3"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH4"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH5"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH6"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH7"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH8"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH9"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH10"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH11"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH12"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH13"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH14"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH15"}, + {"CM1_UL_MUX", "CM1_16CH_PATH", "UL_CM1_CH16"}, + + {"UL_CM1", NULL, "CM1_Enable"}, + + {"UL9_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"}, + {"UL9_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"}, + + {"UL10", NULL, 
"CM2_UL_MUX"}, + {"CM2_UL_MUX", "UL10_2CH_PATH", "UL10_CH1"}, + {"CM2_UL_MUX", "UL10_2CH_PATH", "UL10_CH2"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH1"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH2"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH3"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH4"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH5"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH6"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH7"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH8"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH9"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH10"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH11"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH12"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH13"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH14"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH15"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH16"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH17"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH18"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH19"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH20"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH21"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH22"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH23"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH24"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH25"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH26"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH27"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH28"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH29"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH30"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH31"}, + {"CM2_UL_MUX", "CM2_32CH_PATH", "UL_CM2_CH32"}, + + {"UL_CM2", NULL, "CM2_Enable"}, + + {"UL10_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"}, + {"UL10_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"}, + + {"UL24", NULL, "UL24_CH1"}, + {"UL24", NULL, "UL24_CH2"}, + {"UL24_CH1", "I2SIN6_CH1", "I2SIN6"}, + {"UL24_CH2", "I2SIN6_CH2", "I2SIN6"}, + {"UL24_CH1", 
"I2SIN0_CH1", "I2SIN0"}, + {"UL24_CH2", "I2SIN0_CH2", "I2SIN0"}, + + {"UL25", NULL, "UL25_CH1"}, + {"UL25", NULL, "UL25_CH2"}, + {"UL25_CH1", "I2SIN6_CH1", "I2SIN6"}, + {"UL25_CH2", "I2SIN6_CH2", "I2SIN6"}, + {"UL25_CH1", "I2SIN0_CH1", "I2SIN0"}, + {"UL25_CH2", "I2SIN0_CH2", "I2SIN0"}, + + {"UL26", NULL, "UL26_CH1"}, + {"UL26", NULL, "UL26_CH2"}, + {"UL26_CH1", "I2SIN6_CH1", "I2SIN6"}, + {"UL26_CH2", "I2SIN6_CH2", "I2SIN6"}, + {"UL26_CH1", "I2SIN0_CH1", "I2SIN0"}, + {"UL26_CH2", "I2SIN0_CH2", "I2SIN0"}, + + {"UL_CM0", NULL, "UL_CM0_CH1"}, + {"UL_CM0", NULL, "UL_CM0_CH2"}, + {"UL_CM0", NULL, "UL_CM0_CH3"}, + {"UL_CM0", NULL, "UL_CM0_CH4"}, + {"UL_CM0", NULL, "UL_CM0_CH5"}, + {"UL_CM0", NULL, "UL_CM0_CH6"}, + {"UL_CM0", NULL, "UL_CM0_CH7"}, + {"UL_CM0", NULL, "UL_CM0_CH8"}, + {"UL_CM0_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"}, + {"UL_CM0_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"}, + {"UL_CM0_CH3", "ADDA_UL_CH3", "ADDA_CH34_UL_Mux"}, + {"UL_CM0_CH4", "ADDA_UL_CH4", "ADDA_CH34_UL_Mux"}, + + {"UL_CM1", NULL, "UL_CM1_CH1"}, + {"UL_CM1", NULL, "UL_CM1_CH2"}, + {"UL_CM1", NULL, "UL_CM1_CH3"}, + {"UL_CM1", NULL, "UL_CM1_CH4"}, + {"UL_CM1", NULL, "UL_CM1_CH5"}, + {"UL_CM1", NULL, "UL_CM1_CH6"}, + {"UL_CM1", NULL, "UL_CM1_CH7"}, + {"UL_CM1", NULL, "UL_CM1_CH8"}, + {"UL_CM1", NULL, "UL_CM1_CH9"}, + {"UL_CM1", NULL, "UL_CM1_CH10"}, + {"UL_CM1", NULL, "UL_CM1_CH11"}, + {"UL_CM1", NULL, "UL_CM1_CH12"}, + {"UL_CM1", NULL, "UL_CM1_CH13"}, + {"UL_CM1", NULL, "UL_CM1_CH14"}, + {"UL_CM1", NULL, "UL_CM1_CH15"}, + {"UL_CM1", NULL, "UL_CM1_CH16"}, + {"UL_CM1_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"}, + {"UL_CM1_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"}, + {"UL_CM1_CH3", "ADDA_UL_CH3", "ADDA_CH34_UL_Mux"}, + {"UL_CM1_CH4", "ADDA_UL_CH4", "ADDA_CH34_UL_Mux"}, + + {"UL_CM2", NULL, "UL_CM2_CH1"}, + {"UL_CM2", NULL, "UL_CM2_CH2"}, + {"UL_CM2", NULL, "UL_CM2_CH3"}, + {"UL_CM2", NULL, "UL_CM2_CH4"}, + {"UL_CM2", NULL, "UL_CM2_CH5"}, + {"UL_CM2", NULL, "UL_CM2_CH6"}, + {"UL_CM2", NULL, "UL_CM2_CH7"}, + {"UL_CM2", NULL, 
"UL_CM2_CH8"}, + {"UL_CM2", NULL, "UL_CM2_CH9"}, + {"UL_CM2", NULL, "UL_CM2_CH10"}, + {"UL_CM2", NULL, "UL_CM2_CH11"}, + {"UL_CM2", NULL, "UL_CM2_CH12"}, + {"UL_CM2", NULL, "UL_CM2_CH13"}, + {"UL_CM2", NULL, "UL_CM2_CH14"}, + {"UL_CM2", NULL, "UL_CM2_CH15"}, + {"UL_CM2", NULL, "UL_CM2_CH16"}, + {"UL_CM2", NULL, "UL_CM2_CH17"}, + {"UL_CM2", NULL, "UL_CM2_CH18"}, + {"UL_CM2", NULL, "UL_CM2_CH19"}, + {"UL_CM2", NULL, "UL_CM2_CH20"}, + {"UL_CM2", NULL, "UL_CM2_CH21"}, + {"UL_CM2", NULL, "UL_CM2_CH22"}, + {"UL_CM2", NULL, "UL_CM2_CH23"}, + {"UL_CM2", NULL, "UL_CM2_CH24"}, + {"UL_CM2", NULL, "UL_CM2_CH25"}, + {"UL_CM2", NULL, "UL_CM2_CH26"}, + {"UL_CM2", NULL, "UL_CM2_CH27"}, + {"UL_CM2", NULL, "UL_CM2_CH28"}, + {"UL_CM2", NULL, "UL_CM2_CH29"}, + {"UL_CM2", NULL, "UL_CM2_CH30"}, + {"UL_CM2", NULL, "UL_CM2_CH31"}, + {"UL_CM2", NULL, "UL_CM2_CH32"}, + {"UL_CM2_CH1", "ADDA_UL_CH1", "ADDA_UL_Mux"}, + {"UL_CM2_CH2", "ADDA_UL_CH2", "ADDA_UL_Mux"}, + {"UL_CM2_CH3", "ADDA_UL_CH3", "ADDA_CH34_UL_Mux"}, + {"UL_CM2_CH4", "ADDA_UL_CH4", "ADDA_CH34_UL_Mux"}, + + /* Audio Pin */ + {"I2SOUT4", NULL, "I2S4_PIN"}, + {"I2SIN4", NULL, "I2S4_PIN"}, + {"I2SOUT6", NULL, "I2S6_PIN"}, + {"I2SIN6", NULL, "I2S6_PIN"}, + {"I2SOUT3", NULL, "I2S3_PIN"}, + {"I2SIN3", NULL, "I2S3_PIN"}, + {"AP DMIC Capture", NULL, "AP_DMIC0_PIN"}, + {"AP DMIC CH34 Capture", NULL, "AP_DMIC1_PIN"}, +}; + +#define MT8196_DL_MEMIF(_id) \ + [MT8196_MEMIF_##_id] = { \ + .name = #_id, \ + .id = MT8196_MEMIF_##_id, \ + .reg_ofs_base = AFE_##_id##_BASE, \ + .reg_ofs_cur = AFE_##_id##_CUR, \ + .reg_ofs_end = AFE_##_id##_END, \ + .reg_ofs_base_msb = AFE_##_id##_BASE_MSB, \ + .reg_ofs_cur_msb = AFE_##_id##_CUR_MSB, \ + .reg_ofs_end_msb = AFE_##_id##_END_MSB, \ + .fs_reg = AFE_##_id##_CON0, \ + .fs_shift = _id##_SEL_FS_SFT, \ + .fs_maskbit = _id##_SEL_FS_MASK, \ + .mono_reg = AFE_##_id##_CON0, \ + .mono_shift = _id##_MONO_SFT, \ + .enable_reg = AFE_##_id##_CON0, \ + .enable_shift = _id##_ON_SFT, \ + .hd_reg = AFE_##_id##_CON0, \ + 
.hd_shift = _id##_HD_MODE_SFT, \ + .hd_align_reg = AFE_##_id##_CON0, \ + .hd_align_mshift = _id##_HALIGN_SFT, \ + .agent_disable_reg = -1, \ + .agent_disable_shift = -1, \ + .msb_reg = -1, \ + .msb_shift = -1, \ + .pbuf_reg = AFE_##_id##_CON0, \ + .pbuf_mask = _id##_PBUF_SIZE_MASK, \ + .pbuf_shift = _id##_PBUF_SIZE_SFT, \ + .minlen_reg = AFE_##_id##_CON0, \ + .minlen_mask = _id##_MINLEN_MASK, \ + .minlen_shift = _id##_MINLEN_SFT, \ +} + +#define MT8196_MULTI_DL_MEMIF(_id) \ + [MT8196_MEMIF_##_id] = { \ + .name = #_id, \ + .id = MT8196_MEMIF_##_id, \ + .reg_ofs_base = AFE_##_id##_BASE, \ + .reg_ofs_cur = AFE_##_id##_CUR, \ + .reg_ofs_end = AFE_##_id##_END, \ + .reg_ofs_base_msb = AFE_##_id##_BASE_MSB, \ + .reg_ofs_cur_msb = AFE_##_id##_CUR_MSB, \ + .reg_ofs_end_msb = AFE_##_id##_END_MSB, \ + .fs_reg = AFE_##_id##_CON0, \ + .fs_shift = _id##_SEL_FS_SFT, \ + .fs_maskbit = _id##_SEL_FS_MASK, \ + .mono_reg = -1, \ + .mono_shift = -1, \ + .enable_reg = AFE_##_id##_CON0, \ + .enable_shift = _id##_ON_SFT, \ + .hd_reg = AFE_##_id##_CON0, \ + .hd_shift = _id##_HD_MODE_SFT, \ + .hd_align_reg = AFE_##_id##_CON0, \ + .hd_align_mshift = _id##_HALIGN_SFT, \ + .agent_disable_reg = -1, \ + .agent_disable_shift = -1, \ + .msb_reg = -1, \ + .msb_shift = -1, \ + .pbuf_reg = AFE_##_id##_CON0, \ + .pbuf_mask = _id##_PBUF_SIZE_MASK, \ + .pbuf_shift = _id##_PBUF_SIZE_SFT, \ + .minlen_reg = AFE_##_id##_CON0, \ + .minlen_mask = _id##_MINLEN_MASK, \ + .minlen_shift = _id##_MINLEN_SFT, \ + .ch_num_reg = AFE_##_id##_CON0, \ + .ch_num_maskbit = _id##_NUM_MASK, \ + .ch_num_shift = _id##_NUM_SFT, \ +} + +#define MT8196_UL_MEMIF(_id, _fs_shift, _fs_maskbit, _mono_shift) \ + [MT8196_MEMIF_##_id] = { \ + .name = #_id, \ + .id = MT8196_MEMIF_##_id, \ + .reg_ofs_base = AFE_##_id##_BASE, \ + .reg_ofs_cur = AFE_##_id##_CUR, \ + .reg_ofs_end = AFE_##_id##_END, \ + .reg_ofs_base_msb = AFE_##_id##_BASE_MSB, \ + .reg_ofs_cur_msb = AFE_##_id##_CUR_MSB, \ + .reg_ofs_end_msb = AFE_##_id##_END_MSB, \ + .fs_reg 
= AFE_##_id##_CON0, \ + .fs_shift = _fs_shift, \ + .fs_maskbit = _fs_maskbit, \ + .mono_reg = AFE_##_id##_CON0, \ + .mono_shift = _mono_shift, \ + .enable_reg = AFE_##_id##_CON0, \ + .enable_shift = _id##_ON_SFT, \ + .hd_reg = AFE_##_id##_CON0, \ + .hd_shift = _id##_HD_MODE_SFT, \ + .hd_align_reg = AFE_##_id##_CON0, \ + .hd_align_mshift = _id##_HALIGN_SFT, \ + .agent_disable_reg = -1, \ + .agent_disable_shift = -1, \ + .msb_reg = -1, \ + .msb_shift = -1, \ + } + +/* For convenience with macros: missing register fields */ +#define HDMI_SEL_FS_SFT -1 +#define HDMI_SEL_FS_MASK -1 + +/* For convenience with macros: register name differences */ +#define AFE_HDMI_BASE AFE_HDMI_OUT_BASE +#define AFE_HDMI_CUR AFE_HDMI_OUT_CUR +#define AFE_HDMI_END AFE_HDMI_OUT_END +#define AFE_HDMI_BASE_MSB AFE_HDMI_OUT_BASE_MSB +#define AFE_HDMI_CUR_MSB AFE_HDMI_OUT_CUR_MSB +#define AFE_HDMI_END_MSB AFE_HDMI_OUT_END_MSB +#define AFE_HDMI_CON0 AFE_HDMI_OUT_CON0 +#define HDMI_ON_SFT HDMI_OUT_ON_SFT +#define HDMI_HD_MODE_SFT HDMI_OUT_HD_MODE_SFT +#define HDMI_HALIGN_SFT HDMI_OUT_HALIGN_SFT +#define HDMI_PBUF_SIZE_MASK HDMI_OUT_PBUF_SIZE_MASK +#define HDMI_PBUF_SIZE_SFT HDMI_OUT_PBUF_SIZE_SFT +#define HDMI_MINLEN_MASK HDMI_OUT_MINLEN_MASK +#define HDMI_MINLEN_SFT HDMI_OUT_MINLEN_SFT +#define HDMI_NUM_MASK HDMI_CH_NUM_MASK +#define HDMI_NUM_SFT HDMI_CH_NUM_SFT + +static const struct mtk_base_memif_data memif_data[MT8196_MEMIF_NUM] = { + MT8196_DL_MEMIF(DL0), + MT8196_DL_MEMIF(DL1), + MT8196_DL_MEMIF(DL2), + MT8196_DL_MEMIF(DL3), + MT8196_DL_MEMIF(DL4), + MT8196_DL_MEMIF(DL5), + MT8196_DL_MEMIF(DL6), + MT8196_DL_MEMIF(DL7), + MT8196_DL_MEMIF(DL8), + MT8196_DL_MEMIF(DL23), + MT8196_DL_MEMIF(DL24), + MT8196_DL_MEMIF(DL25), + MT8196_DL_MEMIF(DL26), + MT8196_MULTI_DL_MEMIF(DL_4CH), + MT8196_MULTI_DL_MEMIF(DL_24CH), + MT8196_MULTI_DL_MEMIF(HDMI), + MT8196_UL_MEMIF(VUL0, VUL0_SEL_FS_SFT, VUL0_SEL_FS_MASK, VUL0_MONO_SFT), + MT8196_UL_MEMIF(VUL1, VUL1_SEL_FS_SFT, VUL1_SEL_FS_MASK, VUL1_MONO_SFT), + 
MT8196_UL_MEMIF(VUL2, VUL2_SEL_FS_SFT, VUL2_SEL_FS_MASK, VUL2_MONO_SFT), + MT8196_UL_MEMIF(VUL3, VUL3_SEL_FS_SFT, VUL3_SEL_FS_MASK, VUL3_MONO_SFT), + MT8196_UL_MEMIF(VUL4, VUL4_SEL_FS_SFT, VUL4_SEL_FS_MASK, VUL4_MONO_SFT), + MT8196_UL_MEMIF(VUL5, VUL5_SEL_FS_SFT, VUL5_SEL_FS_MASK, VUL5_MONO_SFT), + MT8196_UL_MEMIF(VUL6, VUL6_SEL_FS_SFT, VUL6_SEL_FS_MASK, VUL6_MONO_SFT), + MT8196_UL_MEMIF(VUL7, VUL7_SEL_FS_SFT, VUL7_SEL_FS_MASK, VUL7_MONO_SFT), + MT8196_UL_MEMIF(VUL8, VUL8_SEL_FS_SFT, VUL8_SEL_FS_MASK, VUL8_MONO_SFT), + MT8196_UL_MEMIF(VUL9, VUL9_SEL_FS_SFT, VUL9_SEL_FS_MASK, VUL9_MONO_SFT), + MT8196_UL_MEMIF(VUL10, VUL10_SEL_FS_SFT, VUL10_SEL_FS_MASK, VUL10_MONO_SFT), + MT8196_UL_MEMIF(VUL24, VUL24_SEL_FS_SFT, VUL24_SEL_FS_MASK, VUL24_MONO_SFT), + MT8196_UL_MEMIF(VUL25, VUL25_SEL_FS_SFT, VUL25_SEL_FS_MASK, VUL25_MONO_SFT), + MT8196_UL_MEMIF(VUL26, VUL26_SEL_FS_SFT, VUL26_SEL_FS_MASK, VUL26_MONO_SFT), + MT8196_UL_MEMIF(VUL_CM0, -1, -1, -1), + MT8196_UL_MEMIF(VUL_CM1, -1, -1, -1), + MT8196_UL_MEMIF(VUL_CM2, -1, -1, -1), + MT8196_UL_MEMIF(ETDM_IN0, REG_FS_TIMING_SEL_SFT, REG_FS_TIMING_SEL_MASK, -1), + MT8196_UL_MEMIF(ETDM_IN1, REG_FS_TIMING_SEL_SFT, REG_FS_TIMING_SEL_MASK, -1), + MT8196_UL_MEMIF(ETDM_IN2, REG_FS_TIMING_SEL_SFT, REG_FS_TIMING_SEL_MASK, -1), + MT8196_UL_MEMIF(ETDM_IN3, REG_FS_TIMING_SEL_SFT, REG_FS_TIMING_SEL_MASK, -1), + MT8196_UL_MEMIF(ETDM_IN4, REG_FS_TIMING_SEL_SFT, REG_FS_TIMING_SEL_MASK, -1), + MT8196_UL_MEMIF(ETDM_IN6, REG_FS_TIMING_SEL_SFT, REG_FS_TIMING_SEL_MASK, -1), +}; + +#define MT8196_AFE_IRQ(_id) \ + [MT8196_IRQ_##_id] = { \ + .id = MT8196_IRQ_##_id, \ + .irq_cnt_reg = AFE_IRQ##_id##_MCU_CFG1, \ + .irq_cnt_shift = AFE_IRQ_CNT_SHIFT, \ + .irq_cnt_maskbit = AFE_IRQ_CNT_MASK, \ + .irq_fs_reg = AFE_IRQ##_id##_MCU_CFG0, \ + .irq_fs_shift = AFE_IRQ##_id##_MCU_FS_SFT, \ + .irq_fs_maskbit = AFE_IRQ##_id##_MCU_FS_MASK, \ + .irq_en_reg = AFE_IRQ##_id##_MCU_CFG0, \ + .irq_en_shift = AFE_IRQ##_id##_MCU_ON_SFT, \ + .irq_clr_reg = 
AFE_IRQ##_id##_MCU_CFG1, \ + .irq_clr_shift = AFE_IRQ##_id##_CLR_CFG_SFT, \ + } + +#define MT8196_AFE_TDM_IRQ(_id) \ + [MT8196_IRQ_##_id] = { \ + .id = MT8196_CUS_IRQ_TDM, \ + .irq_cnt_reg = AFE_CUSTOM_IRQ0_MCU_CFG1, \ + .irq_cnt_shift = AFE_CUSTOM_IRQ0_MCU_CNT_SFT, \ + .irq_cnt_maskbit = AFE_CUSTOM_IRQ0_MCU_CNT_MASK, \ + .irq_fs_reg = -1, \ + .irq_fs_shift = -1, \ + .irq_fs_maskbit = -1, \ + .irq_en_reg = AFE_CUSTOM_IRQ0_MCU_CFG0, \ + .irq_en_shift = AFE_CUSTOM_IRQ0_MCU_ON_SFT, \ + .irq_clr_reg = AFE_CUSTOM_IRQ0_MCU_CFG1, \ + .irq_clr_shift = AFE_CUSTOM_IRQ0_CLR_CFG_SFT, \ + } + +static const struct mtk_base_irq_data irq_data[MT8196_IRQ_NUM] = { + MT8196_AFE_IRQ(0), + MT8196_AFE_IRQ(1), + MT8196_AFE_IRQ(2), + MT8196_AFE_IRQ(3), + MT8196_AFE_IRQ(4), + MT8196_AFE_IRQ(5), + MT8196_AFE_IRQ(6), + MT8196_AFE_IRQ(7), + MT8196_AFE_IRQ(8), + MT8196_AFE_IRQ(9), + MT8196_AFE_IRQ(10), + MT8196_AFE_IRQ(11), + MT8196_AFE_IRQ(12), + MT8196_AFE_IRQ(13), + MT8196_AFE_IRQ(14), + MT8196_AFE_IRQ(15), + MT8196_AFE_IRQ(16), + MT8196_AFE_IRQ(17), + MT8196_AFE_IRQ(18), + MT8196_AFE_IRQ(19), + MT8196_AFE_IRQ(20), + MT8196_AFE_IRQ(21), + MT8196_AFE_IRQ(22), + MT8196_AFE_IRQ(23), + MT8196_AFE_IRQ(24), + MT8196_AFE_IRQ(25), + MT8196_AFE_IRQ(26), + MT8196_AFE_TDM_IRQ(31), +}; + +static const int memif_irq_usage[MT8196_MEMIF_NUM] = { + /* TODO: verify each memif & irq */ + [MT8196_MEMIF_DL0] = MT8196_IRQ_0, + [MT8196_MEMIF_DL1] = MT8196_IRQ_1, + [MT8196_MEMIF_DL2] = MT8196_IRQ_2, + [MT8196_MEMIF_DL3] = MT8196_IRQ_3, + [MT8196_MEMIF_DL4] = MT8196_IRQ_4, + [MT8196_MEMIF_DL5] = MT8196_IRQ_5, + [MT8196_MEMIF_DL6] = MT8196_IRQ_6, + [MT8196_MEMIF_DL7] = MT8196_IRQ_7, + [MT8196_MEMIF_DL8] = MT8196_IRQ_8, + [MT8196_MEMIF_DL23] = MT8196_IRQ_9, + [MT8196_MEMIF_DL24] = MT8196_IRQ_10, + [MT8196_MEMIF_DL25] = MT8196_IRQ_11, + [MT8196_MEMIF_DL26] = MT8196_IRQ_0, + [MT8196_MEMIF_DL_4CH] = MT8196_IRQ_0, + [MT8196_MEMIF_DL_24CH] = MT8196_IRQ_12, + [MT8196_MEMIF_VUL0] = MT8196_IRQ_13, + [MT8196_MEMIF_VUL1] = 
MT8196_IRQ_14, + [MT8196_MEMIF_VUL2] = MT8196_IRQ_15, + [MT8196_MEMIF_VUL3] = MT8196_IRQ_16, + [MT8196_MEMIF_VUL4] = MT8196_IRQ_17, + [MT8196_MEMIF_VUL5] = MT8196_IRQ_18, + [MT8196_MEMIF_VUL6] = MT8196_IRQ_19, + [MT8196_MEMIF_VUL7] = MT8196_IRQ_20, + [MT8196_MEMIF_VUL8] = MT8196_IRQ_21, + [MT8196_MEMIF_VUL9] = MT8196_IRQ_22, + [MT8196_MEMIF_VUL10] = MT8196_IRQ_23, + [MT8196_MEMIF_VUL24] = MT8196_IRQ_24, + [MT8196_MEMIF_VUL25] = MT8196_IRQ_25, + [MT8196_MEMIF_VUL26] = MT8196_IRQ_0, + [MT8196_MEMIF_VUL_CM0] = MT8196_IRQ_26, + [MT8196_MEMIF_VUL_CM1] = MT8196_IRQ_0, + [MT8196_MEMIF_VUL_CM2] = MT8196_IRQ_0, + [MT8196_MEMIF_ETDM_IN0] = MT8196_IRQ_0, + [MT8196_MEMIF_ETDM_IN1] = MT8196_IRQ_0, + [MT8196_MEMIF_ETDM_IN2] = MT8196_IRQ_0, + [MT8196_MEMIF_ETDM_IN3] = MT8196_IRQ_0, + [MT8196_MEMIF_ETDM_IN4] = MT8196_IRQ_0, + [MT8196_MEMIF_ETDM_IN6] = MT8196_IRQ_0, + [MT8196_MEMIF_HDMI] = MT8196_IRQ_31 +}; + +static bool mt8196_is_volatile_reg(struct device *dev, unsigned int reg) +{ + /* these auto-gen reg has read-only bit, so put it as volatile */ + /* volatile reg cannot be cached, so cannot be set when power off */ + switch (reg) { + case AUDIO_TOP_CON0 ... AUDIO_TOP_CON4: + case AFE_APLL1_TUNER_MON0: + case AFE_APLL2_TUNER_MON0: + case AFE_SPM_CONTROL_ACK: + case AUDIO_TOP_IP_VERSION: + case AUDIO_ENGEN_CON0_MON: + case AUD_TOP_MON_RG: + case AFE_CONNSYS_I2S_IPM_VER_MON: + case AFE_CONNSYS_I2S_MON: + case AFE_PCM_INTF_MON: + case AFE_PCM_TOP_IP_VERSION: + case AFE_IRQ_MCU_STATUS: + case AFE_CUSTOM_IRQ_MCU_STATUS: + case AFE_IRQ_MCU_MON0 ... AFE_IRQ26_CNT_MON: + case AFE_CUSTOM_IRQ0_CNT_MON: + case AFE_STF_MON: + case AFE_STF_IP_VERSION: + case AFE_CM0_MON: + case AFE_CM0_IP_VERSION: + case AFE_CM1_MON: + case AFE_CM1_IP_VERSION: + case AFE_ADDA_UL0_SRC_DEBUG_MON0 ... AFE_ADDA_UL0_SRC_MON1: + case AFE_ADDA_UL0_IP_VERSION: + case AFE_ADDA_UL1_SRC_DEBUG_MON0 ... 
AFE_ADDA_UL1_SRC_MON1: + case AFE_ADDA_UL1_IP_VERSION: + case AFE_MTKAIF_IPM_VER_MON: + case AFE_MTKAIF_MON: + case AFE_AUD_PAD_TOP_MON: + case AFE_ADDA_MTKAIFV4_MON0 ... AFE_ADDA6_MTKAIFV4_MON0: + case ETDM_IN0_MON: + case ETDM_IN1_MON: + case ETDM_IN2_MON: + case ETDM_IN4_MON: + case ETDM_IN6_MON: + case ETDM_OUT0_MON: + case ETDM_OUT1_MON: + case ETDM_OUT2_MON: + case ETDM_OUT4_MON: + case ETDM_OUT6_MON: + case AFE_DPTX_MON: + case AFE_TDM_TOP_IP_VERSION: + case AFE_CONN_MON0 ... AFE_CONN_MON5: + case AFE_CBIP_SLV_DECODER_MON0 ... AFE_CBIP_SLV_MUX_MON1: + case AFE_DL0_CUR_MSB ... AFE_DL0_CUR: + case AFE_DL0_RCH_MON ... AFE_DL0_LCH_MON: + case AFE_DL1_CUR_MSB ... AFE_DL1_CUR: + case AFE_DL1_RCH_MON ... AFE_DL1_LCH_MON: + case AFE_DL2_CUR_MSB ... AFE_DL2_CUR: + case AFE_DL2_RCH_MON ... AFE_DL2_LCH_MON: + case AFE_DL3_CUR_MSB ... AFE_DL3_CUR: + case AFE_DL3_RCH_MON ... AFE_DL3_LCH_MON: + case AFE_DL4_CUR_MSB ... AFE_DL4_CUR: + case AFE_DL4_RCH_MON ... AFE_DL4_LCH_MON: + case AFE_DL5_CUR_MSB ... AFE_DL5_CUR: + case AFE_DL5_RCH_MON ... AFE_DL5_LCH_MON: + case AFE_DL6_CUR_MSB ... AFE_DL6_CUR: + case AFE_DL6_RCH_MON ... AFE_DL6_LCH_MON: + case AFE_DL7_CUR_MSB ... AFE_DL7_CUR: + case AFE_DL7_RCH_MON ... AFE_DL7_LCH_MON: + case AFE_DL8_CUR_MSB ... AFE_DL8_CUR: + case AFE_DL8_RCH_MON ... AFE_DL8_LCH_MON: + case AFE_DL_24CH_CUR_MSB ... AFE_DL_24CH_CUR: + case AFE_DL_4CH_CUR_MSB ... AFE_DL_4CH_CUR: + case AFE_DL23_CUR_MSB ... AFE_DL23_CUR: + case AFE_DL23_RCH_MON ... AFE_DL23_LCH_MON: + case AFE_DL24_CUR_MSB ... AFE_DL24_CUR: + case AFE_DL24_RCH_MON ... AFE_DL24_LCH_MON: + case AFE_DL25_CUR_MSB ... AFE_DL25_CUR: + case AFE_DL25_RCH_MON ... AFE_DL25_LCH_MON: + case AFE_DL26_CUR_MSB ... AFE_DL26_CUR: + case AFE_DL26_RCH_MON ... AFE_DL26_LCH_MON: + case AFE_VUL0_CUR_MSB ... AFE_VUL0_CUR: + case AFE_VUL1_CUR_MSB ... AFE_VUL1_CUR: + case AFE_VUL2_CUR_MSB ... AFE_VUL2_CUR: + case AFE_VUL3_CUR_MSB ... AFE_VUL3_CUR: + case AFE_VUL4_CUR_MSB ... 
AFE_VUL4_CUR: + case AFE_VUL5_CUR_MSB ... AFE_VUL5_CUR: + case AFE_VUL6_CUR_MSB ... AFE_VUL6_CUR: + case AFE_VUL7_CUR_MSB ... AFE_VUL7_CUR: + case AFE_VUL8_CUR_MSB ... AFE_VUL8_CUR: + case AFE_VUL9_CUR_MSB ... AFE_VUL9_CUR: + case AFE_VUL10_CUR_MSB ... AFE_VUL10_CUR: + case AFE_VUL24_CUR_MSB ... AFE_VUL24_CUR: + case AFE_VUL25_CUR_MSB ... AFE_VUL25_CUR: + case AFE_VUL25_RCH_MON ... AFE_VUL25_LCH_MON: + case AFE_VUL26_CUR_MSB ... AFE_VUL26_CUR: + case AFE_VUL_CM0_BASE_MSB ... AFE_VUL_CM0_CON0: + case AFE_VUL_CM1_CUR_MSB ... AFE_VUL_CM1_CUR: + case AFE_VUL_CM2_CUR_MSB ... AFE_VUL_CM2_CUR: + case AFE_ETDM_IN0_CUR_MSB ... AFE_ETDM_IN0_CUR: + case AFE_ETDM_IN1_CUR_MSB ... AFE_ETDM_IN1_CUR: + case AFE_ETDM_IN2_CUR_MSB ... AFE_ETDM_IN2_CUR: + case AFE_ETDM_IN3_CUR_MSB ... AFE_ETDM_IN3_CUR: + case AFE_ETDM_IN4_CUR_MSB ... AFE_ETDM_IN4_CUR: + case AFE_ETDM_IN6_CUR_MSB ... AFE_ETDM_IN6_CUR: + case AFE_HDMI_OUT_CUR_MSB ... AFE_HDMI_OUT_CUR: + case AFE_HDMI_OUT_END: + case AFE_PROT_SIDEBAND0_MON ... AFE_DOMAIN_SIDEBAND9_MON: + case AFE_PCM0_INTF_CON1_MASK_MON ... AFE_ADDA_UL1_SRC_CON0_MASK_MON: + case AFE_IRQ_MCU_EN ... AFE_IRQ_MCU_DSP2_EN: + case AFE_CUSTOM_IRQ_MCU_EN: + case AFE_DL5_CON0: + case AFE_DL6_CON0: + case AFE_DL23_CON0: + case AFE_DL_24CH_CON0: + case AFE_VUL1_CON0: + case AFE_VUL3_CON0: + case AFE_VUL4_CON0: + case AFE_VUL5_CON0: + case AFE_VUL9_CON0: + case AFE_VUL25_CON0: + case AFE_IRQ0_MCU_CFG0 ... 
AFE_IRQ26_MCU_CFG1: + return true; + default: + return false; + }; +} + +static const struct regmap_config mt8196_afe_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + + .volatile_reg = mt8196_is_volatile_reg, + + .max_register = AFE_MAX_REGISTER, + .num_reg_defaults_raw = AFE_MAX_REGISTER, + + .cache_type = REGCACHE_FLAT, +}; + +static irqreturn_t mt8196_afe_irq_handler(int irq_id, void *dev) +{ + struct mtk_base_afe *afe = dev; + struct mtk_base_afe_irq *irq; + u32 status; + u32 status_mcu; + u32 mcu_en; + u32 cus_status; + u32 cus_status_mcu; + u32 cus_mcu_en; + u32 tmp_reg; + int ret, cus_ret; + int i; + struct timespec64 ts64; + u64 t1, t2; + /* one interrupt period = 5ms */ + const u64 timeout_limit = 5000000; + + /* get irq that is sent to MCU */ + regmap_read(afe->regmap, AFE_IRQ_MCU_EN, &mcu_en); + regmap_read(afe->regmap, AFE_CUSTOM_IRQ_MCU_EN, &cus_mcu_en); + + ret = regmap_read(afe->regmap, AFE_IRQ_MCU_STATUS, &status); + cus_ret = regmap_read(afe->regmap, AFE_CUSTOM_IRQ_MCU_STATUS, &cus_status); + /* only care IRQ which is sent to MCU */ + status_mcu = status & mcu_en & AFE_IRQ_STATUS_BITS; + cus_status_mcu = cus_status & cus_mcu_en & AFE_IRQ_STATUS_BITS; + if ((ret || !status_mcu) && (cus_ret || !cus_status_mcu)) { + dev_err(afe->dev, "ret %d, sat 0x%x, en 0x%x,csat 0x%x, cen 0x%x\n", + ret, status, mcu_en, cus_status_mcu, cus_mcu_en); + return IRQ_NONE; + } + + ktime_get_ts64(&ts64); + t1 = ktime_get_ns(); + + for (i = 0; i < MT8196_MEMIF_NUM; i++) { + struct mtk_base_afe_memif *memif = &afe->memif[i]; + + if (!memif->substream) + continue; + + if (memif->irq_usage < 0) + continue; + irq = &afe->irqs[memif->irq_usage]; + + if (i == MT8196_MEMIF_HDMI) { + if (cus_status_mcu & BIT(irq->irq_data->id)) + snd_pcm_period_elapsed(memif->substream); + } else { + if (status_mcu & BIT(irq->irq_data->id)) + snd_pcm_period_elapsed(memif->substream); + } + } + + ktime_get_ts64(&ts64); + t2 = ktime_get_ns(); + t2 = t2 - t1; /* in ns (10^9) 
*/ + + if (t2 > timeout_limit) + dev_warn(afe->dev, "IRQ handler exceeded time limit by %llu ns\n", + t2 - timeout_limit); + + /* clear irq */ + for (i = 0; i < MT8196_IRQ_NUM; ++i) { + /* cus_status_mcu only bit0 is used for TDM */ + if ((status_mcu & BIT(i)) || (cus_status_mcu & 0x1)) { + regmap_read(afe->regmap, irq_data[i].irq_clr_reg, &tmp_reg); + regmap_update_bits(afe->regmap, irq_data[i].irq_clr_reg, + AFE_IRQ_CLR_CFG_MASK_SFT | + AFE_IRQ_MISS_FLAG_CLR_CFG_MASK_SFT, + tmp_reg ^ (AFE_IRQ_CLR_CFG_MASK_SFT | + AFE_IRQ_MISS_FLAG_CLR_CFG_MASK_SFT)); + } + } + + return IRQ_HANDLED; +} + +static int mt8196_afe_runtime_suspend(struct device *dev) +{ + struct mtk_base_afe *afe = dev_get_drvdata(dev); + unsigned int value; + unsigned int tmp_reg; + int ret, i; + + if (!afe->regmap) { + dev_err(afe->dev, "skip regmap\n"); + goto skip_regmap; + } + + /* disable AFE */ + mt8196_afe_disable_main_clock(afe); + + ret = regmap_read_poll_timeout(afe->regmap, + AUDIO_ENGEN_CON0_MON, + value, + (value & AUDIO_ENGEN_MON_SFT) == 0, + 20, + 1 * 1000 * 1000); + dev_dbg(afe->dev, "read_poll ret %d\n", ret); + if (ret) + dev_warn(afe->dev, "ret %d\n", ret); + + /* make sure all irq status are cleared */ + for (i = 0; i < MT8196_IRQ_NUM; ++i) { + regmap_read(afe->regmap, irq_data[i].irq_clr_reg, &tmp_reg); + regmap_update_bits(afe->regmap, irq_data[i].irq_clr_reg, + AFE_IRQ_CLR_CFG_MASK_SFT | AFE_IRQ_MISS_FLAG_CLR_CFG_MASK_SFT, + tmp_reg ^ (AFE_IRQ_CLR_CFG_MASK_SFT | + AFE_IRQ_MISS_FLAG_CLR_CFG_MASK_SFT)); + } + + /* reset audio 26M request */ + regmap_update_bits(afe->regmap, + AFE_SPM_CONTROL_REQ, 0x1, 0x0); + + /* cache only */ + regcache_cache_only(afe->regmap, true); + regcache_mark_dirty(afe->regmap); + +skip_regmap: + mt8196_afe_disable_reg_rw_clk(afe); + return 0; +} + +static int mt8196_afe_runtime_resume(struct device *dev) +{ + struct mtk_base_afe *afe = dev_get_drvdata(dev); + int ret = 0; + + ret = mt8196_afe_enable_reg_rw_clk(afe); + if (ret) + return ret; + + if 
(!afe->regmap) { + dev_warn(afe->dev, "skip regmap\n"); + goto skip_regmap; + } + regcache_cache_only(afe->regmap, false); + regcache_sync(afe->regmap); + + /* set audio 26M request */ + regmap_update_bits(afe->regmap, AFE_SPM_CONTROL_REQ, 0x1, 0x1); + regmap_update_bits(afe->regmap, AFE_CBIP_CFG0, 0x1, 0x1); + + /* force cpu use 8_24 format when writing 32bit data */ + regmap_update_bits(afe->regmap, AFE_MEMIF_CON0, + CPU_HD_ALIGN_MASK_SFT, 0 << CPU_HD_ALIGN_SFT); + + /* enable AFE */ + mt8196_afe_enable_main_clock(afe); + +skip_regmap: + return 0; +} + +static int mt8196_afe_component_probe(struct snd_soc_component *component) +{ + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component); + + if (component) { + /* enable clock for regcache get default value from hw */ + pm_runtime_get_sync(afe->dev); + mtk_afe_add_sub_dai_control(component); + pm_runtime_put_sync(afe->dev); + } + return 0; +} + +static int mt8196_afe_pcm_open(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + /* set the wait_for_avail to 2 sec*/ + substream->wait_time = msecs_to_jiffies(2 * 1000); + + return 0; +} + +static void mt8196_afe_pcm_free(struct snd_soc_component *component, struct snd_pcm *pcm) +{ + snd_pcm_lib_preallocate_free_for_all(pcm); +} + +static const struct snd_soc_component_driver mt8196_afe_component = { + .name = AFE_PCM_NAME, + .probe = mt8196_afe_component_probe, + .pcm_new = mtk_afe_pcm_new, + .pcm_free = mt8196_afe_pcm_free, + .open = mt8196_afe_pcm_open, + .pointer = mtk_afe_pcm_pointer, +}; + +static int mt8196_dai_memif_register(struct mtk_base_afe *afe) +{ + struct mtk_base_afe_dai *dai; + + dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL); + if (!dai) + return -ENOMEM; + + list_add(&dai->list, &afe->sub_dais); + + dai->dai_drivers = mt8196_memif_dai_driver; + dai->num_dai_drivers = ARRAY_SIZE(mt8196_memif_dai_driver); + dai->dapm_widgets = mt8196_memif_widgets; + dai->num_dapm_widgets = 
ARRAY_SIZE(mt8196_memif_widgets); + dai->dapm_routes = mt8196_memif_routes; + dai->num_dapm_routes = ARRAY_SIZE(mt8196_memif_routes); + return 0; +} + +typedef int (*dai_register_cb)(struct mtk_base_afe *); +static const dai_register_cb dai_register_cbs[] = { + mt8196_dai_adda_register, + mt8196_dai_i2s_register, + mt8196_dai_tdm_register, + mt8196_dai_memif_register, +}; + +static const struct reg_sequence mt8196_cg_patch[] = { + { AUDIO_TOP_CON4, 0x361c }, +}; + +static int mt8196_afe_pcm_dev_probe(struct platform_device *pdev) +{ + int ret, i; + unsigned int tmp_reg; + int irq_id; + struct mtk_base_afe *afe; + struct mt8196_afe_private *afe_priv; + struct device *dev = &pdev->dev; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34)); + if (ret) + return ret; + + ret = of_reserved_mem_device_init(dev); + if (ret) + dev_err(dev, "failed to assign memory region: %d\n", ret); + + afe = devm_kzalloc(dev, sizeof(*afe), GFP_KERNEL); + if (!afe) + return -ENOMEM; + + platform_set_drvdata(pdev, afe); + + afe->platform_priv = devm_kzalloc(dev, sizeof(*afe_priv), + GFP_KERNEL); + if (!afe->platform_priv) + return -ENOMEM; + + afe_priv = afe->platform_priv; + afe->dev = dev; + + afe->base_addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(afe->base_addr)) + return dev_err_probe(dev, PTR_ERR(afe->base_addr), + "AFE base_addr not found\n"); + + /* init audio related clock */ + ret = mt8196_init_clock(afe); + if (ret) + return dev_err_probe(dev, ret, "init clock error.\n"); + + /* init memif */ + /* IPM2.0 no need banding */ + afe->memif_32bit_supported = 1; + afe->memif_size = MT8196_MEMIF_NUM; + afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif), + GFP_KERNEL); + + if (!afe->memif) + return -ENOMEM; + + for (i = 0; i < afe->memif_size; i++) { + afe->memif[i].data = &memif_data[i]; + afe->memif[i].irq_usage = memif_irq_usage[i]; + afe->memif[i].const_irq = 1; + } + + mutex_init(&afe->irq_alloc_lock); + + /* init irq */ + afe->irqs_size = 
MT8196_IRQ_NUM; + afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs), + GFP_KERNEL); + + if (!afe->irqs) + return -ENOMEM; + + for (i = 0; i < afe->irqs_size; i++) + afe->irqs[i].irq_data = &irq_data[i]; + + /* request irq */ + irq_id = platform_get_irq(pdev, 0); + if (irq_id < 0) + return dev_err_probe(dev, irq_id, "no irq found"); + + ret = devm_request_irq(dev, irq_id, mt8196_afe_irq_handler, + IRQF_TRIGGER_NONE, + "Afe_ISR_Handle", afe); + if (ret) + return dev_err_probe(dev, ret, "could not request_irq for Afe_ISR_Handle\n"); + + /* init sub_dais */ + INIT_LIST_HEAD(&afe->sub_dais); + + for (i = 0; i < ARRAY_SIZE(dai_register_cbs); i++) { + ret = dai_register_cbs[i](afe); + if (ret) + return dev_err_probe(dev, ret, "dai register i %d fail\n", i); + } + + /* init dai_driver and component_driver */ + ret = mtk_afe_combine_sub_dai(afe); + if (ret) + return dev_err_probe(dev, ret, "mtk_afe_combine_sub_dai fail\n"); + + /* others */ + afe->mtk_afe_hardware = &mt8196_afe_hardware; + afe->memif_fs = mt8196_memif_fs; + afe->irq_fs = mt8196_irq_fs; + afe->get_dai_fs = mt8196_get_dai_fs; + afe->get_memif_pbuf_size = mt8196_get_memif_pbuf_size; + + afe->runtime_resume = mt8196_afe_runtime_resume; + afe->runtime_suspend = mt8196_afe_runtime_suspend; + + ret = devm_pm_runtime_enable(dev); + if (ret) + return ret; + +/* + * Audio device is part of genpd. Registering it as a syscore device ensure + * the proper power-on sequence of the AFE device. 
+ */ + dev_pm_syscore_device(dev, true); + + /* enable clock for regcache get default value from hw */ + pm_runtime_get_sync(dev); + + afe->regmap = devm_regmap_init_mmio(dev, afe->base_addr, + &mt8196_afe_regmap_config); + if (IS_ERR(afe->regmap)) + return PTR_ERR(afe->regmap); + + ret = regmap_register_patch(afe->regmap, mt8196_cg_patch, + ARRAY_SIZE(mt8196_cg_patch)); + if (ret < 0) { + dev_err(dev, "Failed to apply cg patch\n"); + goto err_pm_disable; + } + + regmap_read(afe->regmap, AFE_IRQ_MCU_EN, &tmp_reg); + regmap_write(afe->regmap, AFE_IRQ_MCU_EN, 0xffffffff); + regmap_read(afe->regmap, AFE_IRQ_MCU_EN, &tmp_reg); + + pm_runtime_put_sync(dev); + + regcache_cache_only(afe->regmap, true); + regcache_mark_dirty(afe->regmap); + + /* register component */ + ret = devm_snd_soc_register_component(dev, + &mt8196_afe_component, + afe->dai_drivers, + afe->num_dai_drivers); + if (ret) { + dev_err(dev, "afe component err\n"); + goto err_pm_disable; + } + + return 0; + +err_pm_disable: + pm_runtime_put_sync(dev); + return ret; +} + +static void mt8196_afe_pcm_dev_remove(struct platform_device *pdev) +{ + struct mtk_base_afe *afe = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + pm_runtime_put_sync(dev); + if (!pm_runtime_status_suspended(dev)) + mt8196_afe_runtime_suspend(dev); + + mt8196_afe_disable_main_clock(afe); + /* disable afe clock */ + mt8196_afe_disable_reg_rw_clk(afe); +} + +static const struct of_device_id mt8196_afe_pcm_dt_match[] = { + { .compatible = "mediatek,mt8196-afe", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, mt8196_afe_pcm_dt_match); + +static const struct dev_pm_ops mt8196_afe_pm_ops = { + SET_RUNTIME_PM_OPS(mt8196_afe_runtime_suspend, + mt8196_afe_runtime_resume, NULL) +}; + +static struct platform_driver mt8196_afe_pcm_driver = { + .driver = { + .name = "mt8196-afe", + .of_match_table = mt8196_afe_pcm_dt_match, + .pm = &mt8196_afe_pm_ops, + }, + .probe = mt8196_afe_pcm_dev_probe, + .remove = 
mt8196_afe_pcm_dev_remove, +}; +module_platform_driver(mt8196_afe_pcm_driver); + +MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 8196"); +MODULE_AUTHOR("Darren Ye "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/mediatek/mt8196/mt8196-dai-adda.c b/sound/soc/mediatek/mt8196/mt8196-dai-adda.c new file mode 100644 index 00000000000000..9a91db4e79ae83 --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-dai-adda.c @@ -0,0 +1,845 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek ALSA SoC Audio DAI ADDA Control + * + * Copyright (c) 2025 MediaTek Inc. + * Author: Darren Ye + */ + +#include +#include + +#include "mt8196-afe-clk.h" +#include "mt8196-afe-common.h" +#include "mt8196-interconnection.h" + +enum { + UL_IIR_SW, + UL_IIR_5HZ, + UL_IIR_10HZ, + UL_IIR_25HZ, + UL_IIR_50HZ, + UL_IIR_75HZ, +}; + +enum { + MTK_AFE_ADDA_UL_RATE_8K, + MTK_AFE_ADDA_UL_RATE_16K, + MTK_AFE_ADDA_UL_RATE_32K, + MTK_AFE_ADDA_UL_RATE_48K, + MTK_AFE_ADDA_UL_RATE_96K, + MTK_AFE_ADDA_UL_RATE_192K, + MTK_AFE_ADDA_UL_RATE_48K_HD, +}; + +enum { + MTK_AFE_MTKAIF_RATE_8K, + MTK_AFE_MTKAIF_RATE_12K, + MTK_AFE_MTKAIF_RATE_16K, + MTK_AFE_MTKAIF_RATE_24K, + MTK_AFE_MTKAIF_RATE_32K, + MTK_AFE_MTKAIF_RATE_48K, + MTK_AFE_MTKAIF_RATE_64K, + MTK_AFE_MTKAIF_RATE_96K, + MTK_AFE_MTKAIF_RATE_128K, + MTK_AFE_MTKAIF_RATE_192K, + MTK_AFE_MTKAIF_RATE_256K, + MTK_AFE_MTKAIF_RATE_384K, + MTK_AFE_MTKAIF_RATE_11K = 0x10, + MTK_AFE_MTKAIF_RATE_22K, + MTK_AFE_MTKAIF_RATE_44K, + MTK_AFE_MTKAIF_RATE_88K, + MTK_AFE_MTKAIF_RATE_176K, + MTK_AFE_MTKAIF_RATE_352K, +}; + +struct mtk_afe_adda_priv { + int dl_rate; + int ul_rate; +}; + +static unsigned int adda_ul_rate_transform(struct mtk_base_afe *afe, + unsigned int rate) +{ + switch (rate) { + case 8000: + return MTK_AFE_ADDA_UL_RATE_8K; + case 16000: + return MTK_AFE_ADDA_UL_RATE_16K; + case 32000: + return MTK_AFE_ADDA_UL_RATE_32K; + case 48000: + return MTK_AFE_ADDA_UL_RATE_48K; + case 96000: + return MTK_AFE_ADDA_UL_RATE_96K; + case 192000: + return 
MTK_AFE_ADDA_UL_RATE_192K; + default: + dev_warn(afe->dev, "rate %d invalid, use 48kHz!!!\n", rate); + return MTK_AFE_ADDA_UL_RATE_48K; + } +} + +static unsigned int mtkaif_rate_transform(struct mtk_base_afe *afe, + unsigned int rate) +{ + switch (rate) { + case 8000: + return MTK_AFE_MTKAIF_RATE_8K; + case 11025: + return MTK_AFE_MTKAIF_RATE_11K; + case 12000: + return MTK_AFE_MTKAIF_RATE_12K; + case 16000: + return MTK_AFE_MTKAIF_RATE_16K; + case 22050: + return MTK_AFE_MTKAIF_RATE_22K; + case 24000: + return MTK_AFE_MTKAIF_RATE_24K; + case 32000: + return MTK_AFE_MTKAIF_RATE_32K; + case 44100: + return MTK_AFE_MTKAIF_RATE_44K; + case 48000: + return MTK_AFE_MTKAIF_RATE_48K; + case 96000: + return MTK_AFE_MTKAIF_RATE_96K; + case 192000: + return MTK_AFE_MTKAIF_RATE_192K; + default: + dev_warn(afe->dev, "rate %d invalid, use 48kHz!!!\n", rate); + return MTK_AFE_MTKAIF_RATE_48K; + } +} + +enum { + SUPPLY_SEQ_ADDA_AFE_ON, + SUPPLY_SEQ_ADDA_FIFO, + SUPPLY_SEQ_ADDA_AP_DMIC, + SUPPLY_SEQ_ADDA_UL_ON, +}; + +static int mtk_adda_ul_src_set_dmic_phase_sync(struct mtk_base_afe *afe) +{ + dev_dbg(afe->dev, "set dmic phase sync\n"); + // ul0~1 + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + UL0_PHASE_SYNC_HCLK_SET_MASK_SFT, + 0x1 << UL0_PHASE_SYNC_HCLK_SET_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + UL0_PHASE_SYNC_FCLK_SET_MASK_SFT, + 0x1 << UL0_PHASE_SYNC_FCLK_SET_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + UL1_PHASE_SYNC_HCLK_SET_MASK_SFT, + 0x1 << UL1_PHASE_SYNC_HCLK_SET_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + UL1_PHASE_SYNC_FCLK_SET_MASK_SFT, + 0x1 << UL1_PHASE_SYNC_FCLK_SET_SFT); + // dmic 0 + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + DMIC0_PHASE_SYNC_FCLK_SET_MASK_SFT, + 0x1 << DMIC0_PHASE_SYNC_FCLK_SET_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + DMIC0_PHASE_SYNC_HCLK_SET_MASK_SFT, + 0x1 << DMIC0_PHASE_SYNC_HCLK_SET_SFT); + // dmic 1 
+ regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + DMIC1_PHASE_SYNC_FCLK_SET_MASK_SFT, + 0x1 << DMIC1_PHASE_SYNC_FCLK_SET_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + DMIC1_PHASE_SYNC_HCLK_SET_MASK_SFT, + 0x1 << DMIC1_PHASE_SYNC_HCLK_SET_SFT); + // ul0~1 phase sync clock + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + DMIC1_PHASE_HCLK_SEL_MASK_SFT, + 0x1 << DMIC1_PHASE_HCLK_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + DMIC1_PHASE_FCLK_SEL_MASK_SFT, + 0x1 << DMIC1_PHASE_FCLK_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + DMIC0_PHASE_HCLK_SEL_MASK_SFT, + 0x1 << DMIC0_PHASE_HCLK_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + DMIC0_PHASE_FCLK_SEL_MASK_SFT, + 0x1 << DMIC0_PHASE_FCLK_SEL_SFT); + // dmic 0 + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + UL1_PHASE_HCLK_SEL_MASK_SFT, + 0x2 << UL1_PHASE_HCLK_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + UL1_PHASE_FCLK_SEL_MASK_SFT, + 0x2 << UL1_PHASE_FCLK_SEL_SFT); + // dmic 1 + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + UL0_PHASE_HCLK_SEL_MASK_SFT, + 0x2 << UL0_PHASE_HCLK_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + UL0_PHASE_FCLK_SEL_MASK_SFT, + 0x2 << UL0_PHASE_FCLK_SEL_SFT); + + return 0; +} + +static int mtk_adda_ul_src_set_dmic_phase_sync_clock(struct mtk_base_afe *afe) +{ + dev_dbg(afe->dev, "dmic turn on phase sync clk\n"); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + UL_PHASE_SYNC_HCLK_1_ON_MASK_SFT, + 0x1 << UL_PHASE_SYNC_HCLK_1_ON_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + UL_PHASE_SYNC_HCLK_0_ON_MASK_SFT, + 0x1 << UL_PHASE_SYNC_HCLK_0_ON_SFT); + + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + UL_PHASE_SYNC_FCLK_1_ON_MASK_SFT, + 0x1 << UL_PHASE_SYNC_FCLK_1_ON_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON0, + 
UL_PHASE_SYNC_FCLK_0_ON_MASK_SFT, + 0x1 << UL_PHASE_SYNC_FCLK_0_ON_SFT); + + return 0; +} + +static int mtk_adda_ul_src_enable_dmic(struct mtk_base_afe *afe, int id) +{ + unsigned int reg_con0 = 0, reg_con1 = 0; + + dev_dbg(afe->dev, "id: %d\n", id); + + switch (id) { + case MT8196_DAI_ADDA: + case MT8196_DAI_AP_DMIC: + reg_con0 = AFE_ADDA_UL0_SRC_CON0; + reg_con1 = AFE_ADDA_UL0_SRC_CON1; + break; + case MT8196_DAI_ADDA_CH34: + case MT8196_DAI_AP_DMIC_CH34: + reg_con0 = AFE_ADDA_UL1_SRC_CON0; + reg_con1 = AFE_ADDA_UL1_SRC_CON1; + break; + default: + return -EINVAL; + } + + switch (id) { + case MT8196_DAI_AP_DMIC: + dev_dbg(afe->dev, "clear mtkaifv4 ul ch1ch2 mux\n"); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_UL_CH1CH2_IN_EN_SEL_MASK_SFT, + 0x0 << MTKAIFV4_UL_CH1CH2_IN_EN_SEL_SFT); + break; + case MT8196_DAI_AP_DMIC_CH34: + dev_dbg(afe->dev, "clear mtkaifv4 ul ch3ch4 mux\n"); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_UL_CH3CH4_IN_EN_SEL_MASK_SFT, + 0x0 << MTKAIFV4_UL_CH3CH4_IN_EN_SEL_SFT); + break; + default: + return -EINVAL; + } + + /* choose Phase */ + regmap_update_bits(afe->regmap, reg_con0, + UL_DMIC_PHASE_SEL_CH1_MASK_SFT, + 0x0 << UL_DMIC_PHASE_SEL_CH1_SFT); + regmap_update_bits(afe->regmap, reg_con0, + UL_DMIC_PHASE_SEL_CH2_MASK_SFT, + 0x4 << UL_DMIC_PHASE_SEL_CH2_SFT); + + /* dmic mode, 3.25M*/ + regmap_update_bits(afe->regmap, reg_con0, + DIGMIC_3P25M_1P625M_SEL_CTL_MASK_SFT, + 0x0); + regmap_update_bits(afe->regmap, reg_con0, + DMIC_LOW_POWER_MODE_CTL_MASK_SFT, + 0x0); + + /* turn on dmic, ch1, ch2 */ + regmap_update_bits(afe->regmap, reg_con0, + UL_SDM_3_LEVEL_CTL_MASK_SFT, + 0x1 << UL_SDM_3_LEVEL_CTL_SFT); + regmap_update_bits(afe->regmap, reg_con0, + UL_MODE_3P25M_CH1_CTL_MASK_SFT, + 0x1 << UL_MODE_3P25M_CH1_CTL_SFT); + regmap_update_bits(afe->regmap, reg_con0, + UL_MODE_3P25M_CH2_CTL_MASK_SFT, + 0x1 << UL_MODE_3P25M_CH2_CTL_SFT); + + /* ul gain: gain = 0x7fff/positive_gain = 0x0/gain_mode = 
0x10 */ + regmap_update_bits(afe->regmap, reg_con1, + ADDA_UL_GAIN_VALUE_MASK_SFT, + 0x7fff << ADDA_UL_GAIN_VALUE_SFT); + regmap_update_bits(afe->regmap, reg_con1, + ADDA_UL_POSTIVEGAIN_MASK_SFT, + 0x0 << ADDA_UL_POSTIVEGAIN_SFT); + /* gain_mode = 0x10: Add 0.5 gain at CIC output */ + regmap_update_bits(afe->regmap, reg_con1, + GAIN_MODE_MASK_SFT, + 0x02 << GAIN_MODE_SFT); + return 0; +} + +static int mtk_adda_sleep_on_pmd_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + + dev_dbg(afe->dev, "name %s, event 0x%x\n", w->name, event); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + break; + case SND_SOC_DAPM_POST_PMD: + /* should delayed 1/fs(smallest is 8k) = 125us before afe off */ + usleep_range(120, 130); + break; + default: + break; + } + + return 0; +} + +/* ADDA UL MUX */ +#define ADDA_UL_MUX_MASK 0x3 +enum { + ADDA_UL_MUX_MTKAIF = 0, + ADDA_UL_MUX_AP_DMIC, + ADDA_UL_MUX_AP_DMIC_MULTICH, +}; + +static const char *const adda_ul_mux_map[] = { + "MTKAIF", "AP_DMIC", "AP_DMIC_MULTI_CH", +}; + +static int adda_ul_map_value[] = { + ADDA_UL_MUX_MTKAIF, + ADDA_UL_MUX_AP_DMIC, + ADDA_UL_MUX_AP_DMIC_MULTICH, +}; + +static SOC_VALUE_ENUM_SINGLE_DECL(adda_ul_mux_map_enum, + SND_SOC_NOPM, + 0, + ADDA_UL_MUX_MASK, + adda_ul_mux_map, + adda_ul_map_value); + +static const struct snd_kcontrol_new adda_ul_mux_control = + SOC_DAPM_ENUM("ADDA_UL_MUX Select", adda_ul_mux_map_enum); + +static const struct snd_kcontrol_new adda_ch34_ul_mux_control = + SOC_DAPM_ENUM("ADDA_CH34_UL_MUX Select", adda_ul_mux_map_enum); + +static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = { + /* inter-connections */ + SND_SOC_DAPM_SUPPLY_S("ADDA Enable", SUPPLY_SEQ_ADDA_AFE_ON, + AUDIO_ENGEN_CON0, AUDIO_F3P25M_EN_ON_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("ADDA Capture Enable", SUPPLY_SEQ_ADDA_UL_ON, + 
AFE_ADDA_UL0_SRC_CON0, + UL_SRC_ON_TMP_CTL_SFT, 0, + mtk_adda_sleep_on_pmd_event, + SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S("ADDA CH34 Capture Enable", SUPPLY_SEQ_ADDA_UL_ON, + AFE_ADDA_UL1_SRC_CON0, + UL_SRC_ON_TMP_CTL_SFT, 0, + mtk_adda_sleep_on_pmd_event, + SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S("AP_DMIC_EN", SUPPLY_SEQ_ADDA_AP_DMIC, + AFE_ADDA_UL0_SRC_CON0, + UL_AP_DMIC_ON_SFT, 0, + mtk_adda_sleep_on_pmd_event, + SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S("AP_DMIC_CH34_EN", SUPPLY_SEQ_ADDA_AP_DMIC, + AFE_ADDA_UL1_SRC_CON0, + UL_AP_DMIC_ON_SFT, 0, + mtk_adda_sleep_on_pmd_event, + SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S("ADDA_FIFO", SUPPLY_SEQ_ADDA_FIFO, + AFE_ADDA_UL0_SRC_CON1, + FIFO_SOFT_RST_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("ADDA_CH34_FIFO", SUPPLY_SEQ_ADDA_FIFO, + AFE_ADDA_UL1_SRC_CON1, + FIFO_SOFT_RST_SFT, 1, + NULL, 0), + SND_SOC_DAPM_MUX("ADDA_UL_Mux", SND_SOC_NOPM, 0, 0, + &adda_ul_mux_control), + SND_SOC_DAPM_MUX("ADDA_CH34_UL_Mux", SND_SOC_NOPM, 0, 0, + &adda_ch34_ul_mux_control), + SND_SOC_DAPM_INPUT("AP_DMIC_INPUT"), +}; + +static const struct snd_soc_dapm_route mtk_dai_adda_routes[] = { + /* capture */ + {"ADDA_UL_Mux", "MTKAIF", "ADDA Capture"}, + {"ADDA_UL_Mux", "AP_DMIC", "AP DMIC Capture"}, + {"ADDA_UL_Mux", "AP_DMIC_MULTI_CH", "AP DMIC MULTICH Capture"}, + + {"ADDA_CH34_UL_Mux", "MTKAIF", "ADDA CH34 Capture"}, + {"ADDA_CH34_UL_Mux", "AP_DMIC", "AP DMIC CH34 Capture"}, + {"ADDA_CH34_UL_Mux", "AP_DMIC_MULTI_CH", "AP DMIC MULTICH Capture"}, + + {"AP DMIC Capture", NULL, "ADDA Enable"}, + {"AP DMIC Capture", NULL, "ADDA Capture Enable"}, + {"AP DMIC Capture", NULL, "ADDA_FIFO"}, + {"AP DMIC Capture", NULL, "AP_DMIC_EN"}, + + {"AP DMIC CH34 Capture", NULL, "ADDA Enable"}, + {"AP DMIC CH34 Capture", NULL, "ADDA CH34 Capture Enable"}, + {"AP DMIC CH34 Capture", NULL, "ADDA_CH34_FIFO"}, + {"AP DMIC CH34 Capture", NULL, "AP_DMIC_CH34_EN"}, + + {"AP DMIC MULTICH Capture", NULL, "ADDA Enable"}, + {"AP DMIC MULTICH 
Capture", NULL, "ADDA Capture Enable"}, + {"AP DMIC MULTICH Capture", NULL, "ADDA CH34 Capture Enable"}, + {"AP DMIC MULTICH Capture", NULL, "ADDA_FIFO"}, + {"AP DMIC MULTICH Capture", NULL, "ADDA_CH34_FIFO"}, + {"AP DMIC MULTICH Capture", NULL, "AP_DMIC_EN"}, + {"AP DMIC MULTICH Capture", NULL, "AP_DMIC_CH34_EN"}, + + {"AP DMIC Capture", NULL, "AP_DMIC_INPUT"}, + {"AP DMIC CH34 Capture", NULL, "AP_DMIC_INPUT"}, + {"AP DMIC MULTICH Capture", NULL, "AP_DMIC_INPUT"}, +}; + +/* dai ops */ +static int set_playback_hw_params(struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + unsigned int rate = params_rate(params); + struct mtk_afe_adda_priv *adda_priv; + unsigned int mtkaif_rate = 0; + int id = dai->id; + + adda_priv = afe_priv->dai_priv[id]; + if (!adda_priv) + return -EINVAL; + + adda_priv->dl_rate = rate; + + /* get mtkaif dl rate */ + mtkaif_rate = mtkaif_rate_transform(afe, adda_priv->dl_rate); + + if (id == MT8196_DAI_ADDA) { + /* MTKAIF sample rate config */ + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_TX_CFG0, + MTKAIFV4_TXIF_INPUT_MODE_MASK_SFT, + mtkaif_rate << MTKAIFV4_TXIF_INPUT_MODE_SFT); + /* AFE_ADDA_MTKAIFV4_TX_CFG0 */ + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_TX_CFG0, + MTKAIFV4_TXIF_FOUR_CHANNEL_MASK_SFT, + 0x0 << MTKAIFV4_TXIF_FOUR_CHANNEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_TX_CFG0, + MTKAIFV4_ADDA_OUT_EN_SEL_MASK_SFT, + 0x1 << MTKAIFV4_ADDA_OUT_EN_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_TX_CFG0, + MTKAIFV4_ADDA6_OUT_EN_SEL_MASK_SFT, + 0x1 << MTKAIFV4_ADDA6_OUT_EN_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_TX_CFG0, + MTKAIFV4_TXIF_V4_MASK_SFT, + 0x1 << MTKAIFV4_TXIF_V4_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_TX_CFG0, + MTKAIFV4_TXIF_EN_SEL_MASK_SFT, + 0x0 << MTKAIFV4_TXIF_EN_SEL_SFT); + /* clean predistortion */ + } else 
{ + /* MTKAIF sample rate config */ + regmap_update_bits(afe->regmap, AFE_ADDA6_MTKAIFV4_TX_CFG0, + ADDA6_MTKAIFV4_TXIF_INPUT_MODE_MASK_SFT, + mtkaif_rate << ADDA6_MTKAIFV4_TXIF_INPUT_MODE_SFT); + /* AFE_ADDA6_MTKAIFV4_TX_CFG0 */ + regmap_update_bits(afe->regmap, AFE_ADDA6_MTKAIFV4_TX_CFG0, + ADDA6_MTKAIFV4_TXIF_FOUR_CHANNEL_MASK_SFT, + 0x0 << ADDA6_MTKAIFV4_TXIF_FOUR_CHANNEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA6_MTKAIFV4_TX_CFG0, + ADDA6_MTKAIFV4_TXIF_EN_SEL_MASK_SFT, + 0x1 << ADDA6_MTKAIFV4_TXIF_EN_SEL_SFT); + } + + return 0; +}; + +static int set_capture_hw_params(struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + unsigned int rate = params_rate(params); + struct mtk_afe_adda_priv *adda_priv; + unsigned int voice_mode = 0; + unsigned int ul_src_con0 = 0; + unsigned int mtkaif_rate = 0; + int id = dai->id; + + adda_priv = afe_priv->dai_priv[id]; + if (!adda_priv) + return -EINVAL; + + adda_priv->ul_rate = rate; + + /* get mtkaif dl rate */ + mtkaif_rate = mtkaif_rate_transform(afe, adda_priv->ul_rate); + + voice_mode = adda_ul_rate_transform(afe, rate); + + ul_src_con0 |= (voice_mode << 17) & (0x7 << 17); + + /* enable iir */ + ul_src_con0 |= (1 << UL_IIR_ON_TMP_CTL_SFT) & + UL_IIR_ON_TMP_CTL_MASK_SFT; + ul_src_con0 |= (UL_IIR_SW << UL_IIRMODE_CTL_SFT) & + UL_IIRMODE_CTL_MASK_SFT; + + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_RXIF_INPUT_MODE_MASK_SFT, + mtkaif_rate << MTKAIFV4_RXIF_INPUT_MODE_SFT); + + regmap_update_bits(afe->regmap, AFE_ADDA6_MTKAIFV4_RX_CFG0, + ADDA6_MTKAIFV4_RXIF_INPUT_MODE_MASK_SFT, + mtkaif_rate << ADDA6_MTKAIFV4_RXIF_INPUT_MODE_SFT); + + switch (id) { + case MT8196_DAI_ADDA: + case MT8196_DAI_AP_DMIC: + case MT8196_DAI_AP_DMIC_MULTICH: + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_RXIF_INPUT_MODE_MASK_SFT, + mtkaif_rate << 
MTKAIFV4_RXIF_INPUT_MODE_SFT); + /* AFE_ADDA_MTKAIFV4_RX_CFG0 */ + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_RXIF_FOUR_CHANNEL_MASK_SFT, + 0x1 << MTKAIFV4_RXIF_FOUR_CHANNEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_RXIF_EN_SEL_MASK_SFT, + 0x0 << MTKAIFV4_RXIF_EN_SEL_SFT); + /* [28] loopback mode + * 0: loopback adda tx to adda rx + * 1: loopback adda6 tx to adda rx + */ + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_TXIF_EN_SEL_MASK_SFT, + 0x0 << MTKAIFV4_TXIF_EN_SEL_SFT); + + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_UL_CH1CH2_IN_EN_SEL_MASK_SFT, + 0x1 << MTKAIFV4_UL_CH1CH2_IN_EN_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_UL_CH3CH4_IN_EN_SEL_MASK_SFT, + 0x1 << MTKAIFV4_UL_CH3CH4_IN_EN_SEL_SFT); + + /* 35Hz @ 48k */ + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_02_01, 0x00000000); + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_04_03, 0x00003FB8); + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_06_05, 0x3FB80000); + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_08_07, 0x3FB80000); + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_10_09, 0x0000C048); + + regmap_write(afe->regmap, + AFE_ADDA_UL1_SRC_CON0, ul_src_con0); + + /* mtkaif_rxif_data_mode = 0, amic */ + regmap_update_bits(afe->regmap, + AFE_MTKAIF1_RX_CFG0, + 0x1 << 0, + 0x0 << 0); + + /* 35Hz @ 48k */ + regmap_write(afe->regmap, + AFE_ADDA_UL0_IIR_COEF_02_01, 0x00000000); + regmap_write(afe->regmap, + AFE_ADDA_UL0_IIR_COEF_04_03, 0x00003FB8); + regmap_write(afe->regmap, + AFE_ADDA_UL0_IIR_COEF_06_05, 0x3FB80000); + regmap_write(afe->regmap, + AFE_ADDA_UL0_IIR_COEF_08_07, 0x3FB80000); + regmap_write(afe->regmap, + AFE_ADDA_UL0_IIR_COEF_10_09, 0x0000C048); + + regmap_write(afe->regmap, + AFE_ADDA_UL0_SRC_CON0, ul_src_con0); + + /* mtkaif_rxif_data_mode = 0, amic */ + regmap_update_bits(afe->regmap, + AFE_MTKAIF0_RX_CFG0, + 0x1 << 0, + 
0x0 << 0); + break; + case MT8196_DAI_ADDA_CH34: + case MT8196_DAI_AP_DMIC_CH34: + /* AFE_ADDA_MTKAIFV4_RX_CFG0 */ + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_RXIF_FOUR_CHANNEL_MASK_SFT, + 0x1 << MTKAIFV4_RXIF_FOUR_CHANNEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_RXIF_EN_SEL_MASK_SFT, + 0x0 << MTKAIFV4_RXIF_EN_SEL_SFT); + + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_UL_CH1CH2_IN_EN_SEL_MASK_SFT, + 0x1 << MTKAIFV4_UL_CH1CH2_IN_EN_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_UL_CH3CH4_IN_EN_SEL_MASK_SFT, + 0x1 << MTKAIFV4_UL_CH3CH4_IN_EN_SEL_SFT); + + /* 35Hz @ 48k */ + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_02_01, 0x00000000); + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_04_03, 0x00003FB8); + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_06_05, 0x3FB80000); + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_08_07, 0x3FB80000); + regmap_write(afe->regmap, + AFE_ADDA_UL1_IIR_COEF_10_09, 0x0000C048); + + regmap_write(afe->regmap, + AFE_ADDA_UL1_SRC_CON0, ul_src_con0); + + /* mtkaif_rxif_data_mode = 0, amic */ + regmap_update_bits(afe->regmap, + AFE_MTKAIF1_RX_CFG0, + 0x1 << 0, + 0x0 << 0); + break; + case MT8196_DAI_ADDA_CH56: + regmap_update_bits(afe->regmap, AFE_ADDA6_MTKAIFV4_RX_CFG0, + ADDA6_MTKAIFV4_RXIF_INPUT_MODE_MASK_SFT, + mtkaif_rate << ADDA6_MTKAIFV4_RXIF_INPUT_MODE_SFT); + /* AFE_ADDA6_MTKAIFV4_RX_CFG0 */ + regmap_update_bits(afe->regmap, AFE_ADDA6_MTKAIFV4_RX_CFG0, + ADDA6_MTKAIFV4_RXIF_FOUR_CHANNEL_MASK_SFT, + 0x1 << ADDA6_MTKAIFV4_RXIF_FOUR_CHANNEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIFV4_RX_CFG0, + MTKAIFV4_UL_CH5CH6_IN_EN_SEL_MASK_SFT, + 0x1 << MTKAIFV4_UL_CH5CH6_IN_EN_SEL_SFT); + regmap_update_bits(afe->regmap, AFE_ADDA6_MTKAIFV4_RX_CFG0, + ADDA6_MTKAIFV4_RXIF_EN_SEL_MASK_SFT, + 0x1 << ADDA6_MTKAIFV4_RXIF_EN_SEL_SFT); + break; + default: + break; + } + + /* ap dmic */ + switch (id) { + 
case MT8196_DAI_AP_DMIC: + case MT8196_DAI_AP_DMIC_CH34: + mtk_adda_ul_src_enable_dmic(afe, id); + break; + case MT8196_DAI_AP_DMIC_MULTICH: + regmap_update_bits(afe->regmap, AFE_ADDA_ULSRC_PHASE_CON1, + DMIC_CLK_PHASE_SYNC_SET_MASK_SFT, + 0x1 << DMIC_CLK_PHASE_SYNC_SET_SFT); + mtk_adda_ul_src_set_dmic_phase_sync(afe); + mtk_adda_ul_src_enable_dmic(afe, MT8196_DAI_AP_DMIC); + mtk_adda_ul_src_enable_dmic(afe, MT8196_DAI_AP_DMIC_CH34); + mtk_adda_ul_src_set_dmic_phase_sync_clock(afe); + break; + default: + break; + } + + return 0; +}; + +static int mtk_dai_adda_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + int id = dai->id; + + if (id >= MT8196_DAI_NUM || id < 0) + return -EINVAL; + + dev_dbg(afe->dev, "id %d, stream %d, rate %d\n", + id, substream->stream, params_rate(params)); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + return set_playback_hw_params(params, dai); + else + return set_capture_hw_params(params, dai); + + return 0; +} + +static const struct snd_soc_dai_ops mtk_dai_adda_ops = { + .hw_params = mtk_dai_adda_hw_params, +}; + +/* dai driver */ +#define MTK_ADDA_PLAYBACK_RATES (SNDRV_PCM_RATE_8000_48000 |\ + SNDRV_PCM_RATE_96000 |\ + SNDRV_PCM_RATE_192000) + +#define MTK_ADDA_CAPTURE_RATES (SNDRV_PCM_RATE_8000 |\ + SNDRV_PCM_RATE_16000 |\ + SNDRV_PCM_RATE_32000 |\ + SNDRV_PCM_RATE_48000 |\ + SNDRV_PCM_RATE_96000 |\ + SNDRV_PCM_RATE_192000) + +#define MTK_ADDA_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S24_LE |\ + SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_driver mtk_dai_adda_driver[] = { + { + .name = "ADDA", + .id = MT8196_DAI_ADDA, + .playback = { + .stream_name = "ADDA Playback", + .channels_min = 1, + .channels_max = 2, + .rates = MTK_ADDA_PLAYBACK_RATES, + .formats = MTK_ADDA_FORMATS, + }, + .capture = { + .stream_name = "ADDA Capture", + .channels_min = 1, + .channels_max = 2, + .rates = 
MTK_ADDA_CAPTURE_RATES, + .formats = MTK_ADDA_FORMATS, + }, + .ops = &mtk_dai_adda_ops, + }, + { + .name = "ADDA_CH34", + .id = MT8196_DAI_ADDA_CH34, + .playback = { + .stream_name = "ADDA CH34 Playback", + .channels_min = 1, + .channels_max = 2, + .rates = MTK_ADDA_PLAYBACK_RATES, + .formats = MTK_ADDA_FORMATS, + }, + .capture = { + .stream_name = "ADDA CH34 Capture", + .channels_min = 1, + .channels_max = 2, + .rates = MTK_ADDA_CAPTURE_RATES, + .formats = MTK_ADDA_FORMATS, + }, + .ops = &mtk_dai_adda_ops, + }, + { + .name = "ADDA_CH56", + .id = MT8196_DAI_ADDA_CH56, + .capture = { + .stream_name = "ADDA CH56 Capture", + .channels_min = 1, + .channels_max = 2, + .rates = MTK_ADDA_CAPTURE_RATES, + .formats = MTK_ADDA_FORMATS, + }, + .ops = &mtk_dai_adda_ops, + }, + { + .name = "AP_DMIC", + .id = MT8196_DAI_AP_DMIC, + .capture = { + .stream_name = "AP DMIC Capture", + .channels_min = 1, + .channels_max = 2, + .rates = MTK_ADDA_CAPTURE_RATES, + .formats = MTK_ADDA_FORMATS, + }, + .ops = &mtk_dai_adda_ops, + }, + { + .name = "AP_DMIC_CH34", + .id = MT8196_DAI_AP_DMIC_CH34, + .capture = { + .stream_name = "AP DMIC CH34 Capture", + .channels_min = 1, + .channels_max = 2, + .rates = MTK_ADDA_CAPTURE_RATES, + .formats = MTK_ADDA_FORMATS, + }, + .ops = &mtk_dai_adda_ops, + }, + /* + * Multich DMIC combines two DMIC controllers for use together, + * so AP_DMIC and Multich DMIC cannot be used at the same time. 
+ */ + { + .name = "AP_DMIC_MULTICH", + .id = MT8196_DAI_AP_DMIC_MULTICH, + .capture = { + .stream_name = "AP DMIC MULTICH Capture", + .channels_min = 1, + .channels_max = 4, + .rates = MTK_ADDA_CAPTURE_RATES, + .formats = MTK_ADDA_FORMATS, + }, + .ops = &mtk_dai_adda_ops, + }, +}; + +static int init_adda_priv_data(struct mtk_base_afe *afe) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + struct mtk_afe_adda_priv *adda_priv; + static const int adda_dai_list[] = { + MT8196_DAI_ADDA, + MT8196_DAI_ADDA_CH34, + MT8196_DAI_ADDA_CH56, + MT8196_DAI_AP_DMIC_MULTICH + }; + int i; + + for (i = 0; i < ARRAY_SIZE(adda_dai_list); i++) { + adda_priv = devm_kzalloc(afe->dev, + sizeof(struct mtk_afe_adda_priv), + GFP_KERNEL); + if (!adda_priv) + return -ENOMEM; + + afe_priv->dai_priv[adda_dai_list[i]] = adda_priv; + } + + /* ap dmic priv share with adda */ + afe_priv->dai_priv[MT8196_DAI_AP_DMIC] = + afe_priv->dai_priv[MT8196_DAI_ADDA]; + afe_priv->dai_priv[MT8196_DAI_AP_DMIC_CH34] = + afe_priv->dai_priv[MT8196_DAI_ADDA_CH34]; + + return 0; +} + +int mt8196_dai_adda_register(struct mtk_base_afe *afe) +{ + struct mtk_base_afe_dai *dai; + int ret; + + dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL); + if (!dai) + return -ENOMEM; + + dai->dai_drivers = mtk_dai_adda_driver; + dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_adda_driver); + dai->dapm_widgets = mtk_dai_adda_widgets; + dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_adda_widgets); + dai->dapm_routes = mtk_dai_adda_routes; + dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_adda_routes); + + ret = init_adda_priv_data(afe); + if (ret) + return ret; + + list_add(&dai->list, &afe->sub_dais); + + return 0; +} + diff --git a/sound/soc/mediatek/mt8196/mt8196-dai-i2s.c b/sound/soc/mediatek/mt8196/mt8196-dai-i2s.c new file mode 100644 index 00000000000000..ef5cde0ba829e0 --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-dai-i2s.c @@ -0,0 +1,2613 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek ALSA SoC Audio DAI I2S 
Control + * + * Copyright (c) 2025 MediaTek Inc. + * Author: Darren Ye + */ + +#include +#include + +#include + +#include "mt8196-afe-clk.h" +#include "mt8196-afe-common.h" +#include "mt8196-interconnection.h" + +#include "../common/mtk-afe-fe-dai.h" + +#define ETDM_22M_CLOCK_THRES 11289600 + +enum { + ETDM_CLK_SOURCE_H26M, + ETDM_CLK_SOURCE_APLL, + ETDM_CLK_SOURCE_SPDIF, + ETDM_CLK_SOURCE_HDMI, + ETDM_CLK_SOURCE_EARC, + ETDM_CLK_SOURCE_LINEIN, +}; + +enum { + ETDM_RELATCH_SEL_H26M, + ETDM_RELATCH_SEL_APLL, +}; + +enum { + ETDM_RATE_8K, + ETDM_RATE_12K, + ETDM_RATE_16K, + ETDM_RATE_24K, + ETDM_RATE_32K, + ETDM_RATE_48K, + ETDM_RATE_64K, + ETDM_RATE_96K, + ETDM_RATE_128K, + ETDM_RATE_192K, + ETDM_RATE_256K, + ETDM_RATE_384K, + ETDM_RATE_11025 = 16, + ETDM_RATE_22050, + ETDM_RATE_44100, + ETDM_RATE_88200, + ETDM_RATE_176400, + ETDM_RATE_352800, +}; + +enum { + ETDM_CONN_8K, + ETDM_CONN_11K, + ETDM_CONN_12K, + ETDM_CONN_16K = 4, + ETDM_CONN_22K, + ETDM_CONN_24K, + ETDM_CONN_32K = 8, + ETDM_CONN_44K, + ETDM_CONN_48K, + ETDM_CONN_88K = 13, + ETDM_CONN_96K, + ETDM_CONN_176K = 17, + ETDM_CONN_192K, + ETDM_CONN_352K = 21, + ETDM_CONN_384K, +}; + +enum { + ETDM_WLEN_8_BIT = 0x7, + ETDM_WLEN_16_BIT = 0xf, + ETDM_WLEN_32_BIT = 0x1f, +}; + +enum { + ETDM_SLAVE_SEL_ETDMIN0_MASTER, + ETDM_SLAVE_SEL_ETDMIN0_SLAVE, + ETDM_SLAVE_SEL_ETDMIN1_MASTER, + ETDM_SLAVE_SEL_ETDMIN1_SLAVE, + ETDM_SLAVE_SEL_ETDMIN2_MASTER, + ETDM_SLAVE_SEL_ETDMIN2_SLAVE, + ETDM_SLAVE_SEL_ETDMIN3_MASTER, + ETDM_SLAVE_SEL_ETDMIN3_SLAVE, + ETDM_SLAVE_SEL_ETDMOUT0_MASTER, + ETDM_SLAVE_SEL_ETDMOUT0_SLAVE, + ETDM_SLAVE_SEL_ETDMOUT1_MASTER, + ETDM_SLAVE_SEL_ETDMOUT1_SLAVE, + ETDM_SLAVE_SEL_ETDMOUT2_MASTER, + ETDM_SLAVE_SEL_ETDMOUT2_SLAVE, + ETDM_SLAVE_SEL_ETDMOUT3_MASTER, + ETDM_SLAVE_SEL_ETDMOUT3_SLAVE, +}; + +enum { + ETDM_SLAVE_SEL_ETDMIN4_MASTER, + ETDM_SLAVE_SEL_ETDMIN4_SLAVE, + ETDM_SLAVE_SEL_ETDMIN5_MASTER, + ETDM_SLAVE_SEL_ETDMIN5_SLAVE, + ETDM_SLAVE_SEL_ETDMIN6_MASTER, + ETDM_SLAVE_SEL_ETDMIN6_SLAVE, + 
ETDM_SLAVE_SEL_ETDMIN7_MASTER, + ETDM_SLAVE_SEL_ETDMIN7_SLAVE, + ETDM_SLAVE_SEL_ETDMOUT4_MASTER, + ETDM_SLAVE_SEL_ETDMOUT4_SLAVE, + ETDM_SLAVE_SEL_ETDMOUT5_MASTER, + ETDM_SLAVE_SEL_ETDMOUT5_SLAVE, + ETDM_SLAVE_SEL_ETDMOUT6_MASTER, + ETDM_SLAVE_SEL_ETDMOUT6_SLAVE, + ETDM_SLAVE_SEL_ETDMOUT7_MASTER, + ETDM_SLAVE_SEL_ETDMOUT7_SLAVE, +}; + +enum { + MTK_DAI_ETDM_FORMAT_I2S, + MTK_DAI_ETDM_FORMAT_LJ, + MTK_DAI_ETDM_FORMAT_RJ, + MTK_DAI_ETDM_FORMAT_EIAJ, + MTK_DAI_ETDM_FORMAT_DSPA, + MTK_DAI_ETDM_FORMAT_DSPB, +}; + +static unsigned int get_etdm_wlen(snd_pcm_format_t format) +{ + return snd_pcm_format_physical_width(format) < 16 ? + ETDM_WLEN_16_BIT : ETDM_WLEN_32_BIT; +} + +static unsigned int get_etdm_lrck_width(snd_pcm_format_t format) +{ + /* The valid data bit number should be large than 7 due to hardware limitation. */ + return snd_pcm_format_physical_width(format) - 1; +} + +static unsigned int get_etdm_rate(unsigned int rate) +{ + switch (rate) { + case 8000: + return ETDM_RATE_8K; + case 12000: + return ETDM_RATE_12K; + case 16000: + return ETDM_RATE_16K; + case 24000: + return ETDM_RATE_24K; + case 32000: + return ETDM_RATE_32K; + case 48000: + return ETDM_RATE_48K; + case 64000: + return ETDM_RATE_64K; + case 96000: + return ETDM_RATE_96K; + case 128000: + return ETDM_RATE_128K; + case 192000: + return ETDM_RATE_192K; + case 256000: + return ETDM_RATE_256K; + case 384000: + return ETDM_RATE_384K; + case 11025: + return ETDM_RATE_11025; + case 22050: + return ETDM_RATE_22050; + case 44100: + return ETDM_RATE_44100; + case 88200: + return ETDM_RATE_88200; + case 176400: + return ETDM_RATE_176400; + case 352800: + return ETDM_RATE_352800; + default: + return 0; + } +} + +static unsigned int get_etdm_inconn_rate(unsigned int rate) +{ + switch (rate) { + case 8000: + return ETDM_CONN_8K; + case 12000: + return ETDM_CONN_12K; + case 16000: + return ETDM_CONN_16K; + case 24000: + return ETDM_CONN_24K; + case 32000: + return ETDM_CONN_32K; + case 48000: + return 
ETDM_CONN_48K; + case 96000: + return ETDM_CONN_96K; + case 192000: + return ETDM_CONN_192K; + case 384000: + return ETDM_CONN_384K; + case 11025: + return ETDM_CONN_11K; + case 22050: + return ETDM_CONN_22K; + case 44100: + return ETDM_CONN_44K; + case 88200: + return ETDM_CONN_88K; + case 176400: + return ETDM_CONN_176K; + case 352800: + return ETDM_CONN_352K; + default: + return 0; + } +} + +struct mtk_afe_i2s_priv { + u8 id; + u32 rate; /* for determine which apll to use */ + int low_jitter_en; + const char *share_property_name; + int share_i2s_id; + u32 mclk_rate; + u8 mclk_id; + u8 mclk_apll; + u8 ch_num; + u8 sync; + u8 ip_mode; + u8 format; +}; + +/* this enum is merely for mtk_afe_i2s_priv & mtk_base_etdm_data declare */ +enum { + DAI_I2SIN0, + DAI_I2SIN1, + DAI_I2SIN2, + DAI_I2SIN3, + DAI_I2SIN4, + DAI_I2SIN6, + DAI_I2SOUT0, + DAI_I2SOUT1, + DAI_I2SOUT2, + DAI_I2SOUT3, + DAI_I2SOUT4, + DAI_I2SOUT6, + DAI_FMI2S_MASTER, + DAI_I2S_NUM, +}; + +static bool is_etdm_in_pad_top(unsigned int dai_num) +{ + switch (dai_num) { + case DAI_I2SOUT4: + case DAI_I2SIN4: + return true; + default: + return false; + } +} + +struct mtk_base_etdm_data { + u16 enable_reg; + u16 enable_mask; + u8 enable_shift; + u16 sync_reg; + u16 sync_mask; + u8 sync_shift; + u16 ch_reg; + u16 ch_mask; + u8 ch_shift; + u16 ip_mode_reg; + u16 ip_mode_mask; + u8 ip_mode_shift; + u16 init_count_reg; + u16 init_count_mask; + u8 init_count_shift; + u16 init_point_reg; + u16 init_point_mask; + u8 init_point_shift; + u16 lrck_reset_reg; + u16 lrck_reset_mask; + u8 lrck_reset_shift; + u16 clk_source_reg; + u16 clk_source_mask; + u8 clk_source_shift; + u16 ck_en_sel_reg; + u16 ck_en_sel_mask; + u8 ck_en_sel_shift; + u16 fs_timing_reg; + u16 fs_timing_mask; + u8 fs_timing_shift; + u16 relatch_en_sel_reg; + u16 relatch_en_sel_mask; + u8 relatch_en_sel_shift; + u16 use_afifo_reg; + u16 use_afifo_mask; + u8 use_afifo_shift; + u16 afifo_mode_reg; + u16 afifo_mode_mask; + u8 afifo_mode_shift; + u16 
almost_end_ch_reg; + u16 almost_end_ch_mask; + u8 almost_end_ch_shift; + u16 almost_end_bit_reg; + u16 almost_end_bit_mask; + u8 almost_end_bit_shift; + u16 out2latch_time_reg; + u16 out2latch_time_mask; + u8 out2latch_time_shift; + u16 tdm_mode_reg; + u16 tdm_mode_mask; + u8 tdm_mode_shift; + u16 relatch_domain_sel_reg; + u16 relatch_domain_sel_mask; + u8 relatch_domain_sel_shift; + u16 bit_length_reg; + u16 bit_length_mask; + u8 bit_length_shift; + u16 word_length_reg; + u16 word_length_mask; + u8 word_length_shift; + u16 cowork_reg; + u16 cowork_mask; + u8 cowork_shift; + u32 cowork_val; + u16 in2latch_time_reg; + u16 in2latch_time_mask; + u8 in2latch_time_shift; + u16 pad_top_ck_en_reg; + u16 pad_top_ck_en_mask; + u8 pad_top_ck_en_shift; + u16 master_latch_reg; + u16 master_latch_mask; + u8 master_latch_shift; +}; + +/* + * _cfg_vlp_reg should be a variable or constant, not an expression + * with side effects. + */ +#define MTK_ETDM_IN_DATA(_id, _cowork, _cfg_vlp_reg) \ + [DAI_I2SIN##_id] = { \ + .enable_reg = ETDM_IN##_id##_CON0, \ + .enable_mask = REG_ETDM_IN_EN_MASK, \ + .enable_shift = REG_ETDM_IN_EN_SFT, \ + .sync_reg = ETDM_IN##_id##_CON0, \ + .sync_mask = REG_SYNC_MODE_MASK, \ + .sync_shift = REG_SYNC_MODE_SFT, \ + .ch_reg = ETDM_IN##_id##_CON0, \ + .ch_mask = REG_CH_NUM_MASK, \ + .ch_shift = REG_CH_NUM_SFT, \ + .ip_mode_reg = ETDM_IN##_id##_CON2, \ + .ip_mode_mask = REG_MULTI_IP_MODE_MASK, \ + .ip_mode_shift = REG_MULTI_IP_MODE_SFT, \ + .init_count_reg = ETDM_IN##_id##_CON1, \ + .init_count_mask = REG_INITIAL_COUNT_MASK, \ + .init_count_shift = REG_INITIAL_COUNT_SFT, \ + .init_point_reg = ETDM_IN##_id##_CON1, \ + .init_point_mask = REG_INITIAL_POINT_MASK, \ + .init_point_shift = REG_INITIAL_POINT_SFT, \ + .lrck_reset_reg = ETDM_IN##_id##_CON1, \ + .lrck_reset_mask = REG_LRCK_RESET_MASK, \ + .lrck_reset_shift = REG_LRCK_RESET_SFT, \ + .clk_source_reg = ETDM_IN##_id##_CON2, \ + .clk_source_mask = REG_CLOCK_SOURCE_SEL_MASK, \ + .clk_source_shift = 
REG_CLOCK_SOURCE_SEL_SFT, \ + .ck_en_sel_reg = ETDM_IN##_id##_CON2, \ + .ck_en_sel_mask = REG_CK_EN_SEL_AUTO_MASK, \ + .ck_en_sel_shift = REG_CK_EN_SEL_AUTO_SFT, \ + .fs_timing_reg = ETDM_IN##_id##_CON3, \ + .fs_timing_mask = REG_FS_TIMING_SEL_MASK, \ + .fs_timing_shift = REG_FS_TIMING_SEL_SFT, \ + .relatch_en_sel_reg = ETDM_IN##_id##_CON4, \ + .relatch_en_sel_mask = REG_RELATCH_1X_EN_SEL_MASK, \ + .relatch_en_sel_shift = REG_RELATCH_1X_EN_SEL_SFT, \ + .use_afifo_reg = ETDM_IN##_id##_CON8, \ + .use_afifo_mask = REG_ETDM_USE_AFIFO_MASK, \ + .use_afifo_shift = REG_ETDM_USE_AFIFO_SFT, \ + .afifo_mode_reg = ETDM_IN##_id##_CON8, \ + .afifo_mode_mask = REG_AFIFO_MODE_MASK, \ + .afifo_mode_shift = REG_AFIFO_MODE_SFT, \ + .almost_end_ch_reg = ETDM_IN##_id##_CON9, \ + .almost_end_ch_mask = REG_ALMOST_END_CH_COUNT_MASK, \ + .almost_end_ch_shift = REG_ALMOST_END_CH_COUNT_SFT, \ + .almost_end_bit_reg = ETDM_IN##_id##_CON9, \ + .almost_end_bit_mask = REG_ALMOST_END_BIT_COUNT_MASK, \ + .almost_end_bit_shift = REG_ALMOST_END_BIT_COUNT_SFT, \ + .out2latch_time_reg = ETDM_IN##_id##_CON9, \ + .out2latch_time_mask = REG_OUT2LATCH_TIME_MASK, \ + .out2latch_time_shift = REG_OUT2LATCH_TIME_SFT, \ + .tdm_mode_reg = ETDM_IN##_id##_CON0, \ + .tdm_mode_mask = REG_FMT_MASK, \ + .tdm_mode_shift = REG_FMT_SFT, \ + .relatch_domain_sel_reg = ETDM_IN##_id##_CON0, \ + .relatch_domain_sel_mask = REG_RELATCH_1X_EN_DOMAIN_SEL_MASK, \ + .relatch_domain_sel_shift = REG_RELATCH_1X_EN_DOMAIN_SEL_SFT, \ + .bit_length_reg = ETDM_IN##_id##_CON0, \ + .bit_length_mask = REG_BIT_LENGTH_MASK, \ + .bit_length_shift = REG_BIT_LENGTH_SFT, \ + .word_length_reg = ETDM_IN##_id##_CON0, \ + .word_length_mask = REG_WORD_LENGTH_MASK, \ + .word_length_shift = REG_WORD_LENGTH_SFT, \ + .cowork_reg = _cowork, \ + .cowork_mask = ETDM_IN##_id##_SLAVE_SEL_MASK, \ + .cowork_shift = ETDM_IN##_id##_SLAVE_SEL_SFT, \ + .cowork_val = ETDM_SLAVE_SEL_ETDMOUT##_id##_MASTER, \ + .pad_top_ck_en_reg = _cfg_vlp_reg, \ + .pad_top_ck_en_mask 
= RG_I2S4_PAD_TOP_CK_EN_MASK, \ + .pad_top_ck_en_shift = RG_I2S4_PAD_TOP_CK_EN_SFT, \ + .master_latch_reg = _cfg_vlp_reg, \ + .master_latch_mask = RG_I2S4_IN_BCK_NEG_EG_LATCH_MASK, \ + .master_latch_shift = RG_I2S4_IN_BCK_NEG_EG_LATCH_SFT, \ + } + +/* + * _cfg_vlp_reg should be a variable or constant, not an expression + * with side effects. + */ +#define MTK_ETDM_OUT_DATA(_id, _cowork, _cfg_vlp_reg) \ + [DAI_I2SOUT##_id] = { \ + .enable_reg = ETDM_OUT##_id##_CON0, \ + .enable_mask = OUT_REG_ETDM_OUT_EN_MASK, \ + .enable_shift = OUT_REG_ETDM_OUT_EN_SFT, \ + .sync_reg = ETDM_OUT##_id##_CON0, \ + .sync_mask = REG_SYNC_MODE_MASK, \ + .sync_shift = REG_SYNC_MODE_SFT, \ + .ch_reg = ETDM_OUT##_id##_CON0, \ + .ch_mask = REG_CH_NUM_MASK, \ + .ch_shift = REG_CH_NUM_SFT, \ + .init_count_reg = ETDM_OUT##_id##_CON1, \ + .init_count_mask = OUT_REG_INITIAL_COUNT_MASK, \ + .init_count_shift = OUT_REG_INITIAL_COUNT_SFT, \ + .init_point_reg = ETDM_OUT##_id##_CON1, \ + .init_point_mask = OUT_REG_INITIAL_POINT_MASK, \ + .init_point_shift = OUT_REG_INITIAL_POINT_SFT, \ + .lrck_reset_reg = ETDM_OUT##_id##_CON1, \ + .lrck_reset_mask = OUT_REG_LRCK_RESET_MASK, \ + .lrck_reset_shift = OUT_REG_LRCK_RESET_SFT, \ + .clk_source_reg = ETDM_OUT##_id##_CON4, \ + .clk_source_mask = OUT_REG_CLOCK_SOURCE_SEL_MASK, \ + .clk_source_shift = OUT_REG_CLOCK_SOURCE_SEL_SFT, \ + .fs_timing_reg = ETDM_OUT##_id##_CON4, \ + .fs_timing_mask = OUT_REG_FS_TIMING_SEL_MASK, \ + .fs_timing_shift = OUT_REG_FS_TIMING_SEL_SFT, \ + .relatch_en_sel_reg = ETDM_OUT##_id##_CON4, \ + .relatch_en_sel_mask = OUT_REG_RELATCH_EN_SEL_MASK, \ + .relatch_en_sel_shift = OUT_REG_RELATCH_EN_SEL_SFT, \ + .tdm_mode_reg = ETDM_OUT##_id##_CON0, \ + .tdm_mode_mask = OUT_REG_FMT_MASK, \ + .tdm_mode_shift = OUT_REG_FMT_SFT, \ + .relatch_domain_sel_reg = ETDM_OUT##_id##_CON0, \ + .relatch_domain_sel_mask = OUT_REG_RELATCH_DOMAIN_SEL_MASK, \ + .relatch_domain_sel_shift = OUT_REG_RELATCH_DOMAIN_SEL_SFT, \ + .bit_length_reg = 
ETDM_OUT##_id##_CON0, \ + .bit_length_mask = OUT_REG_BIT_LENGTH_MASK, \ + .bit_length_shift = OUT_REG_BIT_LENGTH_SFT, \ + .word_length_reg = ETDM_OUT##_id##_CON0, \ + .word_length_mask = OUT_REG_WORD_LENGTH_MASK, \ + .word_length_shift = OUT_REG_WORD_LENGTH_SFT, \ + .cowork_reg = _cowork, \ + .cowork_mask = ETDM_OUT##_id##_SLAVE_SEL_MASK, \ + .cowork_shift = ETDM_OUT##_id##_SLAVE_SEL_SFT, \ + .cowork_val = ETDM_SLAVE_SEL_ETDMIN##_id##_MASTER, \ + .in2latch_time_reg = ETDM_OUT##_id##_CON2, \ + .in2latch_time_mask = OUT_REG_IN2LATCH_TIME_MASK, \ + .in2latch_time_shift = OUT_REG_IN2LATCH_TIME_SFT, \ + .pad_top_ck_en_reg = _cfg_vlp_reg, \ + .pad_top_ck_en_mask = RG_I2S4_PAD_TOP_CK_EN_MASK, \ + .pad_top_ck_en_shift = RG_I2S4_PAD_TOP_CK_EN_SFT, \ + .master_latch_reg = _cfg_vlp_reg, \ + .master_latch_mask = RG_I2S4_OUT_BCK_NEG_EG_LATCH_MASK, \ + .master_latch_shift = RG_I2S4_OUT_BCK_NEG_EG_LATCH_SFT, \ + } + +static const struct mtk_base_etdm_data mtk_etdm_data[DAI_I2S_NUM] = { + MTK_ETDM_IN_DATA(0, ETDM_0_3_COWORK_CON0, 0), + MTK_ETDM_IN_DATA(1, ETDM_0_3_COWORK_CON1, 0), + MTK_ETDM_IN_DATA(2, ETDM_0_3_COWORK_CON2, 0), + MTK_ETDM_IN_DATA(3, ETDM_0_3_COWORK_CON3, 0), + MTK_ETDM_IN_DATA(4, ETDM_4_7_COWORK_CON0, AUD_TOP_CFG_VLP_RG), + MTK_ETDM_IN_DATA(6, ETDM_4_7_COWORK_CON2, 0), + + MTK_ETDM_OUT_DATA(0, ETDM_0_3_COWORK_CON0, 0), + MTK_ETDM_OUT_DATA(1, ETDM_0_3_COWORK_CON0, 0), + MTK_ETDM_OUT_DATA(2, ETDM_0_3_COWORK_CON2, 0), + MTK_ETDM_OUT_DATA(3, ETDM_0_3_COWORK_CON2, 0), + MTK_ETDM_OUT_DATA(4, ETDM_4_7_COWORK_CON0, AUD_TOP_CFG_VLP_RG), + MTK_ETDM_OUT_DATA(6, ETDM_4_7_COWORK_CON2, 0), +}; + +enum { + I2S_FMT_EIAJ, + I2S_FMT_I2S, +}; + +enum { + I2S_WLEN_16_BIT, + I2S_WLEN_32_BIT, +}; + +enum { + I2S_IN_PAD_CONNSYS, + I2S_IN_PAD_IO_MUX, +}; + +static unsigned int get_i2s_wlen(snd_pcm_format_t format) +{ + return snd_pcm_format_physical_width(format) <= 16 ? 
+ I2S_WLEN_16_BIT : I2S_WLEN_32_BIT; +} + +#define I2SIN0_MCLK_EN_W_NAME "I2SIN0_MCLK_EN" +#define I2SIN1_MCLK_EN_W_NAME "I2SIN1_MCLK_EN" +#define I2SIN2_MCLK_EN_W_NAME "I2SIN2_MCLK_EN" +#define I2SIN3_MCLK_EN_W_NAME "I2SIN3_MCLK_EN" +#define I2SIN4_MCLK_EN_W_NAME "I2SIN4_MCLK_EN" +#define I2SIN6_MCLK_EN_W_NAME "I2SIN6_MCLK_EN" +#define I2SOUT0_MCLK_EN_W_NAME "I2SOUT0_MCLK_EN" +#define I2SOUT1_MCLK_EN_W_NAME "I2SOUT1_MCLK_EN" +#define I2SOUT2_MCLK_EN_W_NAME "I2SOUT2_MCLK_EN" +#define I2SOUT3_MCLK_EN_W_NAME "I2SOUT3_MCLK_EN" +#define I2SOUT4_MCLK_EN_W_NAME "I2SOUT4_MCLK_EN" +#define I2SOUT6_MCLK_EN_W_NAME "I2SOUT6_MCLK_EN" +#define FMI2S_MASTER_MCLK_EN_W_NAME "FMI2S_MASTER_MCLK_EN" + +static int get_i2s_id_by_name(struct mtk_base_afe *afe, + const char *name) +{ + if (strncmp(name, "I2SIN0", 6) == 0) + return MT8196_DAI_I2S_IN0; + else if (strncmp(name, "I2SIN1", 6) == 0) + return MT8196_DAI_I2S_IN1; + else if (strncmp(name, "I2SIN2", 6) == 0) + return MT8196_DAI_I2S_IN2; + else if (strncmp(name, "I2SIN3", 6) == 0) + return MT8196_DAI_I2S_IN3; + else if (strncmp(name, "I2SIN4", 6) == 0) + return MT8196_DAI_I2S_IN4; + else if (strncmp(name, "I2SIN6", 6) == 0) + return MT8196_DAI_I2S_IN6; + else if (strncmp(name, "I2SOUT0", 7) == 0) + return MT8196_DAI_I2S_OUT0; + else if (strncmp(name, "I2SOUT1", 7) == 0) + return MT8196_DAI_I2S_OUT1; + else if (strncmp(name, "I2SOUT2", 7) == 0) + return MT8196_DAI_I2S_OUT2; + else if (strncmp(name, "I2SOUT3", 7) == 0) + return MT8196_DAI_I2S_OUT3; + else if (strncmp(name, "I2SOUT4", 7) == 0) + return MT8196_DAI_I2S_OUT4; + else if (strncmp(name, "I2SOUT6", 7) == 0) + return MT8196_DAI_I2S_OUT6; + else if (strncmp(name, "FMI2S_MASTER", 12) == 0) + return MT8196_DAI_FM_I2S_MASTER; + else + return -EINVAL; +} + +static struct mtk_afe_i2s_priv *get_i2s_priv_by_name(struct mtk_base_afe *afe, + const char *name) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int dai_id = get_i2s_id_by_name(afe, name); + + if (dai_id < 0) 
+ return NULL; + + return afe_priv->dai_priv[dai_id]; +} + +static const char * const etdm_0_3_loopback_text[] = { + "etdmin0", "etdmin1", + "etdmin2", "etdmin3", + "etdmout0", "etdmout1", + "etdmout2", "etdmout3" +}; + +static const char * const etdm_4_7_loopback_text[] = { + "etdmin4", "etdmin5", + "etdmin6", "etdmin7", + "etdmout4", "etdmout5", + "etdmout6", "etdmout7" +}; + +static const u32 etdm_loopback_values[] = { + 0, 2, 4, 6, 8, 10, 12, 14 +}; + +static const struct soc_enum i2sin0_loopback_enum = + SOC_VALUE_ENUM_SINGLE(ETDM_0_3_COWORK_CON1, ETDM_IN0_SDATA0_SEL_SFT, + ETDM_IN0_SDATA0_SEL_MASK, ARRAY_SIZE(etdm_0_3_loopback_text), + etdm_0_3_loopback_text, etdm_loopback_values); + +static const struct soc_enum i2sin1_loopback_enum = + SOC_VALUE_ENUM_SINGLE(ETDM_0_3_COWORK_CON1, ETDM_IN1_SDATA0_SEL_SFT, + ETDM_IN1_SDATA0_SEL_MASK, ARRAY_SIZE(etdm_0_3_loopback_text), + etdm_0_3_loopback_text, etdm_loopback_values); + +static const struct soc_enum i2sin2_loopback_enum = + SOC_VALUE_ENUM_SINGLE(ETDM_0_3_COWORK_CON3, ETDM_IN2_SDATA0_SEL_SFT, + ETDM_IN2_SDATA0_SEL_MASK, ARRAY_SIZE(etdm_0_3_loopback_text), + etdm_0_3_loopback_text, etdm_loopback_values); + +static const struct soc_enum i2sin3_loopback_enum = + SOC_VALUE_ENUM_SINGLE(ETDM_0_3_COWORK_CON3, ETDM_IN3_SDATA0_SEL_SFT, + ETDM_IN3_SDATA0_SEL_MASK, ARRAY_SIZE(etdm_0_3_loopback_text), + etdm_0_3_loopback_text, etdm_loopback_values); + +static const struct soc_enum i2sin4_loopback_enum = + SOC_VALUE_ENUM_SINGLE(ETDM_4_7_COWORK_CON1, ETDM_IN4_SDATA0_SEL_SFT, + ETDM_IN4_SDATA0_SEL_MASK, ARRAY_SIZE(etdm_4_7_loopback_text), + etdm_4_7_loopback_text, etdm_loopback_values); + +static const struct soc_enum i2sin6_loopback_enum = + SOC_VALUE_ENUM_SINGLE(ETDM_4_7_COWORK_CON3, ETDM_IN6_SDATA0_SEL_SFT, + ETDM_IN6_SDATA0_SEL_MASK, ARRAY_SIZE(etdm_4_7_loopback_text), + etdm_4_7_loopback_text, etdm_loopback_values); + +static const struct snd_kcontrol_new mtk_dai_i2s_controls[] = { + SOC_ENUM("I2SIN0 LOOPBACK", 
i2sin0_loopback_enum), + SOC_ENUM("I2SIN1 LOOPBACK", i2sin1_loopback_enum), + SOC_ENUM("I2SIN2 LOOPBACK", i2sin2_loopback_enum), + SOC_ENUM("I2SIN3 LOOPBACK", i2sin3_loopback_enum), + /* The following I2S does not support multi-ip mode */ + SOC_ENUM("I2SIN4 LOOPBACK", i2sin4_loopback_enum), + SOC_ENUM("I2SIN6 LOOPBACK", i2sin6_loopback_enum), +}; + +/* interconnection */ +static const struct snd_kcontrol_new mtk_i2sout0_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH1", AFE_CONN108_1, I_DL0_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN108_1, I_DL1_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN108_1, I_DL2_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN108_1, I_DL3_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1", AFE_CONN108_1, I_DL4_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1", AFE_CONN108_1, I_DL5_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1", AFE_CONN108_1, I_DL6_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH1", AFE_CONN108_1, I_DL7_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1", AFE_CONN108_1, I_DL8_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH1", AFE_CONN108_1, I_DL_24CH_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL23_CH1", AFE_CONN108_2, I_DL23_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL24_CH1", AFE_CONN108_2, I_DL24_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH1", AFE_CONN108_0, + I_GAIN0_OUT_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN108_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN108_0, + I_ADDA_UL_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN108_0, + I_ADDA_UL_CH3, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN108_4, + I_PCM_0_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN108_4, + I_PCM_1_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC_2_OUT_CH1", AFE_CONN108_6, + I_SRC_2_OUT_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new 
mtk_i2sout0_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH2", AFE_CONN109_1, I_DL0_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN109_1, I_DL1_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN109_1, I_DL2_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN109_1, I_DL3_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2", AFE_CONN109_1, I_DL4_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2", AFE_CONN109_1, I_DL5_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2", AFE_CONN109_1, I_DL6_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH2", AFE_CONN109_1, I_DL7_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2", AFE_CONN109_1, I_DL8_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH2", AFE_CONN109_1, I_DL_24CH_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL23_CH2", AFE_CONN109_2, I_DL23_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL24_CH2", AFE_CONN109_2, I_DL24_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH2", AFE_CONN109_0, + I_GAIN0_OUT_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN109_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN109_0, + I_ADDA_UL_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN109_0, + I_ADDA_UL_CH3, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN109_4, + I_PCM_0_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH2", AFE_CONN109_4, + I_PCM_0_CAP_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN109_4, + I_PCM_1_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN109_4, + I_PCM_1_CAP_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC_2_OUT_CH2", AFE_CONN109_6, + I_SRC_2_OUT_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new mtk_i2sout1_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH1", AFE_CONN110_1, I_DL0_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN110_1, I_DL1_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN110_1, I_DL2_CH1, 1, 
0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN110_1, I_DL3_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1", AFE_CONN110_1, I_DL4_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1", AFE_CONN110_1, I_DL5_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1", AFE_CONN110_1, I_DL6_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH1", AFE_CONN110_1, I_DL7_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1", AFE_CONN110_1, I_DL8_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH1", AFE_CONN110_1, I_DL_24CH_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH1", AFE_CONN110_0, + I_GAIN0_OUT_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN110_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN110_4, + I_PCM_0_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN110_4, + I_PCM_1_CAP_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new mtk_i2sout1_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH2", AFE_CONN111_1, I_DL0_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN111_1, I_DL1_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN111_1, I_DL2_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN111_1, I_DL3_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2", AFE_CONN111_1, I_DL4_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2", AFE_CONN111_1, I_DL5_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2", AFE_CONN111_1, I_DL6_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH2", AFE_CONN111_1, I_DL7_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2", AFE_CONN111_1, I_DL8_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH2", AFE_CONN111_1, I_DL_24CH_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH2", AFE_CONN111_0, + I_GAIN0_OUT_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN111_0, + I_ADDA_UL_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN111_4, + I_PCM_0_CAP_CH1, 1, 0), + 
SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH2", AFE_CONN111_4, + I_PCM_0_CAP_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN111_4, + I_PCM_1_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN111_4, + I_PCM_1_CAP_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new mtk_i2sout2_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH1", AFE_CONN112_1, I_DL0_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN112_1, I_DL1_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN112_1, I_DL2_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN112_1, I_DL3_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1", AFE_CONN112_1, I_DL4_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1", AFE_CONN112_1, I_DL5_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1", AFE_CONN112_1, I_DL6_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH1", AFE_CONN112_1, I_DL7_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1", AFE_CONN112_1, I_DL8_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH1", AFE_CONN112_1, I_DL_24CH_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH1", AFE_CONN112_0, + I_GAIN0_OUT_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN112_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN112_4, + I_PCM_0_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN112_4, + I_PCM_1_CAP_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new mtk_i2sout2_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH2", AFE_CONN113_1, I_DL0_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN113_1, I_DL1_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN113_1, I_DL2_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN113_1, I_DL3_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2", AFE_CONN113_1, I_DL4_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2", AFE_CONN113_1, I_DL5_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2", 
AFE_CONN113_1, I_DL6_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH2", AFE_CONN113_1, I_DL7_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2", AFE_CONN113_1, I_DL8_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH2", AFE_CONN113_1, I_DL_24CH_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH2", AFE_CONN113_0, + I_GAIN0_OUT_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN113_0, + I_ADDA_UL_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN113_4, + I_PCM_0_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH2", AFE_CONN113_4, + I_PCM_0_CAP_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN113_4, + I_PCM_1_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN113_4, + I_PCM_1_CAP_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new mtk_i2sout3_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH1", AFE_CONN114_1, I_DL0_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN114_1, I_DL1_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN114_1, I_DL2_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN114_1, I_DL3_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1", AFE_CONN114_1, I_DL4_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1", AFE_CONN114_1, I_DL5_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1", AFE_CONN114_1, I_DL6_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH1", AFE_CONN114_1, I_DL7_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1", AFE_CONN114_1, I_DL8_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH1", AFE_CONN114_1, I_DL_24CH_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH1", AFE_CONN114_0, + I_GAIN0_OUT_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN114_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN114_4, + I_PCM_0_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN114_4, + I_PCM_1_CAP_CH1, 1, 0), +}; + +static const struct 
snd_kcontrol_new mtk_i2sout3_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH2", AFE_CONN115_1, I_DL0_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN115_1, I_DL1_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN115_1, I_DL2_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN115_1, I_DL3_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2", AFE_CONN115_1, I_DL4_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2", AFE_CONN115_1, I_DL5_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2", AFE_CONN115_1, I_DL6_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH2", AFE_CONN115_1, I_DL7_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2", AFE_CONN115_1, I_DL8_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH2", AFE_CONN115_1, I_DL_24CH_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH2", AFE_CONN115_0, + I_GAIN0_OUT_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN115_0, + I_ADDA_UL_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH2", AFE_CONN115_4, + I_PCM_0_CAP_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN115_4, + I_PCM_1_CAP_CH2, 1, 0), +}; + +static const struct snd_kcontrol_new mtk_i2sout4_ch1_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH1", AFE_CONN116_1, I_DL0_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN116_1, I_DL1_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN116_1, I_DL2_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN116_1, I_DL3_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1", AFE_CONN116_1, I_DL4_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1", AFE_CONN116_1, I_DL5_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1", AFE_CONN116_1, I_DL6_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH1", AFE_CONN116_1, I_DL7_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1", AFE_CONN116_1, I_DL8_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH1", AFE_CONN116_1, I_DL_24CH_CH1, 1, 0), + 
SOC_DAPM_SINGLE_AUTODISABLE("DL24_CH1", AFE_CONN116_2, I_DL24_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH1", AFE_CONN116_0, + I_GAIN0_OUT_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN116_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN116_0, + I_ADDA_UL_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN116_0, + I_ADDA_UL_CH3, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN116_4, + I_PCM_0_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN116_4, + I_PCM_1_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC_2_OUT_CH1", AFE_CONN116_6, + I_SRC_2_OUT_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new mtk_i2sout4_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH2", AFE_CONN117_1, I_DL0_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN117_1, I_DL1_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN117_1, I_DL2_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN117_1, I_DL3_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2", AFE_CONN117_1, I_DL4_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2", AFE_CONN117_1, I_DL5_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2", AFE_CONN117_1, I_DL6_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH2", AFE_CONN117_1, I_DL7_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2", AFE_CONN117_1, I_DL8_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH2", AFE_CONN117_1, I_DL_24CH_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL24_CH2", AFE_CONN117_2, I_DL24_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH2", AFE_CONN117_0, + I_GAIN0_OUT_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN117_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN117_0, + I_ADDA_UL_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3", AFE_CONN117_0, + I_ADDA_UL_CH3, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN117_4, + 
			    I_PCM_0_CAP_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH2", AFE_CONN117_4,
				    I_PCM_0_CAP_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN117_4,
				    I_PCM_1_CAP_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN117_4,
				    I_PCM_1_CAP_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC_2_OUT_CH2", AFE_CONN117_6,
				    I_SRC_2_OUT_CH2, 1, 0),
};

/* I2SOUT4 channels 3-8: only the 24ch DL memif (plus PCM capture on 3/4). */
static const struct snd_kcontrol_new mtk_i2sout4_ch3_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH3", AFE_CONN118_1, I_DL_24CH_CH3, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN118_4,
				    I_PCM_0_CAP_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN118_4,
				    I_PCM_1_CAP_CH1, 1, 0),
};

static const struct snd_kcontrol_new mtk_i2sout4_ch4_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH4", AFE_CONN119_1, I_DL_24CH_CH4, 1, 0),
	/*
	 * NOTE(review): the CH4 capture entries below reuse AFE_CONN118_4
	 * (the CH3 connection register) while DL_24CH_CH4 uses
	 * AFE_CONN119_1; confirm against the AFE interconnect map that
	 * this is intentional and not a copy-paste of the ch3 table
	 * (AFE_CONN119_4 would otherwise be expected).
	 */
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN118_4,
				    I_PCM_0_CAP_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN118_4,
				    I_PCM_1_CAP_CH1, 1, 0),
};

static const struct snd_kcontrol_new mtk_i2sout4_ch5_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH5", AFE_CONN120_1, I_DL_24CH_CH5, 1, 0),
};

static const struct snd_kcontrol_new mtk_i2sout4_ch6_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH6", AFE_CONN121_1, I_DL_24CH_CH6, 1, 0),
};

static const struct snd_kcontrol_new mtk_i2sout4_ch7_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH7", AFE_CONN122_1, I_DL_24CH_CH7, 1, 0),
};

static const struct snd_kcontrol_new mtk_i2sout4_ch8_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH8", AFE_CONN123_1, I_DL_24CH_CH8, 1, 0),
};

static const struct snd_kcontrol_new mtk_i2sout6_ch1_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH1", AFE_CONN148_1, I_DL0_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN148_1, I_DL1_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN148_1, I_DL2_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN148_1,
I_DL3_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1", AFE_CONN148_1, I_DL4_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1", AFE_CONN148_1, I_DL5_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1", AFE_CONN148_1, I_DL6_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH1", AFE_CONN148_1, I_DL7_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1", AFE_CONN148_1, I_DL8_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL23_CH1", AFE_CONN148_2, I_DL23_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH1", AFE_CONN148_1, I_DL_24CH_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH1", AFE_CONN148_0, + I_GAIN0_OUT_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN148_0, + I_ADDA_UL_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN148_4, + I_PCM_0_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN148_4, + I_PCM_1_CAP_CH1, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC_1_OUT_CH1", AFE_CONN148_6, + I_SRC_1_OUT_CH1, 1, 0), +}; + +static const struct snd_kcontrol_new mtk_i2sout6_ch2_mix[] = { + SOC_DAPM_SINGLE_AUTODISABLE("DL0_CH2", AFE_CONN149_1, I_DL0_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN149_1, I_DL1_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN149_1, I_DL2_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN149_1, I_DL3_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2", AFE_CONN149_1, I_DL4_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2", AFE_CONN149_1, I_DL5_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2", AFE_CONN149_1, I_DL6_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL7_CH2", AFE_CONN149_1, I_DL7_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2", AFE_CONN149_1, I_DL8_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL23_CH2", AFE_CONN149_2, I_DL23_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("DL_24CH_CH2", AFE_CONN149_1, I_DL_24CH_CH2, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN0_OUT_CH2", AFE_CONN149_0, + I_GAIN0_OUT_CH2, 1, 0), + 
	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN149_0,
				    I_ADDA_UL_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH1", AFE_CONN149_4,
				    I_PCM_0_CAP_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_0_CAP_CH2", AFE_CONN149_4,
				    I_PCM_0_CAP_CH2, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN149_4,
				    I_PCM_1_CAP_CH1, 1, 0),
	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN149_4,
				    I_PCM_1_CAP_CH2, 1, 0),
	/*
	 * NOTE(review): every other I2SOUT6 CH2 source sits in an
	 * AFE_CONN149_x register; HW_SRC_1_OUT_CH2 alone uses AFE_CONN148_6
	 * (the CH1 register) -- confirm against the AFE interconnect map
	 * that this is intentional.
	 */
	SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC_1_OUT_CH2", AFE_CONN148_6,
				    I_SRC_1_OUT_CH2, 1, 0),
};

/*
 * DAPM supply power-up order (lower value powers up first):
 * APLL -> MCLK -> clock gate -> eTDM enable; power-down runs in reverse.
 */
enum {
	SUPPLY_SEQ_APLL,
	SUPPLY_SEQ_I2S_MCLK_EN,
	SUPPLY_SEQ_I2S_CG_EN,
	SUPPLY_SEQ_I2S_EN,
};

/*
 * DAPM supply event for the audio PLLs: enable APLL1/APLL2 before the
 * widget powers up and disable it after power-down, selecting the PLL
 * by the widget's name (APLL1_W_NAME vs. anything else -> APLL2).
 */
static int mtk_apll_event(struct snd_soc_dapm_widget *w,
			  struct snd_kcontrol *kcontrol,
			  int event)
{
	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);

	dev_dbg(cmpnt->dev, "name %s, event 0x%x\n", w->name, event);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		if (strcmp(w->name, APLL1_W_NAME) == 0)
			mt8196_apll1_enable(afe);
		else
			mt8196_apll2_enable(afe);
		break;
	case SND_SOC_DAPM_POST_PMD:
		if (strcmp(w->name, APLL1_W_NAME) == 0)
			mt8196_apll1_disable(afe);
		else
			mt8196_apll2_disable(afe);
		break;
	default:
		break;
	}

	return 0;
}

/*
 * DAPM supply event for a per-DAI MCLK: look the owning DAI up from the
 * widget name, enable its MCLK at the rate cached in the DAI's private
 * data before power-up, and disable it (clearing the cached rate) after
 * power-down.  Returns -EINVAL if the widget name maps to no DAI.
 */
static int mtk_mclk_en_event(struct snd_soc_dapm_widget *w,
			     struct snd_kcontrol *kcontrol,
			     int event)
{
	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
	struct mtk_afe_i2s_priv *i2s_priv;

	dev_dbg(cmpnt->dev, "name %s, event 0x%x\n", w->name, event);

	i2s_priv = get_i2s_priv_by_name(afe, w->name);

	if (!i2s_priv)
		return -EINVAL;

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		mt8196_mck_enable(afe, i2s_priv->mclk_id, i2s_priv->mclk_rate);
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* Rate is stale once the MCLK is gated; clear the cache. */
		i2s_priv->mclk_rate = 0;
		mt8196_mck_disable(afe, i2s_priv->mclk_id);
		break;
	default:
		break;
	}

	return 0;
}

static const struct snd_soc_dapm_widget mtk_dai_i2s_widgets[] = {
	/* Per-channel output interconnect mixers. */
	SND_SOC_DAPM_MIXER("I2SOUT0_CH1", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout0_ch1_mix,
			   ARRAY_SIZE(mtk_i2sout0_ch1_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT0_CH2", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout0_ch2_mix,
			   ARRAY_SIZE(mtk_i2sout0_ch2_mix)),

	SND_SOC_DAPM_MIXER("I2SOUT1_CH1", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout1_ch1_mix,
			   ARRAY_SIZE(mtk_i2sout1_ch1_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT1_CH2", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout1_ch2_mix,
			   ARRAY_SIZE(mtk_i2sout1_ch2_mix)),

	SND_SOC_DAPM_MIXER("I2SOUT2_CH1", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout2_ch1_mix,
			   ARRAY_SIZE(mtk_i2sout2_ch1_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT2_CH2", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout2_ch2_mix,
			   ARRAY_SIZE(mtk_i2sout2_ch2_mix)),

	SND_SOC_DAPM_MIXER("I2SOUT3_CH1", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout3_ch1_mix,
			   ARRAY_SIZE(mtk_i2sout3_ch1_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT3_CH2", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout3_ch2_mix,
			   ARRAY_SIZE(mtk_i2sout3_ch2_mix)),

	SND_SOC_DAPM_MIXER("I2SOUT4_CH1", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout4_ch1_mix,
			   ARRAY_SIZE(mtk_i2sout4_ch1_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT4_CH2", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout4_ch2_mix,
			   ARRAY_SIZE(mtk_i2sout4_ch2_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT4_CH3", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout4_ch3_mix,
			   ARRAY_SIZE(mtk_i2sout4_ch3_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT4_CH4", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout4_ch4_mix,
			   ARRAY_SIZE(mtk_i2sout4_ch4_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT4_CH5", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout4_ch5_mix,
			   ARRAY_SIZE(mtk_i2sout4_ch5_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT4_CH6", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout4_ch6_mix,
			   ARRAY_SIZE(mtk_i2sout4_ch6_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT4_CH7", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout4_ch7_mix,
			   ARRAY_SIZE(mtk_i2sout4_ch7_mix)),
	SND_SOC_DAPM_MIXER("I2SOUT4_CH8", SND_SOC_NOPM, 0, 0,
			   mtk_i2sout4_ch8_mix,
			   ARRAY_SIZE(mtk_i2sout4_ch8_mix)),

	SND_SOC_DAPM_MIXER("I2SOUT6_CH1", SND_SOC_NOPM, 0, 0,
mtk_i2sout6_ch1_mix, + ARRAY_SIZE(mtk_i2sout6_ch1_mix)), + SND_SOC_DAPM_MIXER("I2SOUT6_CH2", SND_SOC_NOPM, 0, 0, + mtk_i2sout6_ch2_mix, + ARRAY_SIZE(mtk_i2sout6_ch2_mix)), + + /* i2s en*/ + SND_SOC_DAPM_SUPPLY_S("I2SIN0_EN", SUPPLY_SEQ_I2S_EN, + ETDM_IN0_CON0, REG_ETDM_IN_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN1_EN", SUPPLY_SEQ_I2S_EN, + ETDM_IN1_CON0, REG_ETDM_IN_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN2_EN", SUPPLY_SEQ_I2S_EN, + ETDM_IN2_CON0, REG_ETDM_IN_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN3_EN", SUPPLY_SEQ_I2S_EN, + ETDM_IN3_CON0, REG_ETDM_IN_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN4_EN", SUPPLY_SEQ_I2S_EN, + ETDM_IN4_CON0, REG_ETDM_IN_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN6_EN", SUPPLY_SEQ_I2S_EN, + ETDM_IN6_CON0, REG_ETDM_IN_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT0_EN", SUPPLY_SEQ_I2S_EN, + ETDM_OUT0_CON0, OUT_REG_ETDM_OUT_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT1_EN", SUPPLY_SEQ_I2S_EN, + ETDM_OUT1_CON0, OUT_REG_ETDM_OUT_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT2_EN", SUPPLY_SEQ_I2S_EN, + ETDM_OUT2_CON0, OUT_REG_ETDM_OUT_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT3_EN", SUPPLY_SEQ_I2S_EN, + ETDM_OUT3_CON0, OUT_REG_ETDM_OUT_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT4_EN", SUPPLY_SEQ_I2S_EN, + ETDM_OUT4_CON0, OUT_REG_ETDM_OUT_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT6_EN", SUPPLY_SEQ_I2S_EN, + ETDM_OUT6_CON0, OUT_REG_ETDM_OUT_EN_SFT, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("FMI2S_MASTER_EN", SUPPLY_SEQ_I2S_EN, + AFE_CONNSYS_I2S_CON, I2S_EN_SFT, 0, + NULL, 0), + + /* i2s mclk en */ + SND_SOC_DAPM_SUPPLY_S(I2SIN0_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SIN1_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + 
SND_SOC_DAPM_SUPPLY_S(I2SIN2_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SIN3_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SIN4_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SIN6_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SOUT0_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SOUT1_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SOUT2_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SOUT3_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SOUT4_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(I2SOUT6_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(FMI2S_MASTER_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN, + SND_SOC_NOPM, 0, 0, + mtk_mclk_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + /* cg */ + SND_SOC_DAPM_SUPPLY_S("I2SOUT0_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_OUT0_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT1_CG", SUPPLY_SEQ_I2S_CG_EN, + 
AUDIO_TOP_CON2, PDN_ETDM_OUT1_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT2_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_OUT2_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT3_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_OUT3_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT4_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_OUT4_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SOUT6_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_OUT6_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN0_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_IN0_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN1_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_IN1_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN2_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_IN2_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN3_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_IN3_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN4_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_IN4_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("I2SIN6_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON2, PDN_ETDM_IN6_SFT, 1, + NULL, 0), + SND_SOC_DAPM_SUPPLY_S("FMI2S_MASTER_CG", SUPPLY_SEQ_I2S_CG_EN, + AUDIO_TOP_CON0, PDN_FM_I2S_SFT, 1, + NULL, 0), + + /* apll */ + SND_SOC_DAPM_SUPPLY_S(APLL1_W_NAME, SUPPLY_SEQ_APLL, + SND_SOC_NOPM, 0, 0, + mtk_apll_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY_S(APLL2_W_NAME, SUPPLY_SEQ_APLL, + SND_SOC_NOPM, 0, 0, + mtk_apll_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_MIXER("SOF_DMA_DL_24CH", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("SOF_DMA_DL1", SND_SOC_NOPM, 0, 0, NULL, 0), +}; + +static int mtk_afe_i2s_share_connect(struct snd_soc_dapm_widget *source, + struct snd_soc_dapm_widget *sink) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(sink->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mtk_afe_i2s_priv 
*i2s_priv; + + i2s_priv = get_i2s_priv_by_name(afe, sink->name); + if (!i2s_priv) + return 0; + + if (i2s_priv->share_i2s_id < 0) + return 0; + + return i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name); +} + +static int mtk_afe_i2s_apll_connect(struct snd_soc_dapm_widget *source, + struct snd_soc_dapm_widget *sink) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(sink->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mtk_afe_i2s_priv *i2s_priv; + int cur_apll; + int needed_apll; + + i2s_priv = get_i2s_priv_by_name(afe, sink->name); + if (!i2s_priv) + return 0; + + /* which apll */ + cur_apll = mt8196_get_apll_by_name(afe, source->name); + + /* choose APLL from i2s rate */ + needed_apll = mt8196_get_apll_by_rate(afe, i2s_priv->rate); + + return needed_apll == cur_apll; +} + +static int mtk_afe_i2s_mclk_connect(struct snd_soc_dapm_widget *source, + struct snd_soc_dapm_widget *sink) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(sink->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mtk_afe_i2s_priv *i2s_priv; + int i2s_num; + + i2s_priv = get_i2s_priv_by_name(afe, sink->name); + if (!i2s_priv) + return 0; + + i2s_num = get_i2s_id_by_name(afe, source->name); + if (get_i2s_id_by_name(afe, sink->name) == i2s_num) + return i2s_priv->mclk_rate > 0; + + /* check if share i2s need mclk */ + if (i2s_priv->share_i2s_id < 0) + return 0; + + if (i2s_priv->share_i2s_id == i2s_num) + return i2s_priv->mclk_rate > 0; + + return 0; +} + +static int mtk_afe_mclk_apll_connect(struct snd_soc_dapm_widget *source, + struct snd_soc_dapm_widget *sink) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(sink->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mtk_afe_i2s_priv *i2s_priv; + int cur_apll; + + i2s_priv = get_i2s_priv_by_name(afe, sink->name); + if (!i2s_priv) + return 0; + + /* which apll */ + cur_apll = 
mt8196_get_apll_by_name(afe, source->name); + + return i2s_priv->mclk_apll == cur_apll; +} + +static const struct snd_soc_dapm_route mtk_dai_i2s_routes[] = { + /* i2sin0 */ + {"I2SIN0", NULL, "I2SIN0_EN"}, + {"I2SIN0", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN0", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SIN0", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SIN0", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SIN0", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN0", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + + {I2SIN0_MCLK_EN_W_NAME, 
NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SIN0_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SIN0", NULL, "I2SIN0_CG"}, + + /* i2sin1 */ + {"I2SIN1", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SIN1_EN"}, + {"I2SIN1", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN1", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SIN1", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SIN1", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SIN1", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN1", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SIN1_MCLK_EN_W_NAME, 
NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SIN1_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SIN1", NULL, "I2SIN1_CG"}, + + /* i2sin2 */ + {"I2SIN2", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SIN2_EN"}, + {"I2SIN2", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN2", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SIN2", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SIN2", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SIN2", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN2", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SIN2_MCLK_EN_W_NAME, 
NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SIN2_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SIN2", NULL, "I2SIN2_CG"}, + + /* i2sin3 */ + {"I2SIN3", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SIN3_EN"}, + {"I2SIN3", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN3", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SIN3", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SIN3", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SIN3", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN3", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SIN3_MCLK_EN_W_NAME, 
NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SIN3_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SIN3", NULL, "I2SIN3_CG"}, + + /* i2sin4 */ + {"I2SIN4", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SIN4_EN"}, + {"I2SIN4", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN4", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SIN4", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SIN4", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SIN4", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN4", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SIN4_MCLK_EN_W_NAME, 
NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SIN4_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SIN4", NULL, "I2SIN4_CG"}, + + /* i2sin6 */ + {"I2SIN6", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SIN6_EN"}, + {"I2SIN6", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SIN6", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SIN6", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SIN6", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SIN6", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SIN6", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SIN6_MCLK_EN_W_NAME, 
NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SIN6_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SIN6", NULL, "I2SIN6_CG"}, + {"I2SIN6", NULL, "I2SOUT6_CG"}, + + /* i2sout0 */ + {"I2SOUT0_CH1", "DL0_CH1", "DL0"}, + {"I2SOUT0_CH2", "DL0_CH2", "DL0"}, + {"I2SOUT0_CH1", "DL1_CH1", "DL1"}, + {"I2SOUT0_CH2", "DL1_CH2", "DL1"}, + {"I2SOUT0_CH1", "DL2_CH1", "DL2"}, + {"I2SOUT0_CH2", "DL2_CH2", "DL2"}, + {"I2SOUT0_CH1", "DL3_CH1", "DL3"}, + {"I2SOUT0_CH2", "DL3_CH2", "DL3"}, + {"I2SOUT0_CH1", "DL4_CH1", "DL4"}, + {"I2SOUT0_CH2", "DL4_CH2", "DL4"}, + {"I2SOUT0_CH1", "DL5_CH1", "DL5"}, + {"I2SOUT0_CH2", "DL5_CH2", "DL5"}, + {"I2SOUT0_CH1", "DL6_CH1", "DL6"}, + {"I2SOUT0_CH2", "DL6_CH2", "DL6"}, + {"I2SOUT0_CH1", "DL7_CH1", "DL7"}, + {"I2SOUT0_CH2", "DL7_CH2", "DL7"}, + {"I2SOUT0_CH1", "DL8_CH1", "DL8"}, + {"I2SOUT0_CH2", "DL8_CH2", "DL8"}, + {"I2SOUT0_CH1", "DL23_CH1", "DL23"}, + {"I2SOUT0_CH2", "DL23_CH2", "DL23"}, + {"I2SOUT0_CH1", "DL_24CH_CH1", "DL_24CH"}, + {"I2SOUT0_CH2", "DL_24CH_CH2", "DL_24CH"}, + + {"I2SOUT0_CH1", "DL24_CH1", "DL24"}, + {"I2SOUT0_CH2", "DL24_CH2", "DL24"}, + + {"I2SOUT0", NULL, "I2SOUT0_CH1"}, + {"I2SOUT0", NULL, "I2SOUT0_CH2"}, + + {"I2SOUT0", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SOUT0_EN"}, + {"I2SOUT0", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT0", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SOUT0", NULL, 
APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SOUT0", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SOUT0", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT0", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SOUT0_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SOUT0_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SOUT0", NULL, "I2SOUT0_CG"}, + {"I2SOUT0", NULL, "I2SIN0_CG"}, + + /* i2sout1 */ + {"I2SOUT1_CH1", "DL0_CH1", "DL0"}, + {"I2SOUT1_CH2", "DL0_CH2", "DL0"}, + {"I2SOUT1_CH1", "DL1_CH1", "DL1"}, + {"I2SOUT1_CH2", "DL1_CH2", "DL1"}, + {"I2SOUT1_CH1", "DL2_CH1", "DL2"}, + {"I2SOUT1_CH2", "DL2_CH2", "DL2"}, + {"I2SOUT1_CH1", "DL3_CH1", "DL3"}, + {"I2SOUT1_CH2", "DL3_CH2", "DL3"}, + {"I2SOUT1_CH1", "DL4_CH1", "DL4"}, + {"I2SOUT1_CH2", "DL4_CH2", "DL4"}, + {"I2SOUT1_CH1", "DL5_CH1", "DL5"}, + {"I2SOUT1_CH2", "DL5_CH2", "DL5"}, + {"I2SOUT1_CH1", "DL6_CH1", "DL6"}, + {"I2SOUT1_CH2", "DL6_CH2", "DL6"}, + {"I2SOUT1_CH1", "DL7_CH1", "DL7"}, + {"I2SOUT1_CH2", "DL7_CH2", "DL7"}, + {"I2SOUT1_CH1", "DL8_CH1", "DL8"}, + {"I2SOUT1_CH2", "DL8_CH2", "DL8"}, + {"I2SOUT1_CH1", "DL_24CH_CH1", "DL_24CH"}, + {"I2SOUT1_CH2", 
"DL_24CH_CH2", "DL_24CH"}, + + {"I2SOUT1", NULL, "I2SOUT1_CH1"}, + {"I2SOUT1", NULL, "I2SOUT1_CH2"}, + + {"I2SOUT1", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SOUT1_EN"}, + {"I2SOUT1", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT1", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SOUT1", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SOUT1", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SOUT1", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT1", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SOUT1_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, 
mtk_afe_mclk_apll_connect}, + {I2SOUT1_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SOUT1", NULL, "I2SOUT1_CG"}, + {"I2SOUT1", NULL, "I2SIN1_CG"}, + + /* i2sout2 */ + {"I2SOUT2_CH1", "DL0_CH1", "DL0"}, + {"I2SOUT2_CH2", "DL0_CH2", "DL0"}, + {"I2SOUT2_CH1", "DL1_CH1", "DL1"}, + {"I2SOUT2_CH2", "DL1_CH2", "DL1"}, + {"I2SOUT2_CH1", "DL2_CH1", "DL2"}, + {"I2SOUT2_CH2", "DL2_CH2", "DL2"}, + {"I2SOUT2_CH1", "DL3_CH1", "DL3"}, + {"I2SOUT2_CH2", "DL3_CH2", "DL3"}, + {"I2SOUT2_CH1", "DL4_CH1", "DL4"}, + {"I2SOUT2_CH2", "DL4_CH2", "DL4"}, + {"I2SOUT2_CH1", "DL5_CH1", "DL5"}, + {"I2SOUT2_CH2", "DL5_CH2", "DL5"}, + {"I2SOUT2_CH1", "DL6_CH1", "DL6"}, + {"I2SOUT2_CH2", "DL6_CH2", "DL6"}, + {"I2SOUT2_CH1", "DL7_CH1", "DL7"}, + {"I2SOUT2_CH2", "DL7_CH2", "DL7"}, + {"I2SOUT2_CH1", "DL8_CH1", "DL8"}, + {"I2SOUT2_CH2", "DL8_CH2", "DL8"}, + {"I2SOUT2_CH1", "DL_24CH_CH1", "DL_24CH"}, + {"I2SOUT2_CH2", "DL_24CH_CH2", "DL_24CH"}, + + {"I2SOUT2", NULL, "I2SOUT2_CH1"}, + {"I2SOUT2", NULL, "I2SOUT2_CH2"}, + + {"I2SOUT2", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SOUT2_EN"}, + {"I2SOUT2", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT2", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SOUT2", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SOUT2", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SOUT2", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + 
{"I2SOUT2", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT2", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SOUT2_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SOUT2_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SOUT2", NULL, "I2SOUT2_CG"}, + {"I2SOUT2", NULL, "I2SIN2_CG"}, + + /* i2sout3 */ + {"I2SOUT3_CH1", "DL0_CH1", "DL0"}, + {"I2SOUT3_CH2", "DL0_CH2", "DL0"}, + {"I2SOUT3_CH1", "DL1_CH1", "DL1"}, + {"I2SOUT3_CH2", "DL1_CH2", "DL1"}, + {"I2SOUT3_CH1", "DL2_CH1", "DL2"}, + {"I2SOUT3_CH2", "DL2_CH2", "DL2"}, + {"I2SOUT3_CH1", "DL3_CH1", "DL3"}, + {"I2SOUT3_CH2", "DL3_CH2", "DL3"}, + {"I2SOUT3_CH1", "DL4_CH1", "DL4"}, + {"I2SOUT3_CH2", "DL4_CH2", "DL4"}, + {"I2SOUT3_CH1", "DL5_CH1", "DL5"}, + {"I2SOUT3_CH2", "DL5_CH2", "DL5"}, + {"I2SOUT3_CH1", "DL6_CH1", "DL6"}, + {"I2SOUT3_CH2", "DL6_CH2", "DL6"}, + {"I2SOUT3_CH1", "DL7_CH1", "DL7"}, + {"I2SOUT3_CH2", "DL7_CH2", "DL7"}, + {"I2SOUT3_CH1", "DL8_CH1", "DL8"}, + {"I2SOUT3_CH2", "DL8_CH2", "DL8"}, + {"I2SOUT3_CH1", "DL_24CH_CH1", "DL_24CH"}, + {"I2SOUT3_CH2", "DL_24CH_CH2", "DL_24CH"}, + + {"I2SOUT3", NULL, "I2SOUT3_CH1"}, + {"I2SOUT3", NULL, "I2SOUT3_CH2"}, + + {"I2SOUT3", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, 
"I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SOUT3_EN"}, + {"I2SOUT3", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT3", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SOUT3", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SOUT3", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SOUT3", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT3", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SOUT3_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SOUT3_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + {"I2SOUT3", NULL, "I2SOUT3_CG"}, + {"I2SOUT3", NULL, "I2SIN3_CG"}, + + /* i2sout4 */ + {"I2SOUT4_CH1", 
"DL0_CH1", "DL0"}, + {"I2SOUT4_CH2", "DL0_CH2", "DL0"}, + {"I2SOUT4_CH1", "DL1_CH1", "DL1"}, + {"I2SOUT4_CH2", "DL1_CH2", "DL1"}, + {"I2SOUT4_CH1", "DL2_CH1", "DL2"}, + {"I2SOUT4_CH2", "DL2_CH2", "DL2"}, + {"I2SOUT4_CH1", "DL3_CH1", "DL3"}, + {"I2SOUT4_CH2", "DL3_CH2", "DL3"}, + {"I2SOUT4_CH1", "DL4_CH1", "DL4"}, + {"I2SOUT4_CH2", "DL4_CH2", "DL4"}, + {"I2SOUT4_CH1", "DL5_CH1", "DL5"}, + {"I2SOUT4_CH2", "DL5_CH2", "DL5"}, + {"I2SOUT4_CH1", "DL6_CH1", "DL6"}, + {"I2SOUT4_CH2", "DL6_CH2", "DL6"}, + {"I2SOUT4_CH1", "DL7_CH1", "DL7"}, + {"I2SOUT4_CH2", "DL7_CH2", "DL7"}, + {"I2SOUT4_CH1", "DL8_CH1", "DL8"}, + {"I2SOUT4_CH2", "DL8_CH2", "DL8"}, + {"I2SOUT4_CH1", "DL_24CH_CH1", "DL_24CH"}, + {"I2SOUT4_CH2", "DL_24CH_CH2", "DL_24CH"}, + {"I2SOUT4_CH3", "DL_24CH_CH3", "DL_24CH"}, + {"I2SOUT4_CH4", "DL_24CH_CH4", "DL_24CH"}, + {"I2SOUT4_CH5", "DL_24CH_CH5", "DL_24CH"}, + {"I2SOUT4_CH6", "DL_24CH_CH6", "DL_24CH"}, + {"I2SOUT4_CH7", "DL_24CH_CH7", "DL_24CH"}, + {"I2SOUT4_CH8", "DL_24CH_CH8", "DL_24CH"}, + {"I2SOUT4_CH1", "DL24_CH1", "DL24"}, + {"I2SOUT4_CH2", "DL24_CH2", "DL24"}, + + /* SOF Downlink */ + {"I2SOUT4_CH1", "DL_24CH_CH1", "SOF_DMA_DL_24CH"}, + {"I2SOUT4_CH2", "DL_24CH_CH2", "SOF_DMA_DL_24CH"}, + {"I2SOUT4_CH3", "DL_24CH_CH3", "SOF_DMA_DL_24CH"}, + {"I2SOUT4_CH4", "DL_24CH_CH4", "SOF_DMA_DL_24CH"}, + + {"I2SOUT4", NULL, "I2SOUT4_CH1"}, + {"I2SOUT4", NULL, "I2SOUT4_CH2"}, + {"I2SOUT4", NULL, "I2SOUT4_CH3"}, + {"I2SOUT4", NULL, "I2SOUT4_CH4"}, + {"I2SOUT4", NULL, "I2SOUT4_CH5"}, + {"I2SOUT4", NULL, "I2SOUT4_CH6"}, + {"I2SOUT4", NULL, "I2SOUT4_CH7"}, + {"I2SOUT4", NULL, "I2SOUT4_CH8"}, + + {"I2SOUT4", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, 
"I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "I2SOUT4_EN"}, + {"I2SOUT4", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT4", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SOUT4", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SOUT4", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SOUT4", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT4", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SOUT4_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SOUT4_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + /* CG */ + {"I2SOUT4", NULL, "I2SOUT4_CG"}, + {"I2SOUT4", NULL, "I2SIN4_CG"}, + + /* i2sout6 */ + {"I2SOUT6_CH1", "DL0_CH1", "DL0"}, + {"I2SOUT6_CH2", "DL0_CH2", "DL0"}, + {"I2SOUT6_CH1", "DL1_CH1", "DL1"}, + {"I2SOUT6_CH2", "DL1_CH2", "DL1"}, + {"I2SOUT6_CH1", "DL2_CH1", "DL2"}, + {"I2SOUT6_CH2", "DL2_CH2", "DL2"}, + {"I2SOUT6_CH1", "DL3_CH1", "DL3"}, + {"I2SOUT6_CH2", "DL3_CH2", "DL3"}, + 
{"I2SOUT6_CH1", "DL4_CH1", "DL4"}, + {"I2SOUT6_CH2", "DL4_CH2", "DL4"}, + {"I2SOUT6_CH1", "DL5_CH1", "DL5"}, + {"I2SOUT6_CH2", "DL5_CH2", "DL5"}, + {"I2SOUT6_CH1", "DL6_CH1", "DL6"}, + {"I2SOUT6_CH2", "DL6_CH2", "DL6"}, + {"I2SOUT6_CH1", "DL7_CH1", "DL7"}, + {"I2SOUT6_CH2", "DL7_CH2", "DL7"}, + {"I2SOUT6_CH1", "DL8_CH1", "DL8"}, + {"I2SOUT6_CH2", "DL8_CH2", "DL8"}, + {"I2SOUT6_CH1", "DL23_CH1", "DL23"}, + {"I2SOUT6_CH2", "DL23_CH2", "DL23"}, + {"I2SOUT6_CH1", "DL_24CH_CH1", "DL_24CH"}, + {"I2SOUT6_CH2", "DL_24CH_CH2", "DL_24CH"}, + + /* SOF Downlink */ + {"I2SOUT6_CH1", "DL1_CH1", "SOF_DMA_DL1"}, + {"I2SOUT6_CH2", "DL1_CH2", "SOF_DMA_DL1"}, + {"I2SOUT6_CH1", "DL_24CH_CH1", "SOF_DMA_DL_24CH"}, + {"I2SOUT6_CH2", "DL_24CH_CH2", "SOF_DMA_DL_24CH"}, + + {"I2SOUT6", NULL, "I2SOUT6_CH1"}, + {"I2SOUT6", NULL, "I2SOUT6_CH2"}, + + {"I2SOUT6", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"I2SOUT6", NULL, "I2SOUT6_EN"}, + {"I2SOUT6", NULL, "FMI2S_MASTER_EN", mtk_afe_i2s_share_connect}, + + {"I2SOUT6", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"I2SOUT6", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"I2SOUT6", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SIN3_MCLK_EN_W_NAME, 
mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"I2SOUT6", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {I2SOUT6_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {I2SOUT6_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + /* CG */ + {"I2SOUT6", NULL, "I2SOUT6_CG"}, + {"I2SOUT6", NULL, "I2SIN6_CG"}, + + /* fmi2s */ + {"FMI2S_MASTER", NULL, "I2SIN0_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SIN1_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SIN2_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SIN3_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SIN4_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SIN6_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SOUT0_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SOUT1_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SOUT2_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SOUT3_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SOUT4_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "I2SOUT6_EN", mtk_afe_i2s_share_connect}, + {"FMI2S_MASTER", NULL, "FMI2S_MASTER_EN"}, + + {"FMI2S_MASTER", NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect}, + {"FMI2S_MASTER", NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect}, + + {"FMI2S_MASTER", NULL, I2SIN0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, 
I2SIN1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SIN2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SIN3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SIN4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SIN6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SOUT0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SOUT1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SOUT2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SOUT3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SOUT4_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, I2SOUT6_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {"FMI2S_MASTER", NULL, FMI2S_MASTER_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect}, + {FMI2S_MASTER_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect}, + {FMI2S_MASTER_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect}, + /* CG */ + {"FMI2S_MASTER", NULL, "FMI2S_MASTER_CG"}, +}; + +/* i2s dai ops*/ +static int mtk_dai_i2s_config(struct mtk_base_afe *afe, + struct snd_pcm_hw_params *params, + int i2s_id) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + struct mtk_afe_i2s_priv *i2s_priv; + struct mtk_afe_i2s_priv *i2sin_priv = NULL; + int id = i2s_id - MT8196_DAI_I2S_IN0; + struct mtk_base_etdm_data etdm_data; + unsigned int rate = params_rate(params); + unsigned int rate_reg = get_etdm_inconn_rate(rate); + snd_pcm_format_t format = params_format(params); + unsigned int channels = params_channels(params); + int ret; + unsigned int i2s_con; + int pad_top; + + if (i2s_id >= MT8196_DAI_NUM || i2s_id < 0 || id < 0 || id >= DAI_I2S_NUM) + return -EINVAL; + + i2s_priv = afe_priv->dai_priv[i2s_id]; + if (!i2s_priv) + return -EINVAL; + + dev_dbg(afe->dev, "id: %d, rate: %d, pcm_fmt: %d, fmt: %d, ch: %d\n", + i2s_id, rate, format, i2s_priv->format, 
channels); + + i2s_priv->rate = rate; + etdm_data = mtk_etdm_data[id]; + + if (is_etdm_in_pad_top(id)) + pad_top = 0x3; + else + pad_top = 0x5; + + switch (id) { + case DAI_FMI2S_MASTER: + i2s_con = I2S_IN_PAD_IO_MUX << I2SIN_PAD_SEL_SFT; + i2s_con |= rate_reg << I2S_MODE_SFT; + i2s_con |= I2S_FMT_I2S << I2S_FMT_SFT; + i2s_con |= get_i2s_wlen(format) << I2S_WLEN_SFT; + regmap_update_bits(afe->regmap, AFE_CONNSYS_I2S_CON, + 0xffffeffe, i2s_con); + break; + + case DAI_I2SIN0: + case DAI_I2SIN1: + case DAI_I2SIN2: + case DAI_I2SIN3: + case DAI_I2SIN4: + case DAI_I2SIN6: + /* ---etdm in --- */ + regmap_update_bits(afe->regmap, + etdm_data.init_count_reg, + etdm_data.init_count_mask << etdm_data.init_count_shift, + 0x5 << etdm_data.init_count_shift); + + /* 3: pad top 5: no pad top */ + regmap_update_bits(afe->regmap, + etdm_data.init_point_reg, + etdm_data.init_point_mask << etdm_data.init_point_shift, + pad_top << etdm_data.init_point_shift); + + regmap_update_bits(afe->regmap, + etdm_data.lrck_reset_reg, + etdm_data.lrck_reset_mask << etdm_data.lrck_reset_shift, + 0x1 << etdm_data.lrck_reset_shift); + + regmap_update_bits(afe->regmap, + etdm_data.clk_source_reg, + etdm_data.clk_source_mask << etdm_data.clk_source_shift, + ETDM_CLK_SOURCE_APLL << etdm_data.clk_source_shift); + + /* 0: manual 1: auto */ + regmap_update_bits(afe->regmap, + etdm_data.ck_en_sel_reg, + etdm_data.ck_en_sel_mask << etdm_data.ck_en_sel_shift, + 0x1 << etdm_data.ck_en_sel_shift); + + regmap_update_bits(afe->regmap, + etdm_data.fs_timing_reg, + etdm_data.fs_timing_mask << etdm_data.fs_timing_shift, + get_etdm_rate(rate) << etdm_data.fs_timing_shift); + + regmap_update_bits(afe->regmap, + etdm_data.relatch_en_sel_reg, + etdm_data.relatch_en_sel_mask << etdm_data.relatch_en_sel_shift, + get_etdm_inconn_rate(rate) << etdm_data.relatch_en_sel_shift); + + regmap_update_bits(afe->regmap, + etdm_data.use_afifo_reg, + etdm_data.use_afifo_mask << etdm_data.use_afifo_shift, + 0x0); + + 
regmap_update_bits(afe->regmap, + etdm_data.afifo_mode_reg, + etdm_data.afifo_mode_mask << etdm_data.afifo_mode_shift, + 0x0); + + regmap_update_bits(afe->regmap, + etdm_data.almost_end_ch_reg, + etdm_data.almost_end_ch_mask << etdm_data.almost_end_ch_shift, + 0x0); + + regmap_update_bits(afe->regmap, + etdm_data.almost_end_bit_reg, + etdm_data.almost_end_bit_mask << etdm_data.almost_end_bit_shift, + 0x0); + + if (is_etdm_in_pad_top(id)) { + regmap_update_bits(afe->regmap, + etdm_data.out2latch_time_reg, + etdm_data.out2latch_time_mask << + etdm_data.out2latch_time_shift, + 0x6 << etdm_data.out2latch_time_shift); + } else { + regmap_update_bits(afe->regmap, + etdm_data.out2latch_time_reg, + etdm_data.out2latch_time_mask << + etdm_data.out2latch_time_shift, + 0x4 << etdm_data.out2latch_time_shift); + } + + if (id == DAI_I2SIN4) { + dev_dbg(afe->dev, "i2sin4, id: %d, fmt: %d, ch: %d, ip_mode: %d, sync: %d\n", + id, i2s_priv->format, channels, i2s_priv->ip_mode, i2s_priv->sync); + + /* Fmt Mode: 0x00 i2s, 0x04 adsp_a, DSP_A mode for multi-channel */ + regmap_update_bits(afe->regmap, + etdm_data.tdm_mode_reg, + etdm_data.tdm_mode_mask << etdm_data.tdm_mode_shift, + i2s_priv->format << etdm_data.tdm_mode_shift); + + /* set etdm ch */ + regmap_update_bits(afe->regmap, + etdm_data.ch_reg, + etdm_data.ch_mask << etdm_data.ch_shift, + (channels - 1) << etdm_data.ch_shift); + + /* set etdm ip mode */ + regmap_update_bits(afe->regmap, + etdm_data.ip_mode_reg, + etdm_data.ip_mode_mask << etdm_data.ip_mode_shift, + i2s_priv->ip_mode << etdm_data.ip_mode_shift); + + /* set etdm sync */ + regmap_update_bits(afe->regmap, + etdm_data.sync_reg, + etdm_data.sync_mask << etdm_data.sync_shift, + i2s_priv->sync << etdm_data.sync_shift); + } else { + /* default i2s */ + regmap_update_bits(afe->regmap, + etdm_data.tdm_mode_reg, + etdm_data.tdm_mode_mask << etdm_data.tdm_mode_shift, + 0x0 << etdm_data.tdm_mode_shift); + + /* set etdm sync */ + regmap_update_bits(afe->regmap, + 
etdm_data.sync_reg, + etdm_data.sync_mask << etdm_data.sync_shift, + 0x0 << etdm_data.sync_shift); + } + + /* APLL */ + regmap_update_bits(afe->regmap, + etdm_data.relatch_domain_sel_reg, + etdm_data.relatch_domain_sel_mask << + etdm_data.relatch_domain_sel_shift, + ETDM_RELATCH_SEL_APLL << etdm_data.relatch_domain_sel_shift); + + regmap_update_bits(afe->regmap, + etdm_data.bit_length_reg, + etdm_data.bit_length_mask << etdm_data.bit_length_shift, + get_etdm_lrck_width(format) << etdm_data.bit_length_shift); + + regmap_update_bits(afe->regmap, + etdm_data.word_length_reg, + etdm_data.word_length_mask << etdm_data.word_length_shift, + get_etdm_wlen(format) << etdm_data.word_length_shift); + + /* ---etdm cowork --- */ + regmap_update_bits(afe->regmap, + etdm_data.cowork_reg, + etdm_data.cowork_mask << etdm_data.cowork_shift, + etdm_data.cowork_val << etdm_data.cowork_shift); + + /* i2s with pad top setting */ + if (is_etdm_in_pad_top(id) && etdm_data.pad_top_ck_en_reg != 0) { + regmap_update_bits(afe->regmap, + etdm_data.pad_top_ck_en_reg, + etdm_data.pad_top_ck_en_mask << + etdm_data.pad_top_ck_en_shift, + 0x1 << etdm_data.pad_top_ck_en_shift); + + regmap_update_bits(afe->regmap, + etdm_data.master_latch_reg, + etdm_data.master_latch_mask << + etdm_data.master_latch_shift, + 0x0); + } + break; + + case DAI_I2SOUT0: + case DAI_I2SOUT1: + case DAI_I2SOUT2: + case DAI_I2SOUT3: + case DAI_I2SOUT4: + case DAI_I2SOUT6: + /* ---etdm out --- */ + regmap_update_bits(afe->regmap, + etdm_data.init_count_reg, + etdm_data.init_count_mask << etdm_data.init_count_shift, + 0x5 << etdm_data.init_count_shift); + + regmap_update_bits(afe->regmap, + etdm_data.init_point_reg, + etdm_data.init_point_mask << etdm_data.init_point_shift, + 0x6 << etdm_data.init_point_shift); + + // clock speed > 22M need to set relatch time to avoid duplicate porint + if (rate * channels * (get_etdm_wlen(format) + 1) >= ETDM_22M_CLOCK_THRES && + get_etdm_wlen(format) >= 2) { + 
regmap_update_bits(afe->regmap, + etdm_data.in2latch_time_reg, + etdm_data.in2latch_time_mask << + etdm_data.in2latch_time_shift, + (get_etdm_wlen(format) - 2) << + etdm_data.in2latch_time_shift); + } else { + regmap_update_bits(afe->regmap, + etdm_data.in2latch_time_reg, + etdm_data.in2latch_time_mask << + etdm_data.in2latch_time_shift, + 0x6 << etdm_data.in2latch_time_shift); + } + + regmap_update_bits(afe->regmap, + etdm_data.lrck_reset_reg, + etdm_data.lrck_reset_mask << etdm_data.lrck_reset_shift, + 0x1 << etdm_data.lrck_reset_shift); + + regmap_update_bits(afe->regmap, + etdm_data.fs_timing_reg, + etdm_data.fs_timing_mask << etdm_data.fs_timing_shift, + get_etdm_rate(rate) << etdm_data.fs_timing_shift); + + regmap_update_bits(afe->regmap, + etdm_data.clk_source_reg, + etdm_data.clk_source_mask << etdm_data.clk_source_shift, + ETDM_CLK_SOURCE_APLL << etdm_data.clk_source_shift); + + regmap_update_bits(afe->regmap, + etdm_data.relatch_en_sel_reg, + etdm_data.relatch_en_sel_mask << etdm_data.relatch_en_sel_shift, + get_etdm_inconn_rate(rate) << etdm_data.relatch_en_sel_shift); + + if (id == DAI_I2SOUT4) { + dev_dbg(afe->dev, "i2sout4, id: %d fmt: %d, ch: %d, sync: %d\n", + id, i2s_priv->format, channels, i2s_priv->sync); + + /* Fmt Mode: 0x00 i2s, 0x04 adsp_a, DSP_A mode for multi-channel */ + regmap_update_bits(afe->regmap, + etdm_data.tdm_mode_reg, + etdm_data.tdm_mode_mask << etdm_data.tdm_mode_shift, + i2s_priv->format << etdm_data.tdm_mode_shift); + + /* set etdm ch */ + regmap_update_bits(afe->regmap, + etdm_data.ch_reg, + etdm_data.ch_mask << etdm_data.ch_shift, + (channels - 1) << etdm_data.ch_shift); + + /* set etdm sync */ + regmap_update_bits(afe->regmap, + etdm_data.sync_reg, + etdm_data.sync_mask << etdm_data.sync_shift, + i2s_priv->sync << etdm_data.sync_shift); + } else { + regmap_update_bits(afe->regmap, + etdm_data.tdm_mode_reg, + etdm_data.tdm_mode_mask << etdm_data.tdm_mode_shift, + 0x0); + } + + /* APLL */ + regmap_update_bits(afe->regmap, + 
etdm_data.relatch_domain_sel_reg, + etdm_data.relatch_domain_sel_mask << + etdm_data.relatch_domain_sel_shift, + ETDM_RELATCH_SEL_APLL << etdm_data.relatch_domain_sel_shift); + + regmap_update_bits(afe->regmap, + etdm_data.bit_length_reg, + etdm_data.bit_length_mask << etdm_data.bit_length_shift, + get_etdm_lrck_width(format) << etdm_data.bit_length_shift); + + regmap_update_bits(afe->regmap, + etdm_data.word_length_reg, + etdm_data.word_length_mask << etdm_data.word_length_shift, + get_etdm_wlen(format) << etdm_data.word_length_shift); + + /* ---etdm cowork --- */ + regmap_update_bits(afe->regmap, + etdm_data.cowork_reg, + etdm_data.cowork_mask << etdm_data.cowork_shift, + etdm_data.cowork_val << etdm_data.cowork_shift); + + /* i2s with pad top setting */ + if (is_etdm_in_pad_top(id) && etdm_data.pad_top_ck_en_reg != 0) { + regmap_update_bits(afe->regmap, + etdm_data.pad_top_ck_en_reg, + etdm_data.cowork_mask << etdm_data.pad_top_ck_en_shift, + 0x1 << etdm_data.pad_top_ck_en_shift); + + regmap_update_bits(afe->regmap, + etdm_data.master_latch_reg, + etdm_data.master_latch_mask << + etdm_data.master_latch_shift, + 0x0); + } + break; + + default: + dev_err(afe->dev, "id %d not support\n", id); + return -EINVAL; + } + + /* set share i2s */ + if (i2s_priv && i2s_priv->share_i2s_id >= 0) { + i2sin_priv = afe_priv->dai_priv[i2s_priv->share_i2s_id]; + i2sin_priv->format = i2s_priv->format; + ret = mtk_dai_i2s_config(afe, params, i2s_priv->share_i2s_id); + if (ret) + return ret; + } + + return 0; +} + +static int mtk_dai_i2s_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + + return mtk_dai_i2s_config(afe, params, dai->id); +} + +static int mtk_dai_i2s_set_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int freq, int dir) +{ + struct mtk_base_afe *afe = dev_get_drvdata(dai->dev); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + 
struct mtk_afe_i2s_priv *i2s_priv; + int apll; + int apll_rate; + + if (dai->id >= MT8196_DAI_NUM || dai->id < 0 || dir != SND_SOC_CLOCK_OUT) + return -EINVAL; + + i2s_priv = afe_priv->dai_priv[dai->id]; + if (!i2s_priv) + return -EINVAL; + + dev_dbg(afe->dev, "freq: %u\n", freq); + + apll = mt8196_get_apll_by_rate(afe, freq); + apll_rate = mt8196_get_apll_rate(afe, apll); + + if (freq > apll_rate || apll_rate % freq) + return -EINVAL; + + i2s_priv->mclk_rate = freq; + i2s_priv->mclk_apll = apll; + + if (i2s_priv->share_i2s_id > 0) { + struct mtk_afe_i2s_priv *share_i2s_priv; + + share_i2s_priv = afe_priv->dai_priv[i2s_priv->share_i2s_id]; + if (!share_i2s_priv) + return -EINVAL; + + share_i2s_priv->mclk_rate = i2s_priv->mclk_rate; + share_i2s_priv->mclk_apll = i2s_priv->mclk_apll; + } + + return 0; +} + +static int mtk_dai_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + struct mtk_afe_i2s_priv *i2s_priv; + + if (dai->id >= MT8196_DAI_NUM || dai->id < 0) + return -EINVAL; + + i2s_priv = afe_priv->dai_priv[dai->id]; + if (!i2s_priv) + return -EINVAL; + + dev_dbg(afe->dev, "dai->id: %d, fmt: 0x%x\n", dai->id, fmt); + + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + i2s_priv->format = MTK_DAI_ETDM_FORMAT_I2S; + break; + case SND_SOC_DAIFMT_LEFT_J: + i2s_priv->format = MTK_DAI_ETDM_FORMAT_LJ; + break; + case SND_SOC_DAIFMT_RIGHT_J: + i2s_priv->format = MTK_DAI_ETDM_FORMAT_RJ; + break; + case SND_SOC_DAIFMT_DSP_A: + i2s_priv->format = MTK_DAI_ETDM_FORMAT_DSPA; + break; + case SND_SOC_DAIFMT_DSP_B: + i2s_priv->format = MTK_DAI_ETDM_FORMAT_DSPB; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct snd_soc_dai_ops mtk_dai_i2s_ops = { + .hw_params = mtk_dai_i2s_hw_params, + .set_sysclk = mtk_dai_i2s_set_sysclk, + .set_fmt = mtk_dai_i2s_set_fmt, +}; + +/* dai driver */ +#define MTK_ETDM_RATES 
(SNDRV_PCM_RATE_8000_384000) +#define MTK_ETDM_FORMATS (SNDRV_PCM_FMTBIT_S8 |\ + SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S24_LE |\ + SNDRV_PCM_FMTBIT_S32_LE) + +#define MT8196_I2S_DAI(_name, _id, max_ch, dir) \ +{ \ + .name = #_name, \ + .id = _id, \ + .dir = { \ + .stream_name = #_name, \ + .channels_min = 1, \ + .channels_max = max_ch, \ + .rates = MTK_ETDM_RATES, \ + .formats = MTK_ETDM_FORMATS, \ + }, \ + .ops = &mtk_dai_i2s_ops, \ +} + +static struct snd_soc_dai_driver mtk_dai_i2s_driver[] = { + /* capture */ + MT8196_I2S_DAI(I2SIN0, MT8196_DAI_I2S_IN0, 2, capture), + MT8196_I2S_DAI(I2SIN1, MT8196_DAI_I2S_IN1, 2, capture), + MT8196_I2S_DAI(I2SIN2, MT8196_DAI_I2S_IN2, 2, capture), + MT8196_I2S_DAI(I2SIN3, MT8196_DAI_I2S_IN3, 2, capture), + MT8196_I2S_DAI(I2SIN4, MT8196_DAI_I2S_IN4, 8, capture), + MT8196_I2S_DAI(I2SIN6, MT8196_DAI_I2S_IN6, 2, capture), + MT8196_I2S_DAI(FMI2S_MASTER, MT8196_DAI_FM_I2S_MASTER, 2, capture), + /* playback */ + MT8196_I2S_DAI(I2SOUT0, MT8196_DAI_I2S_OUT0, 2, playback), + MT8196_I2S_DAI(I2SOUT1, MT8196_DAI_I2S_OUT1, 2, playback), + MT8196_I2S_DAI(I2SOUT2, MT8196_DAI_I2S_OUT2, 2, playback), + MT8196_I2S_DAI(I2SOUT3, MT8196_DAI_I2S_OUT3, 2, playback), + MT8196_I2S_DAI(I2SOUT4, MT8196_DAI_I2S_OUT4, 8, playback), + MT8196_I2S_DAI(I2SOUT6, MT8196_DAI_I2S_OUT6, 2, playback), +}; + +static const struct mtk_afe_i2s_priv mt8196_i2s_priv[DAI_I2S_NUM] = { + [DAI_I2SIN0] = { + .id = MT8196_DAI_I2S_IN0, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sin0-share", + .share_i2s_id = -1, + }, + [DAI_I2SIN1] = { + .id = MT8196_DAI_I2S_IN1, + .mclk_id = MT8196_I2SIN1_MCK, + .share_property_name = "i2sin1-share", + .share_i2s_id = -1, + }, + [DAI_I2SIN2] = { + .id = MT8196_DAI_I2S_IN2, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sin2-share", + .share_i2s_id = -1, + }, + [DAI_I2SIN3] = { + .id = MT8196_DAI_I2S_IN3, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sin3-share", + .share_i2s_id = -1, + }, + 
[DAI_I2SIN4] = { + .id = MT8196_DAI_I2S_IN4, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sin4-share", + .share_i2s_id = -1, + .sync = 0, + .ip_mode = 0, + }, + [DAI_I2SIN6] = { + .id = MT8196_DAI_I2S_IN6, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sout6-share", + .share_i2s_id = -1, + }, + [DAI_I2SOUT0] = { + .id = MT8196_DAI_I2S_OUT0, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sout0-share", + .share_i2s_id = MT8196_DAI_I2S_IN0, + }, + [DAI_I2SOUT1] = { + .id = MT8196_DAI_I2S_OUT1, + .mclk_id = MT8196_I2SIN1_MCK, + .share_property_name = "i2sout1-share", + .share_i2s_id = MT8196_DAI_I2S_IN1, + }, + [DAI_I2SOUT2] = { + .id = MT8196_DAI_I2S_OUT2, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sout2-share", + .share_i2s_id = MT8196_DAI_I2S_IN2, + }, + [DAI_I2SOUT3] = { + .id = MT8196_DAI_I2S_OUT3, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sout3-share", + .share_i2s_id = MT8196_DAI_I2S_IN3, + }, + [DAI_I2SOUT4] = { + .id = MT8196_DAI_I2S_OUT4, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sout4-share", + .share_i2s_id = MT8196_DAI_I2S_IN4, + .sync = 0, + }, + [DAI_I2SOUT6] = { + .id = MT8196_DAI_I2S_OUT6, + .mclk_id = MT8196_I2SIN0_MCK, + .share_property_name = "i2sout6-share", + .share_i2s_id = MT8196_DAI_I2S_IN6, + }, + [DAI_FMI2S_MASTER] = { + .id = MT8196_DAI_FM_I2S_MASTER, + .mclk_id = MT8196_FMI2S_MCK, + .share_property_name = "fmi2s-share", + .share_i2s_id = -1, + }, +}; + +static int mt8196_dai_i2s_get_share(struct mtk_base_afe *afe) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + const struct device_node *of_node = afe->dev->of_node; + + for (int i = 0; i < DAI_I2S_NUM; i++) { + const char *of_str; + struct mtk_afe_i2s_priv *i2s_priv = afe_priv->dai_priv[mt8196_i2s_priv[i].id]; + const char *property_name = mt8196_i2s_priv[i].share_property_name; + + if (of_property_read_string(of_node, property_name, &of_str)) + continue; + + i2s_priv->share_i2s_id 
= get_i2s_id_by_name(afe, of_str); + } + + return 0; +} + +static int init_i2s_priv_data(struct mtk_base_afe *afe) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + struct mtk_afe_i2s_priv *i2s_priv; + + for (int i = 0; i < DAI_I2S_NUM; i++) { + int id = mt8196_i2s_priv[i].id; + size_t size = sizeof(struct mtk_afe_i2s_priv); + + if (id >= MT8196_DAI_NUM || id < 0) + return -EINVAL; + + i2s_priv = devm_kzalloc(afe->dev, size, GFP_KERNEL); + if (!i2s_priv) + return -ENOMEM; + + memcpy(i2s_priv, &mt8196_i2s_priv[i], size); + + afe_priv->dai_priv[id] = i2s_priv; + } + + return 0; +} + +int mt8196_dai_i2s_register(struct mtk_base_afe *afe) +{ + struct mtk_base_afe_dai *dai; + int ret; + + dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL); + if (!dai) + return -ENOMEM; + + dai->dai_drivers = mtk_dai_i2s_driver; + dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_i2s_driver); + + dai->controls = mtk_dai_i2s_controls; + dai->num_controls = ARRAY_SIZE(mtk_dai_i2s_controls); + dai->dapm_widgets = mtk_dai_i2s_widgets; + dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_i2s_widgets); + dai->dapm_routes = mtk_dai_i2s_routes; + dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_i2s_routes); + + /* set all dai i2s private data */ + ret = init_i2s_priv_data(afe); + if (ret) + return ret; + + /* parse share i2s */ + ret = mt8196_dai_i2s_get_share(afe); + if (ret) + return ret; + + list_add(&dai->list, &afe->sub_dais); + + return 0; +} diff --git a/sound/soc/mediatek/mt8196/mt8196-dai-tdm.c b/sound/soc/mediatek/mt8196/mt8196-dai-tdm.c new file mode 100644 index 00000000000000..b7aeee939d8886 --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-dai-tdm.c @@ -0,0 +1,675 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek ALSA SoC Audio DAI TDM Control + * + * Copyright (c) 2025 MediaTek Inc. 
+ * Author: Darren Ye + */ + +#include +#include + +#include "mt8196-afe-clk.h" +#include "mt8196-afe-common.h" +#include "mt8196-interconnection.h" + +struct mtk_afe_tdm_priv { + int bck_id; + int bck_rate; + + int mclk_id; + int mclk_multiple; /* according to sample rate */ + int mclk_rate; + int mclk_apll; +}; + +enum { + TDM_WLEN_8_BIT, + TDM_WLEN_16_BIT, + TDM_WLEN_24_BIT, + TDM_WLEN_32_BIT, +}; + +enum { + TDM_CHANNEL_BCK_16, + TDM_CHANNEL_BCK_24, + TDM_CHANNEL_BCK_32, +}; + +enum { + TDM_CHANNEL_NUM_2, + TDM_CHANNEL_NUM_4, + TDM_CHANNEL_NUM_8, +}; + +enum { + TDM_CH_START_O30_O31, + TDM_CH_START_O32_O33, + TDM_CH_START_O34_O35, + TDM_CH_START_O36_O37, + TDM_CH_ZERO, +}; + +enum { + DPTX_CHANNEL_2, + DPTX_CHANNEL_8, +}; + +enum { + DPTX_WLEN_24_BIT, + DPTX_WLEN_16_BIT, +}; + +#define DPTX_CH_EN_MASK_2CH (0x3) +#define DPTX_CH_EN_MASK_4CH (0xf) +#define DPTX_CH_EN_MASK_6CH (0x3f) +#define DPTX_CH_EN_MASK_8CH (0xff) + +static unsigned int get_tdm_wlen(snd_pcm_format_t format) +{ + return (snd_pcm_format_physical_width(format) / 8) - 1; +} + +static unsigned int get_tdm_channel_bck(snd_pcm_format_t format) +{ + return snd_pcm_format_physical_width(format) <= 16 ? 
+ TDM_CHANNEL_BCK_16 : TDM_CHANNEL_BCK_32; +} + +static unsigned int get_tdm_lrck_width(snd_pcm_format_t format) +{ + return snd_pcm_format_physical_width(format) - 1; +} + +static unsigned int get_tdm_ch(unsigned int ch) +{ + switch (ch) { + case 1: + case 2: + return TDM_CHANNEL_NUM_2; + case 3: + case 4: + return TDM_CHANNEL_NUM_4; + case 5: + case 6: + case 7: + case 8: + default: + return TDM_CHANNEL_NUM_8; + } +} + +static unsigned int get_dptx_ch_enable_mask(struct device *dev, unsigned int ch) +{ + switch (ch) { + case 1: + case 2: + return DPTX_CH_EN_MASK_2CH; + case 3: + case 4: + return DPTX_CH_EN_MASK_4CH; + case 5: + case 6: + return DPTX_CH_EN_MASK_6CH; + case 7: + case 8: + return DPTX_CH_EN_MASK_8CH; + default: + dev_warn(dev, "invalid channel num, default use 2ch\n"); + return DPTX_CH_EN_MASK_2CH; + } +} + +static unsigned int get_dptx_ch(unsigned int ch) +{ + if (ch == 2) + return DPTX_CHANNEL_2; + else + return DPTX_CHANNEL_8; +} + +static unsigned int get_dptx_wlen(snd_pcm_format_t format) +{ + return snd_pcm_format_physical_width(format) <= 16 ? 
+ DPTX_WLEN_16_BIT : DPTX_WLEN_24_BIT; +} + +/* interconnection */ +enum { + HDMI_CONN_CH0, + HDMI_CONN_CH1, + HDMI_CONN_CH2, + HDMI_CONN_CH3, + HDMI_CONN_CH4, + HDMI_CONN_CH5, + HDMI_CONN_CH6, + HDMI_CONN_CH7, +}; + +static const char *const hdmi_conn_mux_map[] = { + "CH0", "CH1", "CH2", "CH3", "CH4", "CH5", "CH6", "CH7", +}; + +static int hdmi_conn_mux_map_value[] = { + HDMI_CONN_CH0, HDMI_CONN_CH1, HDMI_CONN_CH2, HDMI_CONN_CH3, + HDMI_CONN_CH4, HDMI_CONN_CH5, HDMI_CONN_CH6, HDMI_CONN_CH7, +}; + +static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch0_mux_map_enum, + AFE_HDMI_CONN0, HDMI_O_0_SFT, HDMI_O_0_MASK, + hdmi_conn_mux_map, hdmi_conn_mux_map_value); + +static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch1_mux_map_enum, + AFE_HDMI_CONN0, + HDMI_O_1_SFT, + HDMI_O_1_MASK, + hdmi_conn_mux_map, + hdmi_conn_mux_map_value); + +static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch2_mux_map_enum, + AFE_HDMI_CONN0, + HDMI_O_2_SFT, + HDMI_O_2_MASK, + hdmi_conn_mux_map, + hdmi_conn_mux_map_value); + +static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch3_mux_map_enum, + AFE_HDMI_CONN0, + HDMI_O_3_SFT, + HDMI_O_3_MASK, + hdmi_conn_mux_map, + hdmi_conn_mux_map_value); + +static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch4_mux_map_enum, + AFE_HDMI_CONN0, + HDMI_O_4_SFT, + HDMI_O_4_MASK, + hdmi_conn_mux_map, + hdmi_conn_mux_map_value); + +static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch5_mux_map_enum, + AFE_HDMI_CONN0, + HDMI_O_5_SFT, + HDMI_O_5_MASK, + hdmi_conn_mux_map, + hdmi_conn_mux_map_value); + +static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch6_mux_map_enum, + AFE_HDMI_CONN0, + HDMI_O_6_SFT, + HDMI_O_6_MASK, + hdmi_conn_mux_map, + hdmi_conn_mux_map_value); + +static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch7_mux_map_enum, + AFE_HDMI_CONN0, + HDMI_O_7_SFT, + HDMI_O_7_MASK, + hdmi_conn_mux_map, + hdmi_conn_mux_map_value); + +static const struct snd_kcontrol_new mtk_dai_tdm_controls[] = { + SOC_ENUM("HDMI_CH0_MUX", hdmi_ch0_mux_map_enum), + SOC_ENUM("HDMI_CH1_MUX", hdmi_ch1_mux_map_enum), + SOC_ENUM("HDMI_CH2_MUX", hdmi_ch2_mux_map_enum), + 
SOC_ENUM("HDMI_CH3_MUX", hdmi_ch3_mux_map_enum), + SOC_ENUM("HDMI_CH4_MUX", hdmi_ch4_mux_map_enum), + SOC_ENUM("HDMI_CH5_MUX", hdmi_ch5_mux_map_enum), + SOC_ENUM("HDMI_CH6_MUX", hdmi_ch6_mux_map_enum), + SOC_ENUM("HDMI_CH7_MUX", hdmi_ch7_mux_map_enum), +}; + +static const char *const tdm_out_demux_texts[] = { + "NONE", "TDMOUT", "DPTXOUT", +}; + +static SOC_ENUM_SINGLE_DECL(tdm_out_demux_enum, + SND_SOC_NOPM, + 0, + tdm_out_demux_texts); +static const struct snd_kcontrol_new tdm_out_demux_control = + SOC_DAPM_ENUM("TDM DEMUX ROUTE", tdm_out_demux_enum); + +enum { + SUPPLY_SEQ_APLL, + SUPPLY_SEQ_TDM_MCK_EN, + SUPPLY_SEQ_TDM_BCK_EN, + SUPPLY_SEQ_TDM_DPTX_MCK_EN, + SUPPLY_SEQ_TDM_DPTX_BCK_EN, + SUPPLY_SEQ_TDM_CG_EN, +}; + +static int get_tdm_id_by_name(const char *name) +{ + if (strstr(name, "DPTX")) + return MT8196_DAI_TDM_DPTX; + else + return MT8196_DAI_TDM; +} + +static int mtk_tdm_bck_en_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int dai_id = get_tdm_id_by_name(w->name); + struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; + + dev_dbg(cmpnt->dev, "name %s, event 0x%x, dai_id %d\n", + w->name, event, dai_id); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + mt8196_mck_enable(afe, tdm_priv->bck_id, tdm_priv->bck_rate); + break; + case SND_SOC_DAPM_POST_PMD: + mt8196_mck_disable(afe, tdm_priv->bck_id); + break; + default: + break; + } + + return 0; +} + +static int mtk_tdm_mck_en_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int dai_id = get_tdm_id_by_name(w->name); + struct 
mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; + + dev_dbg(cmpnt->dev, "name %s, event 0x%x, dai_id %d\n", + w->name, event, dai_id); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + mt8196_mck_enable(afe, tdm_priv->mclk_id, tdm_priv->mclk_rate); + break; + case SND_SOC_DAPM_POST_PMD: + tdm_priv->mclk_rate = 0; + mt8196_mck_disable(afe, tdm_priv->mclk_id); + break; + default: + break; + } + + return 0; +} + +static const struct snd_soc_dapm_widget mtk_dai_tdm_widgets[] = { + SND_SOC_DAPM_DEMUX("TDM_DEMUX", SND_SOC_NOPM, 0, 0, + &tdm_out_demux_control), + + SND_SOC_DAPM_SUPPLY_S("TDM_BCK", SUPPLY_SEQ_TDM_BCK_EN, + SND_SOC_NOPM, 0, 0, + mtk_tdm_bck_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_SUPPLY_S("TDM_MCK", SUPPLY_SEQ_TDM_MCK_EN, + SND_SOC_NOPM, 0, 0, + mtk_tdm_mck_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_SUPPLY_S("TDM_DPTX_BCK", SUPPLY_SEQ_TDM_DPTX_BCK_EN, + SND_SOC_NOPM, 0, 0, + mtk_tdm_bck_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_SUPPLY_S("TDM_DPTX_MCK", SUPPLY_SEQ_TDM_DPTX_MCK_EN, + SND_SOC_NOPM, 0, 0, + mtk_tdm_mck_en_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + /* cg */ + SND_SOC_DAPM_SUPPLY_S("TDM_CG", SUPPLY_SEQ_TDM_CG_EN, + AUDIO_TOP_CON2, PDN_TDM_OUT_SFT, 1, + NULL, 0), +}; + +static int mtk_afe_tdm_apll_connect(struct snd_soc_dapm_widget *source, + struct snd_soc_dapm_widget *sink) +{ + struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(sink->dapm); + struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int dai_id = get_tdm_id_by_name(sink->name); + struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; + int cur_apll; + + /* which apll */ + cur_apll = mt8196_get_apll_by_name(afe, source->name); + + return (tdm_priv->mclk_apll == cur_apll) ? 
1 : 0; +} + +static const struct snd_soc_dapm_route mtk_dai_tdm_routes[] = { + {"TDM_DEMUX", NULL, "HDMI"}, + + {"TDM", "TDMOUT", "TDM_DEMUX"}, + {"TDM", NULL, "TDM_BCK"}, + {"TDM", NULL, "TDM_CG"}, + + {"TDM_DPTX", "DPTXOUT", "TDM_DEMUX"}, + {"TDM_DPTX", NULL, "TDM_DPTX_BCK"}, + {"TDM_DPTX", NULL, "TDM_CG"}, + + {"TDM_BCK", NULL, "TDM_MCK"}, + {"TDM_DPTX_BCK", NULL, "TDM_DPTX_MCK"}, + {"TDM_MCK", NULL, APLL1_W_NAME, mtk_afe_tdm_apll_connect}, + {"TDM_MCK", NULL, APLL2_W_NAME, mtk_afe_tdm_apll_connect}, + {"TDM_DPTX_MCK", NULL, APLL1_W_NAME, mtk_afe_tdm_apll_connect}, + {"TDM_DPTX_MCK", NULL, APLL2_W_NAME, mtk_afe_tdm_apll_connect}, +}; + +/* dai ops */ +static int mtk_dai_tdm_cal_mclk(struct mtk_base_afe *afe, + struct mtk_afe_tdm_priv *tdm_priv, + int freq) +{ + int apll; + int apll_rate; + + apll = mt8196_get_apll_by_rate(afe, freq); + apll_rate = mt8196_get_apll_rate(afe, apll); + + if (freq > apll_rate) + return -EINVAL; + + if (apll_rate % freq != 0) + return -EINVAL; + + tdm_priv->mclk_rate = freq; + tdm_priv->mclk_apll = apll; + + return 0; +} + +static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + int tdm_id = dai->id; + struct mtk_afe_tdm_priv *tdm_priv; + unsigned int rate = params_rate(params); + unsigned int channels = params_channels(params); + snd_pcm_format_t format = params_format(params); + unsigned int tdm_con = 0; + + if (tdm_id >= MT8196_DAI_NUM || tdm_id < 0) + return -EINVAL; + + tdm_priv = afe_priv->dai_priv[tdm_id]; + + if (!tdm_priv) + return -EINVAL; + + /* calculate mclk_rate, if not set explicitly */ + if (!tdm_priv->mclk_rate) { + tdm_priv->mclk_rate = rate * tdm_priv->mclk_multiple; + mtk_dai_tdm_cal_mclk(afe, + tdm_priv, + tdm_priv->mclk_rate); + } + + /* calculate bck */ + tdm_priv->bck_rate = rate * + channels * + 
snd_pcm_format_physical_width(format); + + if (tdm_priv->bck_rate > tdm_priv->mclk_rate) + return -EINVAL; + + if (tdm_priv->mclk_rate % tdm_priv->bck_rate != 0) + return -EINVAL; + + dev_dbg(afe->dev, "id %d, rate %d, ch %d, fmt %d, mclk %d, bck %d\n", + tdm_id, rate, channels, format, tdm_priv->mclk_rate, tdm_priv->bck_rate); + + /* set tdm */ + tdm_con = 0 << BCK_INVERSE_SFT; + tdm_con |= 0 << LRCK_INVERSE_SFT; + tdm_con |= 0 << DELAY_DATA_SFT; + tdm_con |= 1 << LEFT_ALIGN_SFT; + tdm_con |= get_tdm_wlen(format) << WLEN_SFT; + tdm_con |= get_tdm_ch(channels) << CHANNEL_NUM_SFT; + tdm_con |= get_tdm_channel_bck(format) << CHANNEL_BCK_CYCLES_SFT; + tdm_con |= get_tdm_lrck_width(format) << LRCK_TDM_WIDTH_SFT; + regmap_write(afe->regmap, AFE_TDM_CON1, tdm_con); + + /* set dptx */ + if (tdm_id == MT8196_DAI_TDM_DPTX) { + regmap_update_bits(afe->regmap, AFE_DPTX_CON, + DPTX_CHANNEL_ENABLE_MASK_SFT, + get_dptx_ch_enable_mask(afe->dev, channels) << + DPTX_CHANNEL_ENABLE_SFT); + regmap_update_bits(afe->regmap, AFE_DPTX_CON, + DPTX_CHANNEL_NUMBER_MASK_SFT, + get_dptx_ch(channels) << + DPTX_CHANNEL_NUMBER_SFT); + regmap_update_bits(afe->regmap, AFE_DPTX_CON, + DPTX_16BIT_MASK_SFT, + get_dptx_wlen(format) << DPTX_16BIT_SFT); + } + switch (channels) { + case 1: + case 2: + tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT; + tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT1_SFT; + tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT2_SFT; + tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT; + break; + case 3: + case 4: + tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT; + tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT; + tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT2_SFT; + tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT; + break; + case 5: + case 6: + tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT; + tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT; + tdm_con |= TDM_CH_START_O34_O35 << ST_CH_PAIR_SOUT2_SFT; + tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT; + break; 
+ case 7: + case 8: + tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT; + tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT; + tdm_con |= TDM_CH_START_O34_O35 << ST_CH_PAIR_SOUT2_SFT; + tdm_con |= TDM_CH_START_O36_O37 << ST_CH_PAIR_SOUT3_SFT; + break; + default: + tdm_con = 0; + } + + regmap_write(afe->regmap, AFE_TDM_CON2, tdm_con); + regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, + HDMI_CH_NUM_MASK_SFT, + channels << HDMI_CH_NUM_SFT); + + return 0; +} + +static int mtk_dai_tdm_trigger(struct snd_pcm_substream *substream, + int cmd, + struct snd_soc_dai *dai) +{ + struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); + int tdm_id = dai->id; + + dev_dbg(afe->dev, "cmd %d, tdm_id %d\n", cmd, tdm_id); + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + /* enable Out control */ + regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, + HDMI_OUT_ON_MASK_SFT, + 0x1 << HDMI_OUT_ON_SFT); + + /* enable dptx */ + if (tdm_id == MT8196_DAI_TDM_DPTX) { + regmap_update_bits(afe->regmap, AFE_DPTX_CON, + DPTX_ON_MASK_SFT, 0x1 << + DPTX_ON_SFT); + } + + /* enable tdm */ + regmap_update_bits(afe->regmap, AFE_TDM_CON1, + TDM_EN_MASK_SFT, 0x1 << TDM_EN_SFT); + break; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + /* disable tdm */ + regmap_update_bits(afe->regmap, AFE_TDM_CON1, + TDM_EN_MASK_SFT, 0); + + /* disable dptx */ + if (tdm_id == MT8196_DAI_TDM_DPTX) { + regmap_update_bits(afe->regmap, AFE_DPTX_CON, + DPTX_ON_MASK_SFT, 0); + } + + /* disable Out control */ + regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, + HDMI_OUT_ON_MASK_SFT, 0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int mtk_dai_tdm_set_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int freq, int dir) +{ + struct mtk_base_afe *afe = dev_get_drvdata(dai->dev); + struct mt8196_afe_private *afe_priv = afe->platform_priv; + struct mtk_afe_tdm_priv *tdm_priv; + + if (dai->id >= MT8196_DAI_NUM || dai->id < 0) + return 
-EINVAL; + + tdm_priv = afe_priv->dai_priv[dai->id]; + + if (!tdm_priv) + return -EINVAL; + + if (dir != SND_SOC_CLOCK_OUT) + return -EINVAL; + + dev_dbg(afe->dev, "freq %d\n", freq); + + return mtk_dai_tdm_cal_mclk(afe, tdm_priv, freq); +} + +static const struct snd_soc_dai_ops mtk_dai_tdm_ops = { + .hw_params = mtk_dai_tdm_hw_params, + .trigger = mtk_dai_tdm_trigger, + .set_sysclk = mtk_dai_tdm_set_sysclk, +}; + +/* dai driver */ +#define MTK_TDM_RATES (SNDRV_PCM_RATE_8000_48000 |\ + SNDRV_PCM_RATE_88200 |\ + SNDRV_PCM_RATE_96000 |\ + SNDRV_PCM_RATE_176400 |\ + SNDRV_PCM_RATE_192000) + +#define MTK_TDM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S24_LE |\ + SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_driver mtk_dai_tdm_driver[] = { + { + .name = "TDM", + .id = MT8196_DAI_TDM, + .playback = { + .stream_name = "TDM", + .channels_min = 2, + .channels_max = 8, + .rates = MTK_TDM_RATES, + .formats = MTK_TDM_FORMATS, + }, + .ops = &mtk_dai_tdm_ops, + }, + { + .name = "TDM_DPTX", + .id = MT8196_DAI_TDM_DPTX, + .playback = { + .stream_name = "TDM_DPTX", + .channels_min = 2, + .channels_max = 8, + .rates = MTK_TDM_RATES, + .formats = MTK_TDM_FORMATS, + }, + .ops = &mtk_dai_tdm_ops, + }, +}; + +static struct mtk_afe_tdm_priv *init_tdm_priv_data(struct mtk_base_afe *afe, + int id) +{ + struct mtk_afe_tdm_priv *tdm_priv; + + tdm_priv = devm_kzalloc(afe->dev, sizeof(struct mtk_afe_tdm_priv), + GFP_KERNEL); + if (!tdm_priv) + return NULL; + + if (id == MT8196_DAI_TDM_DPTX) + tdm_priv->mclk_multiple = 256; + else + tdm_priv->mclk_multiple = 128; + + tdm_priv->bck_id = MT8196_TDMOUT_BCK; + tdm_priv->mclk_id = MT8196_TDMOUT_MCK; + + return tdm_priv; +} + +int mt8196_dai_tdm_register(struct mtk_base_afe *afe) +{ + struct mt8196_afe_private *afe_priv = afe->platform_priv; + struct mtk_afe_tdm_priv *tdm_priv, *tdm_dptx_priv; + struct mtk_base_afe_dai *dai; + + dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL); + if (!dai) + return -ENOMEM; + + 
dai->dai_drivers = mtk_dai_tdm_driver; + dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_tdm_driver); + dai->controls = mtk_dai_tdm_controls; + dai->num_controls = ARRAY_SIZE(mtk_dai_tdm_controls); + dai->dapm_widgets = mtk_dai_tdm_widgets; + dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_tdm_widgets); + dai->dapm_routes = mtk_dai_tdm_routes; + dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_tdm_routes); + + tdm_priv = init_tdm_priv_data(afe, MT8196_DAI_TDM); + if (!tdm_priv) + return -ENOMEM; + + tdm_dptx_priv = init_tdm_priv_data(afe, MT8196_DAI_TDM_DPTX); + if (!tdm_dptx_priv) + return -ENOMEM; + + list_add(&dai->list, &afe->sub_dais); + + afe_priv->dai_priv[MT8196_DAI_TDM] = tdm_priv; + afe_priv->dai_priv[MT8196_DAI_TDM_DPTX] = tdm_dptx_priv; + + return 0; +} + diff --git a/sound/soc/mediatek/mt8196/mt8196-interconnection.h b/sound/soc/mediatek/mt8196/mt8196-interconnection.h new file mode 100644 index 00000000000000..acb91da3b4dbba --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-interconnection.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Mediatek MT8196 audio driver interconnection definition + * + * Copyright (c) 2025 MediaTek Inc. 
+ * Author: Darren Ye + */ + +#ifndef _MT8196_INTERCONNECTION_H_ +#define _MT8196_INTERCONNECTION_H_ + +/* in port define */ + +#define I_CONNSYS_I2S_CH1 0 +#define I_CONNSYS_I2S_CH2 1 +#define I_GAIN0_OUT_CH1 6 +#define I_GAIN0_OUT_CH2 7 +#define I_GAIN1_OUT_CH1 8 +#define I_GAIN1_OUT_CH2 9 +#define I_GAIN2_OUT_CH1 10 +#define I_GAIN2_OUT_CH2 11 +#define I_GAIN3_OUT_CH1 12 +#define I_GAIN3_OUT_CH2 13 +#define I_STF_CH1 14 +#define I_ADDA_UL_CH1 16 +#define I_ADDA_UL_CH2 17 +#define I_ADDA_UL_CH3 18 +#define I_ADDA_UL_CH4 19 +#define I_UL_PROX_CH1 20 +#define I_UL_PROX_CH2 21 +#define I_ADDA_UL_CH5 24 +#define I_ADDA_UL_CH6 25 +#define I_DMIC0_CH1 28 +#define I_DMIC0_CH2 29 +#define I_DMIC1_CH1 30 +#define I_DMIC1_CH2 31 + +/* in port define >= 32 */ +#define I_32_OFFSET 32 +#define I_DL0_CH1 (32 - I_32_OFFSET) +#define I_DL0_CH2 (33 - I_32_OFFSET) +#define I_DL1_CH1 (34 - I_32_OFFSET) +#define I_DL1_CH2 (35 - I_32_OFFSET) +#define I_DL2_CH1 (36 - I_32_OFFSET) +#define I_DL2_CH2 (37 - I_32_OFFSET) +#define I_DL3_CH1 (38 - I_32_OFFSET) +#define I_DL3_CH2 (39 - I_32_OFFSET) +#define I_DL4_CH1 (40 - I_32_OFFSET) +#define I_DL4_CH2 (41 - I_32_OFFSET) +#define I_DL5_CH1 (42 - I_32_OFFSET) +#define I_DL5_CH2 (43 - I_32_OFFSET) +#define I_DL6_CH1 (44 - I_32_OFFSET) +#define I_DL6_CH2 (45 - I_32_OFFSET) +#define I_DL7_CH1 (46 - I_32_OFFSET) +#define I_DL7_CH2 (47 - I_32_OFFSET) +#define I_DL8_CH1 (48 - I_32_OFFSET) +#define I_DL8_CH2 (49 - I_32_OFFSET) +#define I_DL_4CH_CH1 (50 - I_32_OFFSET) +#define I_DL_4CH_CH2 (51 - I_32_OFFSET) +#define I_DL_4CH_CH3 (52 - I_32_OFFSET) +#define I_DL_4CH_CH4 (53 - I_32_OFFSET) +#define I_DL_24CH_CH1 (54 - I_32_OFFSET) +#define I_DL_24CH_CH2 (55 - I_32_OFFSET) +#define I_DL_24CH_CH3 (56 - I_32_OFFSET) +#define I_DL_24CH_CH4 (57 - I_32_OFFSET) +#define I_DL_24CH_CH5 (58 - I_32_OFFSET) +#define I_DL_24CH_CH6 (59 - I_32_OFFSET) +#define I_DL_24CH_CH7 (60 - I_32_OFFSET) +#define I_DL_24CH_CH8 (61 - I_32_OFFSET) + +/* in port define >= 64 */ 
+#define I_64_OFFSET 64 +#define I_DL23_CH1 (78 - I_64_OFFSET) +#define I_DL23_CH2 (79 - I_64_OFFSET) +#define I_DL24_CH1 (80 - I_64_OFFSET) +#define I_DL24_CH2 (81 - I_64_OFFSET) +#define I_DL25_CH1 (82 - I_64_OFFSET) +#define I_DL25_CH2 (83 - I_64_OFFSET) +#define I_DL26_CH1 (84 - I_64_OFFSET) +#define I_DL26_CH2 (85 - I_64_OFFSET) + +/* in port define >= 128 */ +#define I_128_OFFSET 128 +#define I_PCM_0_CAP_CH1 (130 - I_128_OFFSET) +#define I_PCM_0_CAP_CH2 (131 - I_128_OFFSET) +#define I_PCM_1_CAP_CH1 (132 - I_128_OFFSET) +#define I_PCM_1_CAP_CH2 (133 - I_128_OFFSET) +#define I_I2SIN0_CH1 (134 - I_128_OFFSET) +#define I_I2SIN0_CH2 (135 - I_128_OFFSET) +#define I_I2SIN1_CH1 (136 - I_128_OFFSET) +#define I_I2SIN1_CH2 (137 - I_128_OFFSET) +#define I_I2SIN2_CH1 (138 - I_128_OFFSET) +#define I_I2SIN2_CH2 (139 - I_128_OFFSET) +#define I_I2SIN3_CH1 (140 - I_128_OFFSET) +#define I_I2SIN3_CH2 (141 - I_128_OFFSET) +#define I_I2SIN4_CH1 (142 - I_128_OFFSET) +#define I_I2SIN4_CH2 (143 - I_128_OFFSET) +#define I_I2SIN4_CH3 (144 - I_128_OFFSET) +#define I_I2SIN4_CH4 (145 - I_128_OFFSET) +#define I_I2SIN4_CH5 (146 - I_128_OFFSET) +#define I_I2SIN4_CH6 (147 - I_128_OFFSET) +#define I_I2SIN4_CH7 (148 - I_128_OFFSET) +#define I_I2SIN4_CH8 (149 - I_128_OFFSET) + +/* in port define >= 160 */ +#define I_160_OFFSET 160 +#define I_I2SIN6_CH1 (166 - I_160_OFFSET) +#define I_I2SIN6_CH2 (167 - I_160_OFFSET) + +/* in port define >= 192 */ +#define I_192_OFFSET 192 +#define I_SRC_0_OUT_CH1 (198 - I_192_OFFSET) +#define I_SRC_0_OUT_CH2 (199 - I_192_OFFSET) +#define I_SRC_1_OUT_CH1 (200 - I_192_OFFSET) +#define I_SRC_1_OUT_CH2 (201 - I_192_OFFSET) +#define I_SRC_2_OUT_CH1 (202 - I_192_OFFSET) +#define I_SRC_2_OUT_CH2 (203 - I_192_OFFSET) +#define I_SRC_3_OUT_CH1 (204 - I_192_OFFSET) +#define I_SRC_3_OUT_CH2 (205 - I_192_OFFSET) + +#endif diff --git a/sound/soc/mediatek/mt8196/mt8196-nau8825.c b/sound/soc/mediatek/mt8196/mt8196-nau8825.c new file mode 100644 index 
00000000000000..c9424786c53dfe --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-nau8825.c @@ -0,0 +1,870 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mt8196-nau8825.c -- mt8196 nau8825 ALSA SoC machine driver + * + * Copyright (c) 2025 MediaTek Inc. + * Author: Darren Ye + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include "mt8196-afe-common.h" + +#include "../../codecs/nau8825.h" +#include "../../codecs/rt5682s.h" + +#include "../common/mtk-soc-card.h" +#include "../common/mtk-dsp-sof-common.h" +#include "../common/mtk-soundcard-driver.h" +#include "../common/mtk-afe-platform-driver.h" + +#define NAU8825_HS_PRESENT BIT(0) +#define RT5682S_HS_PRESENT BIT(1) +#define RT5650_HS_PRESENT BIT(2) + +/* + * Nau88l25 + */ +#define NAU8825_CODEC_DAI "nau8825-hifi" + +/* + * Rt5682s + */ +#define RT5682S_CODEC_DAI "rt5682s-aif1" + +/* + * Rt5650 + */ +#define RT5650_CODEC_DAI "rt5645-aif1" + +#define SOF_DMA_DL1 "SOF_DMA_DL1" +#define SOF_DMA_DL_24CH "SOF_DMA_DL_24CH" +#define SOF_DMA_UL0 "SOF_DMA_UL0" +#define SOF_DMA_UL1 "SOF_DMA_UL1" +#define SOF_DMA_UL2 "SOF_DMA_UL2" + +enum mt8196_jacks { + MT8196_JACK_HEADSET, + MT8196_JACK_DP, + MT8196_JACK_HDMI, + MT8196_JACK_MAX, +}; + +static struct snd_soc_jack_pin mt8196_dp_jack_pins[] = { + { + .pin = "DP", + .mask = SND_JACK_AVOUT, + }, +}; + +static struct snd_soc_jack_pin mt8196_hdmi_jack_pins[] = { + { + .pin = "HDMI", + .mask = SND_JACK_AVOUT, + }, +}; + +static struct snd_soc_jack_pin nau8825_jack_pins[] = { + { + .pin = "Headphone Jack", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Headset Mic", + .mask = SND_JACK_MICROPHONE, + }, +}; + +static const struct snd_kcontrol_new mt8196_dumb_spk_controls[] = { + SOC_DAPM_PIN_SWITCH("Ext Spk"), +}; + +static const struct snd_soc_dapm_widget mt8196_dumb_spk_widgets[] = { + SND_SOC_DAPM_SPK("Ext Spk", NULL), +}; + +static const struct snd_soc_dapm_widget mt8196_nau8825_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + 
SND_SOC_DAPM_MIC("Headset Mic", NULL), + SND_SOC_DAPM_SPK("Ext Spk", NULL), + SND_SOC_DAPM_SINK("DP"), +}; + +static const struct snd_kcontrol_new mt8196_nau8825_controls[] = { + SOC_DAPM_PIN_SWITCH("Headphone Jack"), + SOC_DAPM_PIN_SWITCH("Headset Mic"), +}; + +#define EXT_SPK_AMP_W_NAME "Ext_Speaker_Amp" + +static struct snd_soc_card mt8196_nau8825_soc_card; + +static const struct snd_soc_dapm_widget mt8196_nau8825_card_widgets[] = { + /* SOF Uplink */ + SND_SOC_DAPM_MIXER("SOF_DMA_UL0", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("SOF_DMA_UL1", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("SOF_DMA_UL2", SND_SOC_NOPM, 0, 0, NULL, 0), + + /* + * SOF Downlink + * the widgets on the machine driver cannot use the parameter with kcontrol + * because the widget domain is its platform driver. so sof downlink route + * is written in the i2s dai driver. + */ +}; + +static const struct snd_soc_dapm_route mt8196_nau8825_card_routes[] = { + /* SOF Uplink */ + {"SOF_DMA_UL0", NULL, "UL0_CH1"}, + {"SOF_DMA_UL0", NULL, "UL0_CH2"}, + /* SOF Uplink */ + {"SOF_DMA_UL1", NULL, "UL1_CH1"}, + {"SOF_DMA_UL1", NULL, "UL1_CH2"}, + /* SOF Uplink */ + {"SOF_DMA_UL2", NULL, "UL2_CH1"}, + {"SOF_DMA_UL2", NULL, "UL2_CH2"}, +}; + +static const struct snd_kcontrol_new mt8196_nau8825_card_controls[] = { + SOC_DAPM_PIN_SWITCH(EXT_SPK_AMP_W_NAME), +}; + +/* + * define mtk_spk_i2s_mck node in dts when need mclk, + * BE i2s need assign snd_soc_ops = mt8196_nau8825_i2s_ops + */ +static int mt8196_nau8825_i2s_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + unsigned int rate = params_rate(params); + unsigned int mclk_fs_ratio = 128; + unsigned int mclk_fs = rate * mclk_fs_ratio; + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + + return snd_soc_dai_set_sysclk(cpu_dai, + 0, mclk_fs, SND_SOC_CLOCK_OUT); +} + +static const struct snd_soc_ops mt8196_nau8825_i2s_ops = { + .hw_params 
= mt8196_nau8825_i2s_hw_params, +}; + +static int mt8196_dptx_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + unsigned int rate = params_rate(params); + unsigned int mclk_fs_ratio = 256; + unsigned int mclk_fs = rate * mclk_fs_ratio; + struct snd_soc_dai *dai = snd_soc_rtd_to_cpu(rtd, 0); + + return snd_soc_dai_set_sysclk(dai, 0, mclk_fs, SND_SOC_CLOCK_OUT); +} + +static const struct snd_soc_ops mt8196_dptx_ops = { + .hw_params = mt8196_dptx_hw_params, +}; + +static int mt8196_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_hw_params *params) +{ + dev_info(rtd->dev, "fix format to 32bit\n"); + + /* fix BE i2s format to 32bit, clean param mask first */ + snd_mask_reset_range(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), + 0, (__force unsigned int)SNDRV_PCM_FORMAT_LAST); + + params_set_format(params, SNDRV_PCM_FORMAT_S32_LE); + return 0; +} + +static int mt8196_sof_be_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct snd_soc_component *cmpnt_afe = NULL; + struct snd_soc_pcm_runtime *runtime; + + /* find afe component */ + for_each_card_rtds(rtd->card, runtime) { + cmpnt_afe = snd_soc_rtdcom_lookup(runtime, AFE_PCM_NAME); + if (cmpnt_afe) { + dev_info(rtd->dev, "component->name: %s\n", cmpnt_afe->name); + break; + } + } + + if (cmpnt_afe && !pm_runtime_active(cmpnt_afe->dev)) { + dev_err(rtd->dev, "afe pm runtime is not active!!\n"); + return -EINVAL; + } + + return 0; +} + +static const struct snd_soc_ops mt8196_sof_be_ops = { + .hw_params = mt8196_sof_be_hw_params, +}; + +static const struct sof_conn_stream g_sof_conn_streams[] = { + { + .sof_link = "AFE_SOF_DL1", + .sof_dma = SOF_DMA_DL1, + .stream_dir = SNDRV_PCM_STREAM_PLAYBACK + }, + { + .sof_link = "AFE_SOF_DL_24CH", + .sof_dma = SOF_DMA_DL_24CH, + .stream_dir = 
SNDRV_PCM_STREAM_PLAYBACK + }, + { + .sof_link = "AFE_SOF_UL0", + .sof_dma = SOF_DMA_UL0, + .stream_dir = SNDRV_PCM_STREAM_CAPTURE + }, + { + .sof_link = "AFE_SOF_UL1", + .sof_dma = SOF_DMA_UL1, + .stream_dir = SNDRV_PCM_STREAM_CAPTURE + }, + { + .sof_link = "AFE_SOF_UL2", + .sof_dma = SOF_DMA_UL2, + .stream_dir = SNDRV_PCM_STREAM_CAPTURE + }, +}; + +/* FE */ +SND_SOC_DAILINK_DEFS(playback1, + DAILINK_COMP_ARRAY(COMP_CPU("DL1")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(playback_24ch, + DAILINK_COMP_ARRAY(COMP_CPU("DL_24CH")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(capture0, + DAILINK_COMP_ARRAY(COMP_CPU("UL0")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(capture1, + DAILINK_COMP_ARRAY(COMP_CPU("UL1")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(capture2, + DAILINK_COMP_ARRAY(COMP_CPU("UL2")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(playback_hdmi, + DAILINK_COMP_ARRAY(COMP_CPU("HDMI")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(playback2, + DAILINK_COMP_ARRAY(COMP_CPU("DL2")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(capture_cm0, + DAILINK_COMP_ARRAY(COMP_CPU("UL_CM0")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +/* BE */ +SND_SOC_DAILINK_DEFS(ap_dmic, + DAILINK_COMP_ARRAY(COMP_CPU("AP_DMIC")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(ap_dmic_ch34, + DAILINK_COMP_ARRAY(COMP_CPU("AP_DMIC_CH34")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(ap_dmic_multich, + DAILINK_COMP_ARRAY(COMP_CPU("AP_DMIC_MULTICH")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + 
DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(i2sin6, + DAILINK_COMP_ARRAY(COMP_CPU("I2SIN6")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(i2sout3, + DAILINK_COMP_ARRAY(COMP_CPU("I2SOUT3")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(i2sout4, + DAILINK_COMP_ARRAY(COMP_CPU("I2SOUT4")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(i2sout6, + DAILINK_COMP_ARRAY(COMP_CPU("I2SOUT6")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(tdm_dptx, + DAILINK_COMP_ARRAY(COMP_CPU("TDM_DPTX")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(AFE_SOF_DL_24CH, + DAILINK_COMP_ARRAY(COMP_CPU("SOF_DL_24CH")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(AFE_SOF_DL1, + DAILINK_COMP_ARRAY(COMP_CPU("SOF_DL1")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(AFE_SOF_UL0, + DAILINK_COMP_ARRAY(COMP_CPU("SOF_UL0")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(AFE_SOF_UL1, + DAILINK_COMP_ARRAY(COMP_CPU("SOF_UL1")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); +SND_SOC_DAILINK_DEFS(AFE_SOF_UL2, + DAILINK_COMP_ARRAY(COMP_CPU("SOF_UL2")), + DAILINK_COMP_ARRAY(COMP_DUMMY()), + DAILINK_COMP_ARRAY(COMP_EMPTY())); + +static struct snd_soc_dai_link mt8196_nau8825_dai_links[] = { + /* + * The SOF topology expects PCM streams 0~4 to be available + * for the SOF PCM streams. Put the SOF BE definitions here + * so that the PCM device numbers are skipped over. + * (BE dailinks do not have PCM devices created.) 
+ */ + { + .name = "AFE_SOF_DL_24CH", + .no_pcm = 1, + .playback_only = 1, + .ops = &mt8196_sof_be_ops, + SND_SOC_DAILINK_REG(AFE_SOF_DL_24CH), + }, + { + .name = "AFE_SOF_DL1", + .no_pcm = 1, + .playback_only = 1, + .ops = &mt8196_sof_be_ops, + SND_SOC_DAILINK_REG(AFE_SOF_DL1), + }, + { + .name = "AFE_SOF_UL0", + .no_pcm = 1, + .capture_only = 1, + .ops = &mt8196_sof_be_ops, + SND_SOC_DAILINK_REG(AFE_SOF_UL0), + }, + { + .name = "AFE_SOF_UL1", + .no_pcm = 1, + .capture_only = 1, + .ops = &mt8196_sof_be_ops, + SND_SOC_DAILINK_REG(AFE_SOF_UL1), + }, + { + .name = "AFE_SOF_UL2", + .no_pcm = 1, + .capture_only = 1, + .ops = &mt8196_sof_be_ops, + SND_SOC_DAILINK_REG(AFE_SOF_UL2), + }, + /* Front End DAI links */ + { + .name = "HDMI_FE", + .stream_name = "HDMI Playback", + .trigger = {SND_SOC_DPCM_TRIGGER_PRE, + SND_SOC_DPCM_TRIGGER_PRE}, + .dynamic = 1, + .playback_only = 1, + SND_SOC_DAILINK_REG(playback_hdmi), + }, + { + .name = "DL2_FE", + .stream_name = "DL2 Playback", + .trigger = {SND_SOC_DPCM_TRIGGER_PRE, + SND_SOC_DPCM_TRIGGER_PRE}, + .dynamic = 1, + .playback_only = 1, + SND_SOC_DAILINK_REG(playback2), + }, + { + .name = "UL_CM0_FE", + .stream_name = "UL_CM0 Capture", + .trigger = {SND_SOC_DPCM_TRIGGER_PRE, + SND_SOC_DPCM_TRIGGER_PRE}, + .dynamic = 1, + .capture_only = 1, + SND_SOC_DAILINK_REG(capture_cm0), + }, + { + .name = "DL_24CH_FE", + .stream_name = "DL_24CH Playback", + .trigger = {SND_SOC_DPCM_TRIGGER_PRE, + SND_SOC_DPCM_TRIGGER_PRE}, + .dynamic = 1, + .playback_only = 1, + SND_SOC_DAILINK_REG(playback_24ch), + }, + { + .name = "DL1_FE", + .stream_name = "DL1 Playback", + .trigger = {SND_SOC_DPCM_TRIGGER_PRE, + SND_SOC_DPCM_TRIGGER_PRE}, + .dynamic = 1, + .playback_only = 1, + SND_SOC_DAILINK_REG(playback1), + }, + { + .name = "UL0_FE", + .stream_name = "UL0 Capture", + .trigger = {SND_SOC_DPCM_TRIGGER_PRE, + SND_SOC_DPCM_TRIGGER_PRE}, + .dynamic = 1, + .capture_only = 1, + SND_SOC_DAILINK_REG(capture0), + }, + { + .name = "UL1_FE", + .stream_name = 
"UL1 Capture", + .trigger = {SND_SOC_DPCM_TRIGGER_PRE, + SND_SOC_DPCM_TRIGGER_PRE}, + .dynamic = 1, + .capture_only = 1, + SND_SOC_DAILINK_REG(capture1), + }, + { + .name = "UL2_FE", + .stream_name = "UL2 Capture", + .trigger = {SND_SOC_DPCM_TRIGGER_PRE, + SND_SOC_DPCM_TRIGGER_PRE}, + .dynamic = 1, + .capture_only = 1, + SND_SOC_DAILINK_REG(capture2), + }, + /* Back End DAI links */ + { + .name = "I2SIN6_BE", + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBC_CFC + | SND_SOC_DAIFMT_GATED, + .ops = &mt8196_nau8825_i2s_ops, + .no_pcm = 1, + .capture_only = 1, + .ignore_suspend = 1, + .be_hw_params_fixup = mt8196_hw_params_fixup, + SND_SOC_DAILINK_REG(i2sin6), + }, + { + .name = "I2SOUT4_BE", + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBC_CFC + | SND_SOC_DAIFMT_GATED, + .ops = &mt8196_nau8825_i2s_ops, + .no_pcm = 1, + .playback_only = 1, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .be_hw_params_fixup = mt8196_hw_params_fixup, + SND_SOC_DAILINK_REG(i2sout4), + }, + { + .name = "I2SOUT6_BE", + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBC_CFC + | SND_SOC_DAIFMT_GATED, + .ops = &mt8196_nau8825_i2s_ops, + .no_pcm = 1, + .playback_only = 1, + .ignore_suspend = 1, + .be_hw_params_fixup = mt8196_hw_params_fixup, + SND_SOC_DAILINK_REG(i2sout6), + }, + { + .name = "AP_DMIC_BE", + .no_pcm = 1, + .capture_only = 1, + .ignore_suspend = 1, + SND_SOC_DAILINK_REG(ap_dmic), + }, + { + .name = "AP_DMIC_CH34_BE", + .no_pcm = 1, + .capture_only = 1, + .ignore_suspend = 1, + SND_SOC_DAILINK_REG(ap_dmic_ch34), + }, + { + .name = "AP_DMIC_MULTICH_BE", + .no_pcm = 1, + .capture_only = 1, + .ignore_suspend = 1, + SND_SOC_DAILINK_REG(ap_dmic_multich), + }, + { + .name = "TDM_DPTX_BE", + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBC_CFC + | SND_SOC_DAIFMT_GATED, + .ops = &mt8196_dptx_ops, + .be_hw_params_fixup = mt8196_hw_params_fixup, + .no_pcm = 1, + .playback_only = 1, + .ignore_suspend = 1, + SND_SOC_DAILINK_REG(tdm_dptx), + }, + { + .name = "I2SOUT3_BE", + 
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBC_CFC + | SND_SOC_DAIFMT_GATED, + .ops = &mt8196_nau8825_i2s_ops, + .no_pcm = 1, + .playback_only = 1, + .ignore_suspend = 1, + SND_SOC_DAILINK_REG(i2sout3), + }, +}; + +static int mt8196_dumb_amp_init(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_card *card = rtd->card; + struct snd_soc_dapm_context *dapm = snd_soc_card_to_dapm(card); + int ret = 0; + + ret = snd_soc_dapm_new_controls(dapm, mt8196_dumb_spk_widgets, + ARRAY_SIZE(mt8196_dumb_spk_widgets)); + if (ret) { + dev_err(rtd->dev, "unable to add Dumb Speaker dapm, ret %d\n", ret); + return ret; + } + + ret = snd_soc_add_card_controls(card, mt8196_dumb_spk_controls, + ARRAY_SIZE(mt8196_dumb_spk_controls)); + if (ret) { + dev_err(rtd->dev, "unable to add Dumb card controls, ret %d\n", ret); + return ret; + } + + return 0; +} + +static int mt8196_dptx_codec_init(struct snd_soc_pcm_runtime *rtd) +{ + struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(rtd->card); + struct snd_soc_jack *jack = &soc_card_data->card_data->jacks[MT8196_JACK_DP]; + struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; + int ret = 0; + + ret = snd_soc_card_jack_new_pins(rtd->card, "DP Jack", SND_JACK_AVOUT, + jack, mt8196_dp_jack_pins, + ARRAY_SIZE(mt8196_dp_jack_pins)); + if (ret) { + dev_err(rtd->dev, "new jack failed: %d\n", ret); + return ret; + } + + ret = snd_soc_component_set_jack(component, jack, NULL); + if (ret) { + dev_err(rtd->dev, "set jack failed on %s (ret=%d)\n", + component->name, ret); + return ret; + } + + return 0; +} + +static int mt8196_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd) +{ + struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(rtd->card); + struct snd_soc_jack *jack = &soc_card_data->card_data->jacks[MT8196_JACK_HDMI]; + struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; + int ret = 0; + + ret = snd_soc_card_jack_new_pins(rtd->card, "HDMI Jack", SND_JACK_AVOUT, + 
jack, mt8196_hdmi_jack_pins, + ARRAY_SIZE(mt8196_hdmi_jack_pins)); + if (ret) { + dev_err(rtd->dev, "new jack failed: %d\n", ret); + return ret; + } + + ret = snd_soc_component_set_jack(component, jack, NULL); + if (ret) { + dev_err(rtd->dev, "set jack failed on %s (ret=%d)\n", + component->name, ret); + return ret; + } + + return 0; +} + +static int mt8196_headset_codec_init(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_card *card = rtd->card; + struct snd_soc_dapm_context *dapm = snd_soc_card_to_dapm(card); + struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(card); + struct snd_soc_jack *jack = &soc_card_data->card_data->jacks[MT8196_JACK_HEADSET]; + struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; + int ret; + int type; + + ret = snd_soc_dapm_new_controls(dapm, mt8196_nau8825_widgets, + ARRAY_SIZE(mt8196_nau8825_widgets)); + if (ret) { + dev_err(rtd->dev, "unable to add nau8825 card widget, ret %d\n", ret); + return ret; + } + + ret = snd_soc_add_card_controls(card, mt8196_nau8825_controls, + ARRAY_SIZE(mt8196_nau8825_controls)); + if (ret) { + dev_err(rtd->dev, "unable to add nau8825 card controls, ret %d\n", ret); + return ret; + } + + ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack", + SND_JACK_HEADSET | SND_JACK_BTN_0 | + SND_JACK_BTN_1 | SND_JACK_BTN_2 | + SND_JACK_BTN_3, + jack, + nau8825_jack_pins, + ARRAY_SIZE(nau8825_jack_pins)); + if (ret) { + dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret); + return ret; + } + + snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); + snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); + snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); + snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); + + type = SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3; + ret = snd_soc_component_set_jack(component, jack, (void *)&type); + + if (ret) { + dev_err(rtd->dev, "Headset Jack 
call-back failed: %d\n", ret); + return ret; + } + + return 0; +}; + +static void mt8196_headset_codec_exit(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; + + snd_soc_component_set_jack(component, NULL, NULL); +} + +static int mt8196_nau8825_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0); + unsigned int rate = params_rate(params); + unsigned int bit_width = params_width(params); + int clk_freq, ret; + + clk_freq = rate * 2 * bit_width; + + /* Configure clock for codec */ + ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_FLL_BLK, 0, + SND_SOC_CLOCK_IN); + if (ret < 0) { + dev_err(codec_dai->dev, "can't set BCLK clock %d\n", ret); + return ret; + } + + /* Configure pll for codec */ + ret = snd_soc_dai_set_pll(codec_dai, 0, 0, clk_freq, + params_rate(params) * 256); + if (ret < 0) { + dev_err(codec_dai->dev, "can't set BCLK: %d\n", ret); + return ret; + } + + return 0; +} + +static const struct snd_soc_ops mt8196_nau8825_ops = { + .hw_params = mt8196_nau8825_hw_params, +}; + +static int mt8196_rt5682s_i2s_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_card *card = rtd->card; + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0); + unsigned int rate = params_rate(params); + int bitwidth; + int ret; + + bitwidth = snd_pcm_format_width(params_format(params)); + if (bitwidth < 0) { + dev_err(card->dev, "invalid bit width: %d\n", bitwidth); + return bitwidth; + } + + ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x00, 0x0, 0x2, bitwidth); + if (ret) { + dev_err(card->dev, "failed to set tdm slot\n"); + return ret; + } + + ret = 
snd_soc_dai_set_pll(codec_dai, RT5682S_PLL1, RT5682S_PLL_S_BCLK1, + rate * 32, rate * 512); + if (ret) { + dev_err(card->dev, "failed to set pll\n"); + return ret; + } + + dev_info(card->dev, "%s set mclk rate: %d\n", __func__, rate * 512); + + ret = snd_soc_dai_set_sysclk(codec_dai, RT5682S_SCLK_S_MCLK, + rate * 512, SND_SOC_CLOCK_IN); + if (ret) { + dev_err(card->dev, "failed to set sysclk\n"); + return ret; + } + + return snd_soc_dai_set_sysclk(cpu_dai, 0, rate * 512, + SND_SOC_CLOCK_OUT); +} + +static const struct snd_soc_ops mt8196_rt5682s_i2s_ops = { + .hw_params = mt8196_rt5682s_i2s_hw_params, +}; + +static int mt8196_nau8825_soc_card_probe(struct mtk_soc_card_data *soc_card_data, bool legacy) +{ + struct snd_soc_card *card = soc_card_data->card_data->card; + struct snd_soc_dai_link *dai_link; + bool init_nau8825 = false; + bool init_rt5682s = false; + bool init_rt5650 = false; + bool init_dumb = false; + int i; + + dev_info(card->dev, "legacy: %d\n", legacy); + + for_each_card_prelinks(card, i, dai_link) { + if (strcmp(dai_link->name, "TDM_DPTX_BE") == 0) { + if (dai_link->num_codecs && + strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai")) + dai_link->init = mt8196_dptx_codec_init; + } else if (strcmp(dai_link->name, "I2SOUT3_BE") == 0) { + if (dai_link->num_codecs && + strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai")) + dai_link->init = mt8196_hdmi_codec_init; + } else if (strcmp(dai_link->name, "I2SOUT6_BE") == 0 || + strcmp(dai_link->name, "I2SIN6_BE") == 0) { + if (!strcmp(dai_link->codecs->dai_name, NAU8825_CODEC_DAI)) { + dai_link->ops = &mt8196_nau8825_ops; + if (!init_nau8825) { + dai_link->init = mt8196_headset_codec_init; + dai_link->exit = mt8196_headset_codec_exit; + init_nau8825 = true; + } + } else if (!strcmp(dai_link->codecs->dai_name, RT5682S_CODEC_DAI)) { + dai_link->ops = &mt8196_rt5682s_i2s_ops; + if (!init_rt5682s) { + dai_link->init = mt8196_headset_codec_init; + dai_link->exit = mt8196_headset_codec_exit; + init_rt5682s = 
true; + } + } else if (!strcmp(dai_link->codecs->dai_name, RT5650_CODEC_DAI)) { + dai_link->ops = &mt8196_rt5682s_i2s_ops; + if (!init_rt5650) { + dai_link->init = mt8196_headset_codec_init; + dai_link->exit = mt8196_headset_codec_exit; + init_rt5650 = true; + } + } else { + if (strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai")) { + if (!init_dumb) { + dai_link->init = mt8196_dumb_amp_init; + init_dumb = true; + } + } + } + } + } + + return 0; +} + +static const struct mtk_sof_priv mt8196_sof_priv = { + .conn_streams = g_sof_conn_streams, + .num_streams = ARRAY_SIZE(g_sof_conn_streams), +}; + +static struct snd_soc_card mt8196_nau8825_soc_card = { + .owner = THIS_MODULE, + .dai_link = mt8196_nau8825_dai_links, + .num_links = ARRAY_SIZE(mt8196_nau8825_dai_links), + .dapm_widgets = mt8196_nau8825_card_widgets, + .num_dapm_widgets = ARRAY_SIZE(mt8196_nau8825_card_widgets), + .dapm_routes = mt8196_nau8825_card_routes, + .num_dapm_routes = ARRAY_SIZE(mt8196_nau8825_card_routes), + .controls = mt8196_nau8825_card_controls, + .num_controls = ARRAY_SIZE(mt8196_nau8825_card_controls), +}; + +static const struct mtk_soundcard_pdata mt8196_nau8825_card = { + .card_name = "mt8196_nau8825", + .card_data = &(struct mtk_platform_card_data) { + .card = &mt8196_nau8825_soc_card, + .num_jacks = MT8196_JACK_MAX, + .flags = NAU8825_HS_PRESENT + }, + .sof_priv = &mt8196_sof_priv, + .soc_probe = mt8196_nau8825_soc_card_probe, +}; + +static const struct mtk_soundcard_pdata mt8196_rt5682s_card = { + .card_name = "mt8196_rt5682s", + .card_data = &(struct mtk_platform_card_data) { + .card = &mt8196_nau8825_soc_card, + .num_jacks = MT8196_JACK_MAX, + .flags = RT5682S_HS_PRESENT + }, + .sof_priv = &mt8196_sof_priv, + .soc_probe = mt8196_nau8825_soc_card_probe, +}; + +static const struct mtk_soundcard_pdata mt8196_rt5650_card = { + .card_name = "mt8196_rt5650", + .card_data = &(struct mtk_platform_card_data) { + .card = &mt8196_nau8825_soc_card, + .num_jacks = MT8196_JACK_MAX, + .flags = 
RT5650_HS_PRESENT + }, + .sof_priv = &mt8196_sof_priv, + .soc_probe = mt8196_nau8825_soc_card_probe, +}; + +static const struct of_device_id mt8196_nau8825_dt_match[] = { + {.compatible = "mediatek,mt8196-nau8825-sound", .data = &mt8196_nau8825_card,}, + {.compatible = "mediatek,mt8196-rt5682s-sound", .data = &mt8196_rt5682s_card,}, + {.compatible = "mediatek,mt8196-rt5650-sound", .data = &mt8196_rt5650_card,}, + {} +}; +MODULE_DEVICE_TABLE(of, mt8196_nau8825_dt_match); + +static struct platform_driver mt8196_nau8825_driver = { + .driver = { + .name = "mt8196-nau8825", + .of_match_table = mt8196_nau8825_dt_match, + .pm = &snd_soc_pm_ops, + }, + .probe = mtk_soundcard_common_probe, +}; +module_platform_driver(mt8196_nau8825_driver); + +/* Module information */ +MODULE_DESCRIPTION("MT8196 nau8825 ALSA SoC machine driver"); +MODULE_AUTHOR("Darren Ye "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("mt8196 nau8825 soc card"); + diff --git a/sound/soc/mediatek/mt8196/mt8196-reg.h b/sound/soc/mediatek/mt8196/mt8196-reg.h new file mode 100644 index 00000000000000..eb689d2655be46 --- /dev/null +++ b/sound/soc/mediatek/mt8196/mt8196-reg.h @@ -0,0 +1,12068 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mt8196-reg.h -- Mediatek 8196 audio driver reg definition + * + * Copyright (c) 2025 MediaTek Inc. 
+ * Author: Darren Ye + */ + +#ifndef _MT8196_REG_H_ +#define _MT8196_REG_H_ + + /* reg bit enum */ +enum { + MT8196_MEMIF_PBUF_SIZE_32_BYTES, + MT8196_MEMIF_PBUF_SIZE_64_BYTES, + MT8196_MEMIF_PBUF_SIZE_128_BYTES, + MT8196_MEMIF_PBUF_SIZE_256_BYTES, + MT8196_MEMIF_PBUF_SIZE_NUM, +}; + +enum { + MT8196_MEMIF_MAX_LEN_0_BYTES, + MT8196_MEMIF_MAX_LEN_16_BYTES, + MT8196_MEMIF_MAX_LEN_32_BYTES, + MT8196_MEMIF_MAX_LEN_64_BYTES, +}; + +enum { + MT8196_MEMIF_MIN_LEN_NOT_SUPPORT, + MT8196_MEMIF_MIN_LEN_16_BYTES, + MT8196_MEMIF_MIN_LEN_32_BYTES, + MT8196_MEMIF_MIN_LEN_64_BYTES, +}; + +/***************************************************************************** + * R E G I S T E R D E F I N I T I O N + *****************************************************************************/ +/* AUDIO_TOP_CON0 */ +#define PDN_MTKAIFV4_SFT 25 +#define PDN_MTKAIFV4_MASK 0x1 +#define PDN_MTKAIFV4_MASK_SFT (0x1 << 25) +#define PDN_FM_I2S_SFT 24 +#define PDN_FM_I2S_MASK 0x1 +#define PDN_FM_I2S_MASK_SFT (0x1 << 24) +#define PDN_HW_GAIN01_SFT 21 +#define PDN_HW_GAIN01_MASK 0x1 +#define PDN_HW_GAIN01_MASK_SFT (0x1 << 21) +#define PDN_HW_GAIN23_SFT 20 +#define PDN_HW_GAIN23_MASK 0x1 +#define PDN_HW_GAIN23_MASK_SFT (0x1 << 20) +#define PDN_STF_SFT 19 +#define PDN_STF_MASK 0x1 +#define PDN_STF_MASK_SFT (0x1 << 19) +#define PDN_CM0_SFT 18 +#define PDN_CM0_MASK 0x1 +#define PDN_CM0_MASK_SFT (0x1 << 18) +#define PDN_CM1_SFT 17 +#define PDN_CM1_MASK 0x1 +#define PDN_CM1_MASK_SFT (0x1 << 17) +#define PDN_CM2_SFT 16 +#define PDN_CM2_MASK 0x1 +#define PDN_CM2_MASK_SFT (0x1 << 16) +#define PDN_PCM0_SFT 14 +#define PDN_PCM0_MASK 0x1 +#define PDN_PCM0_MASK_SFT (0x1 << 14) +#define PDN_PCM1_SFT 13 +#define PDN_PCM1_MASK 0x1 +#define PDN_PCM1_MASK_SFT (0x1 << 13) + +/* AUDIO_TOP_CON1 */ +#define PDN_UL0_ADC_SFT 23 +#define PDN_UL0_ADC_MASK 0x1 +#define PDN_UL0_ADC_MASK_SFT (0x1 << 23) +#define PDN_UL0_TML_SFT 22 +#define PDN_UL0_TML_MASK 0x1 +#define PDN_UL0_TML_MASK_SFT (0x1 << 22) +#define 
PDN_UL0_ADC_HIRES_SFT 21 +#define PDN_UL0_ADC_HIRES_MASK 0x1 +#define PDN_UL0_ADC_HIRES_MASK_SFT (0x1 << 21) +#define PDN_UL0_ADC_HIRES_TML_SFT 20 +#define PDN_UL0_ADC_HIRES_TML_MASK 0x1 +#define PDN_UL0_ADC_HIRES_TML_MASK_SFT (0x1 << 20) +#define PDN_UL1_ADC_SFT 19 +#define PDN_UL1_ADC_MASK 0x1 +#define PDN_UL1_ADC_MASK_SFT (0x1 << 19) +#define PDN_UL1_TML_SFT 18 +#define PDN_UL1_TML_MASK 0x1 +#define PDN_UL1_TML_MASK_SFT (0x1 << 18) +#define PDN_UL1_ADC_HIRES_SFT 17 +#define PDN_UL1_ADC_HIRES_MASK 0x1 +#define PDN_UL1_ADC_HIRES_MASK_SFT (0x1 << 17) +#define PDN_UL1_ADC_HIRES_TML_SFT 16 +#define PDN_UL1_ADC_HIRES_TML_MASK 0x1 +#define PDN_UL1_ADC_HIRES_TML_MASK_SFT (0x1 << 16) +#define PDN_UL2_ADC_SFT 15 +#define PDN_UL2_ADC_MASK 0x1 +#define PDN_UL2_ADC_MASK_SFT (0x1 << 15) +#define PDN_UL2_TML_SFT 14 +#define PDN_UL2_TML_MASK 0x1 +#define PDN_UL2_TML_MASK_SFT (0x1 << 14) +#define PDN_UL2_ADC_HIRES_SFT 13 +#define PDN_UL2_ADC_HIRES_MASK 0x1 +#define PDN_UL2_ADC_HIRES_MASK_SFT (0x1 << 13) +#define PDN_UL2_ADC_HIRES_TML_SFT 12 +#define PDN_UL2_ADC_HIRES_TML_MASK 0x1 +#define PDN_UL2_ADC_HIRES_TML_MASK_SFT (0x1 << 12) + +/* AUDIO_TOP_CON2 */ +#define PDN_TDM_OUT_SFT 24 +#define PDN_TDM_OUT_MASK 0x1 +#define PDN_TDM_OUT_MASK_SFT (0x1 << 24) +#define PDN_ETDM_OUT0_SFT 21 +#define PDN_ETDM_OUT0_MASK 0x1 +#define PDN_ETDM_OUT0_MASK_SFT (0x1 << 21) +#define PDN_ETDM_OUT1_SFT 20 +#define PDN_ETDM_OUT1_MASK 0x1 +#define PDN_ETDM_OUT1_MASK_SFT (0x1 << 20) +#define PDN_ETDM_OUT2_SFT 19 +#define PDN_ETDM_OUT2_MASK 0x1 +#define PDN_ETDM_OUT2_MASK_SFT (0x1 << 19) +#define PDN_ETDM_OUT3_SFT 18 +#define PDN_ETDM_OUT3_MASK 0x1 +#define PDN_ETDM_OUT3_MASK_SFT (0x1 << 18) +#define PDN_ETDM_OUT4_SFT 17 +#define PDN_ETDM_OUT4_MASK 0x1 +#define PDN_ETDM_OUT4_MASK_SFT (0x1 << 17) +#define PDN_ETDM_OUT5_SFT 16 +#define PDN_ETDM_OUT5_MASK 0x1 +#define PDN_ETDM_OUT5_MASK_SFT (0x1 << 16) +#define PDN_ETDM_OUT6_SFT 15 +#define PDN_ETDM_OUT6_MASK 0x1 +#define PDN_ETDM_OUT6_MASK_SFT (0x1 << 
15) +#define PDN_ETDM_IN0_SFT 13 +#define PDN_ETDM_IN0_MASK 0x1 +#define PDN_ETDM_IN0_MASK_SFT (0x1 << 13) +#define PDN_ETDM_IN1_SFT 12 +#define PDN_ETDM_IN1_MASK 0x1 +#define PDN_ETDM_IN1_MASK_SFT (0x1 << 12) +#define PDN_ETDM_IN2_SFT 11 +#define PDN_ETDM_IN2_MASK 0x1 +#define PDN_ETDM_IN2_MASK_SFT (0x1 << 11) +#define PDN_ETDM_IN3_SFT 10 +#define PDN_ETDM_IN3_MASK 0x1 +#define PDN_ETDM_IN3_MASK_SFT (0x1 << 10) +#define PDN_ETDM_IN4_SFT 9 +#define PDN_ETDM_IN4_MASK 0x1 +#define PDN_ETDM_IN4_MASK_SFT (0x1 << 9) +#define PDN_ETDM_IN5_SFT 8 +#define PDN_ETDM_IN5_MASK 0x1 +#define PDN_ETDM_IN5_MASK_SFT (0x1 << 8) +#define PDN_ETDM_IN6_SFT 7 +#define PDN_ETDM_IN6_MASK 0x1 +#define PDN_ETDM_IN6_MASK_SFT (0x1 << 7) + +/* AUDIO_TOP_CON3 */ +#define PDN_CONNSYS_I2S_ASRC_SFT 25 +#define PDN_CONNSYS_I2S_ASRC_MASK 0x1 +#define PDN_CONNSYS_I2S_ASRC_MASK_SFT (0x1 << 25) +#define PDN_GENERAL0_ASRC_SFT 24 +#define PDN_GENERAL0_ASRC_MASK 0x1 +#define PDN_GENERAL0_ASRC_MASK_SFT (0x1 << 24) +#define PDN_GENERAL1_ASRC_SFT 23 +#define PDN_GENERAL1_ASRC_MASK 0x1 +#define PDN_GENERAL1_ASRC_MASK_SFT (0x1 << 23) +#define PDN_GENERAL2_ASRC_SFT 22 +#define PDN_GENERAL2_ASRC_MASK 0x1 +#define PDN_GENERAL2_ASRC_MASK_SFT (0x1 << 22) +#define PDN_GENERAL3_ASRC_SFT 21 +#define PDN_GENERAL3_ASRC_MASK 0x1 +#define PDN_GENERAL3_ASRC_MASK_SFT (0x1 << 21) +#define PDN_GENERAL4_ASRC_SFT 20 +#define PDN_GENERAL4_ASRC_MASK 0x1 +#define PDN_GENERAL4_ASRC_MASK_SFT (0x1 << 20) +#define PDN_GENERAL5_ASRC_SFT 19 +#define PDN_GENERAL5_ASRC_MASK 0x1 +#define PDN_GENERAL5_ASRC_MASK_SFT (0x1 << 19) +#define PDN_GENERAL6_ASRC_SFT 18 +#define PDN_GENERAL6_ASRC_MASK 0x1 +#define PDN_GENERAL6_ASRC_MASK_SFT (0x1 << 18) +#define PDN_GENERAL7_ASRC_SFT 17 +#define PDN_GENERAL7_ASRC_MASK 0x1 +#define PDN_GENERAL7_ASRC_MASK_SFT (0x1 << 17) +#define PDN_GENERAL8_ASRC_SFT 16 +#define PDN_GENERAL8_ASRC_MASK 0x1 +#define PDN_GENERAL8_ASRC_MASK_SFT (0x1 << 16) +#define PDN_GENERAL9_ASRC_SFT 15 +#define PDN_GENERAL9_ASRC_MASK 
0x1 +#define PDN_GENERAL9_ASRC_MASK_SFT (0x1 << 15) +#define PDN_GENERAL10_ASRC_SFT 14 +#define PDN_GENERAL10_ASRC_MASK 0x1 +#define PDN_GENERAL10_ASRC_MASK_SFT (0x1 << 14) +#define PDN_GENERAL11_ASRC_SFT 13 +#define PDN_GENERAL11_ASRC_MASK 0x1 +#define PDN_GENERAL11_ASRC_MASK_SFT (0x1 << 13) +#define PDN_GENERAL12_ASRC_SFT 12 +#define PDN_GENERAL12_ASRC_MASK 0x1 +#define PDN_GENERAL12_ASRC_MASK_SFT (0x1 << 12) +#define PDN_GENERAL13_ASRC_SFT 11 +#define PDN_GENERAL13_ASRC_MASK 0x1 +#define PDN_GENERAL13_ASRC_MASK_SFT (0x1 << 11) +#define PDN_GENERAL14_ASRC_SFT 10 +#define PDN_GENERAL14_ASRC_MASK 0x1 +#define PDN_GENERAL14_ASRC_MASK_SFT (0x1 << 10) +#define PDN_GENERAL15_ASRC_SFT 9 +#define PDN_GENERAL15_ASRC_MASK 0x1 +#define PDN_GENERAL15_ASRC_MASK_SFT (0x1 << 9) + +/* AUDIO_TOP_CON4 */ +#define PDN_APLL_TUNER1_SFT 13 +#define PDN_APLL_TUNER1_MASK 0x1 +#define PDN_APLL_TUNER1_MASK_SFT (0x1 << 13) +#define PDN_APLL_TUNER2_SFT 12 +#define PDN_APLL_TUNER2_MASK 0x1 +#define PDN_APLL_TUNER2_MASK_SFT (0x1 << 12) +#define CG_H208M_CK_SFT 4 +#define CG_H208M_CK_MASK 0x1 +#define CG_H208M_CK_MASK_SFT (0x1 << 4) +#define CG_APLL2_CK_SFT 3 +#define CG_APLL2_CK_MASK 0x1 +#define CG_APLL2_CK_MASK_SFT (0x1 << 3) +#define CG_APLL1_CK_SFT 2 +#define CG_APLL1_CK_MASK 0x1 +#define CG_APLL1_CK_MASK_SFT (0x1 << 2) +#define CG_AUDIO_F26M_CK_SFT 1 +#define CG_AUDIO_F26M_CK_MASK 0x1 +#define CG_AUDIO_F26M_CK_MASK_SFT (0x1 << 1) +#define CG_AUDIO_HOPPING_CK_SFT 0 +#define CG_AUDIO_HOPPING_CK_MASK 0x1 +#define CG_AUDIO_HOPPING_CK_MASK_SFT (0x1 << 0) + +/* AUDIO_ENGEN_CON0 */ +/* AUDIO_ENGEN_CON0_USER1 */ +/* AUDIO_ENGEN_CON0_USER1 */ +#define MULTI_USER_BYPASS_SFT 17 +#define MULTI_USER_BYPASS_MASK 0x1 +#define MULTI_USER_BYPASS_MASK_SFT (0x1 << 17) +#define MULTI_USER_RST_SFT 16 +#define MULTI_USER_RST_MASK 0x1 +#define MULTI_USER_RST_MASK_SFT (0x1 << 16) +#define AUDIO_F26M_EN_RST_SFT 8 +#define AUDIO_F26M_EN_RST_MASK 0x1 +#define AUDIO_F26M_EN_RST_MASK_SFT (0x1 << 8) +#define 
AUDIO_APLL2_EN_ON_SFT 3 +#define AUDIO_APLL2_EN_ON_MASK 0x1 +#define AUDIO_APLL2_EN_ON_MASK_SFT (0x1 << 3) +#define AUDIO_APLL1_EN_ON_SFT 2 +#define AUDIO_APLL1_EN_ON_MASK 0x1 +#define AUDIO_APLL1_EN_ON_MASK_SFT (0x1 << 2) +#define AUDIO_F3P25M_EN_ON_SFT 1 +#define AUDIO_F3P25M_EN_ON_MASK 0x1 +#define AUDIO_F3P25M_EN_ON_MASK_SFT (0x1 << 1) +#define AUDIO_26M_EN_ON_SFT 0 +#define AUDIO_26M_EN_ON_MASK 0x1 +#define AUDIO_26M_EN_ON_MASK_SFT (0x1 << 0) + +/* AFE_SINEGEN_CON0 */ +#define DAC_EN_SFT 26 +#define DAC_EN_MASK 0x1 +#define DAC_EN_MASK_SFT (0x1 << 26) +#define TIE_SW_CH2_SFT 25 +#define TIE_SW_CH2_MASK 0x1 +#define TIE_SW_CH2_MASK_SFT (0x1 << 25) +#define TIE_SW_CH1_SFT 24 +#define TIE_SW_CH1_MASK 0x1 +#define TIE_SW_CH1_MASK_SFT (0x1 << 24) +#define AMP_DIV_CH2_SFT 20 +#define AMP_DIV_CH2_MASK 0xf +#define AMP_DIV_CH2_MASK_SFT (0xf << 20) +#define FREQ_DIV_CH2_SFT 12 +#define FREQ_DIV_CH2_MASK 0x1f +#define FREQ_DIV_CH2_MASK_SFT (0x1f << 12) +#define AMP_DIV_CH1_SFT 8 +#define AMP_DIV_CH1_MASK 0xf +#define AMP_DIV_CH1_MASK_SFT (0xf << 8) +#define FREQ_DIV_CH1_SFT 0 +#define FREQ_DIV_CH1_MASK 0x1f +#define FREQ_DIV_CH1_MASK_SFT (0x1f << 0) + +/* AFE_SINEGEN_CON1 */ +#define SINE_DOMAIN_SFT 20 +#define SINE_DOMAIN_MASK 0x7 +#define SINE_DOMAIN_MASK_SFT (0x7 << 20) +#define SINE_MODE_SFT 12 +#define SINE_MODE_MASK 0x1f +#define SINE_MODE_MASK_SFT (0x1f << 12) +#define INNER_LOOP_BACKI_SEL_SFT 8 +#define INNER_LOOP_BACKI_SEL_MASK 0x1 +#define INNER_LOOP_BACKI_SEL_MASK_SFT (0x1 << 8) +#define INNER_LOOP_BACK_MODE_SFT 0 +#define INNER_LOOP_BACK_MODE_MASK 0xff +#define INNER_LOOP_BACK_MODE_MASK_SFT (0xff << 0) + +/* AFE_SINEGEN_CON2 */ +#define TIE_CH1_CONSTANT_SFT 0 +#define TIE_CH1_CONSTANT_MASK 0xffffffff +#define TIE_CH1_CONSTANT_MASK_SFT (0xffffffff << 0) + +/* AFE_SINEGEN_CON3 */ +#define TIE_CH2_CONSTANT_SFT 0 +#define TIE_CH2_CONSTANT_MASK 0xffffffff +#define TIE_CH2_CONSTANT_MASK_SFT (0xffffffff << 0) + +/* AFE_APLL1_TUNER_CFG */ +/* AFE_APLL2_TUNER_CFG */ 
+#define UPPER_BOUND_SFT 8 +#define UPPER_BOUND_MASK 0xff +#define UPPER_BOUND_MASK_SFT (0xff << 8) +#define APLL_DIV_SFT 4 +#define APLL_DIV_MASK 0xf +#define APLL_DIV_MASK_SFT (0xf << 4) +#define XTAL_EN_128FS_SEL_SFT 1 +#define XTAL_EN_128FS_SEL_MASK 0x3 +#define XTAL_EN_128FS_SEL_MASK_SFT (0x3 << 1) +#define FREQ_TUNER_EN_SFT 0 +#define FREQ_TUNER_EN_MASK 0x1 +#define FREQ_TUNER_EN_MASK_SFT (0x1 << 0) + +/* AFE_APLL1_TUNER_MON0 */ +/* AFE_APLL2_TUNER_MON0 */ +#define TUNER_MON_SFT 0 +#define TUNER_MON_MASK 0xffffffff +#define TUNER_MON_MASK_SFT (0xffffffff << 0) + +/* AUDIO_TOP_RG0 */ +/* AUDIO_TOP_RG1 */ +/* AUDIO_TOP_RG2 */ +/* AUDIO_TOP_RG3 */ +/* AUDIO_TOP_RG4 */ +#define RESERVE_RG_SFT 0 +#define RESERVE_RG_MASK 0xffffffff +#define RESERVE_RG_MASK_SFT (0xffffffff << 0) + +/* AFE_SPM_CONTROL_REQ */ +#define AFE_DDREN_REQ_SFT 4 +#define AFE_DDREN_REQ_MASK 0x1 +#define AFE_DDREN_REQ_MASK_SFT (0x1 << 4) +#define AFE_INFRA_REQ_SFT 3 +#define AFE_INFRA_REQ_MASK 0x1 +#define AFE_INFRA_REQ_MASK_SFT (0x1 << 3) +#define AFE_VRF18_REQ_SFT 2 +#define AFE_VRF18_REQ_MASK 0x1 +#define AFE_VRF18_REQ_MASK_SFT (0x1 << 2) +#define AFE_APSRC_REQ_SFT 1 +#define AFE_APSRC_REQ_MASK 0x1 +#define AFE_APSRC_REQ_MASK_SFT (0x1 << 1) +#define AFE_SRCCLKENA_REQ_SFT 0 +#define AFE_SRCCLKENA_REQ_MASK 0x1 +#define AFE_SRCCLKENA_REQ_MASK_SFT (0x1 << 0) + +/* AFE_SPM_CONTROL_ACK */ +#define SPM_RESOURCE_CONTROL_ACK_SFT 0 +#define SPM_RESOURCE_CONTROL_ACK_MASK 0xffffffff +#define SPM_RESOURCE_CONTROL_ACK_MASK_SFT (0xffffffff << 0) + +/* AUD_TOP_CFG_VCORE_RG */ +#define AUD_TOP_CFG_SFT 0 +#define AUD_TOP_CFG_MASK 0xffffffff +#define AUD_TOP_CFG_MASK_SFT (0xffffffff << 0) + +/* AUDIO_TOP_IP_VERSION */ +#define AUDIO_TOP_IP_VERSION_SFT 0 +#define AUDIO_TOP_IP_VERSION_MASK 0xffffffff +#define AUDIO_TOP_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AUDIO_ENGEN_CON0_MON */ +#define AUDIO_ENGEN_MON_SFT 0 +#define AUDIO_ENGEN_MON_MASK 0xffffffff +#define AUDIO_ENGEN_MON_MASK_SFT (0xffffffff << 0) + 
+/* AUD_TOP_CFG_VLP_RG */ +#define I2SIN1_DAT_SEL_SFT 31 +#define I2SIN1_DAT_SEL_MASK 0x1 +#define I2SIN1_DAT_SEL_MASK_SFT (0x1 << 31) +#define FMI2S_IN_SEL_SFT 30 +#define FMI2S_IN_SEL_MASK 0x1 +#define FMI2S_IN_SEL_MASK_SFT (0x1 << 30) +#define RG_I2S4_IN_BCK_NEG_EG_LATCH_SFT 21 +#define RG_I2S4_IN_BCK_NEG_EG_LATCH_MASK 0x1 +#define RG_I2S4_IN_BCK_NEG_EG_LATCH_MASK_SFT (0x1 << 21) +#define RG_I2S4_OUT_BCK_NEG_EG_LATCH_SFT 20 +#define RG_I2S4_OUT_BCK_NEG_EG_LATCH_MASK 0x1 +#define RG_I2S4_OUT_BCK_NEG_EG_LATCH_MASK_SFT (0x1 << 20) +#define RG_I2S4_IN_SLV_LRCK_LATCH_EDGE_SFT 19 +#define RG_I2S4_IN_SLV_LRCK_LATCH_EDGE_MASK 0x1 +#define RG_I2S4_IN_SLV_LRCK_LATCH_EDGE_MASK_SFT (0x1 << 19) +#define RG_I2S4_IN_SLV_BCK_INV_SEL_SFT 18 +#define RG_I2S4_IN_SLV_BCK_INV_SEL_MASK 0x1 +#define RG_I2S4_IN_SLV_BCK_INV_SEL_MASK_SFT (0x1 << 18) +#define RG_I2S4_OUT_SLV_LRCK_LATCH_EDGE_SFT 17 +#define RG_I2S4_OUT_SLV_LRCK_LATCH_EDGE_MASK 0x1 +#define RG_I2S4_OUT_SLV_LRCK_LATCH_EDGE_MASK_SFT (0x1 << 17) +#define RG_I2S4_OUT_SLV_BCK_INV_SEL_SFT 16 +#define RG_I2S4_OUT_SLV_BCK_INV_SEL_MASK 0x1 +#define RG_I2S4_OUT_SLV_BCK_INV_SEL_MASK_SFT (0x1 << 16) +#define RG_I2S5_IN_BCK_NEG_EG_LATCH_SFT 13 +#define RG_I2S5_IN_BCK_NEG_EG_LATCH_MASK 0x1 +#define RG_I2S5_IN_BCK_NEG_EG_LATCH_MASK_SFT (0x1 << 13) +#define RG_I2S5_OUT_BCK_NEG_EG_LATCH_SFT 12 +#define RG_I2S5_OUT_BCK_NEG_EG_LATCH_MASK 0x1 +#define RG_I2S5_OUT_BCK_NEG_EG_LATCH_MASK_SFT (0x1 << 12) +#define RG_I2S5_IN_SLV_LRCK_LATCH_EDGE_SFT 11 +#define RG_I2S5_IN_SLV_LRCK_LATCH_EDGE_MASK 0x1 +#define RG_I2S5_IN_SLV_LRCK_LATCH_EDGE_MASK_SFT (0x1 << 11) +#define RG_I2S5_IN_SLV_BCK_INV_SEL_SFT 10 +#define RG_I2S5_IN_SLV_BCK_INV_SEL_MASK 0x1 +#define RG_I2S5_IN_SLV_BCK_INV_SEL_MASK_SFT (0x1 << 10) +#define RG_I2S5_OUT_SLV_LRCK_LATCH_EDGE_SFT 9 +#define RG_I2S5_OUT_SLV_LRCK_LATCH_EDGE_MASK 0x1 +#define RG_I2S5_OUT_SLV_LRCK_LATCH_EDGE_MASK_SFT (0x1 << 9) +#define RG_I2S5_OUT_SLV_BCK_INV_SEL_SFT 8 +#define RG_I2S5_OUT_SLV_BCK_INV_SEL_MASK 0x1 
+#define RG_I2S5_OUT_SLV_BCK_INV_SEL_MASK_SFT (0x1 << 8) +#define RG_I2S4_PAD_TOP_CK_EN_SFT 5 +#define RG_I2S4_PAD_TOP_CK_EN_MASK 0x1 +#define RG_I2S4_PAD_TOP_CK_EN_MASK_SFT (0x1 << 5) +#define RG_I2S5_PAD_TOP_CK_EN_SFT 4 +#define RG_I2S5_PAD_TOP_CK_EN_MASK 0x1 +#define RG_I2S5_PAD_TOP_CK_EN_MASK_SFT (0x1 << 4) +#define RG_TEST_TYPE_SFT 2 +#define RG_TEST_TYPE_MASK 0x1 +#define RG_TEST_TYPE_MASK_SFT (0x1 << 2) +#define RG_SW_RESET_SFT 1 +#define RG_SW_RESET_MASK 0x1 +#define RG_SW_RESET_MASK_SFT (0x1 << 1) +#define RG_TEST_ON_SFT 0 +#define RG_TEST_ON_MASK 0x1 +#define RG_TEST_ON_MASK_SFT (0x1 << 0) + +/* AUD_TOP_MON_RG */ +#define AUD_TOP_MON_SFT 0 +#define AUD_TOP_MON_MASK 0xffffffff +#define AUD_TOP_MON_MASK_SFT (0xffffffff << 0) + +/* AUDIO_USE_DEFAULT_DELSEL0 */ +#define USE_DEFAULT_DELSEL_RG_SFT 0 +#define USE_DEFAULT_DELSEL_RG_MASK 0xffffffff +#define USE_DEFAULT_DELSEL_RG_MASK_SFT (0xffffffff << 0) + +/* AUDIO_USE_DEFAULT_DELSEL1 */ +#define USE_DEFAULT_DELSEL_RG_SFT 0 +#define USE_DEFAULT_DELSEL_RG_MASK 0xffffffff +#define USE_DEFAULT_DELSEL_RG_MASK_SFT (0xffffffff << 0) + +/* AUDIO_USE_DEFAULT_DELSEL2 */ +#define USE_DEFAULT_DELSEL_RG_SFT 0 +#define USE_DEFAULT_DELSEL_RG_MASK 0xffffffff +#define USE_DEFAULT_DELSEL_RG_MASK_SFT (0xffffffff << 0) + +/* AFE_CONNSYS_I2S_IPM_VER_MON */ +#define RG_CONNSYS_I2S_IPM_VER_MON_SFT 0 +#define RG_CONNSYS_I2S_IPM_VER_MON_MASK 0xffffffff +#define RG_CONNSYS_I2S_IPM_VER_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_CONNSYS_I2S_MON_SEL */ +#define RG_CONNSYS_I2S_MON_SEL_SFT 0 +#define RG_CONNSYS_I2S_MON_SEL_MASK 0xff +#define RG_CONNSYS_I2S_MON_SEL_MASK_SFT (0xff << 0) + +/* AFE_CONNSYS_I2S_MON */ +#define RG_CONNSYS_I2S_MON_SFT 0 +#define RG_CONNSYS_I2S_MON_MASK 0xffffffff +#define RG_CONNSYS_I2S_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_CONNSYS_I2S_CON */ +#define I2S_SOFT_RST_SFT 31 +#define I2S_SOFT_RST_MASK 0x1 +#define I2S_SOFT_RST_MASK_SFT (0x1 << 31) +#define BCK_NEG_EG_LATCH_SFT 30 +#define BCK_NEG_EG_LATCH_MASK 0x1 
+#define BCK_NEG_EG_LATCH_MASK_SFT (0x1 << 30) +#define BCK_INV_SFT 29 +#define BCK_INV_MASK 0x1 +#define BCK_INV_MASK_SFT (0x1 << 29) +#define I2SIN_PAD_SEL_SFT 28 +#define I2SIN_PAD_SEL_MASK 0x1 +#define I2SIN_PAD_SEL_MASK_SFT (0x1 << 28) +#define I2S_LOOPBACK_SFT 20 +#define I2S_LOOPBACK_MASK 0x1 +#define I2S_LOOPBACK_MASK_SFT (0x1 << 20) +#define I2S_HDEN_SFT 12 +#define I2S_HDEN_MASK 0x1 +#define I2S_HDEN_MASK_SFT (0x1 << 12) +#define I2S_MODE_SFT 8 +#define I2S_MODE_MASK 0xf +#define I2S_MODE_MASK_SFT (0xf << 8) +#define I2S_BYPSRC_SFT 6 +#define I2S_BYPSRC_MASK 0x1 +#define I2S_BYPSRC_MASK_SFT (0x1 << 6) +#define INV_LRCK_SFT 5 +#define INV_LRCK_MASK 0x1 +#define INV_LRCK_MASK_SFT (0x1 << 5) +#define I2S_FMT_SFT 3 +#define I2S_FMT_MASK 0x1 +#define I2S_FMT_MASK_SFT (0x1 << 3) +#define I2S_SRC_SFT 2 +#define I2S_SRC_MASK 0x1 +#define I2S_SRC_MASK_SFT (0x1 << 2) +#define I2S_WLEN_SFT 1 +#define I2S_WLEN_MASK 0x1 +#define I2S_WLEN_MASK_SFT (0x1 << 1) +#define I2S_EN_SFT 0 +#define I2S_EN_MASK 0x1 +#define I2S_EN_MASK_SFT (0x1 << 0) + +/* AFE_PCM0_INTF_CON0 */ +#define PCM0_HDEN_SFT 26 +#define PCM0_HDEN_MASK 0x1 +#define PCM0_HDEN_MASK_SFT (0x1 << 26) +#define PCM0_SYNC_DELSEL_SFT 25 +#define PCM0_SYNC_DELSEL_MASK 0x1 +#define PCM0_SYNC_DELSEL_MASK_SFT (0x1 << 25) +#define PCM0_TX_LR_SWAP_SFT 24 +#define PCM0_TX_LR_SWAP_MASK 0x1 +#define PCM0_TX_LR_SWAP_MASK_SFT (0x1 << 24) +#define PCM0_SYNC_OUT_INV_SFT 23 +#define PCM0_SYNC_OUT_INV_MASK 0x1 +#define PCM0_SYNC_OUT_INV_MASK_SFT (0x1 << 23) +#define PCM0_BCLK_OUT_INV_SFT 22 +#define PCM0_BCLK_OUT_INV_MASK 0x1 +#define PCM0_BCLK_OUT_INV_MASK_SFT (0x1 << 22) +#define PCM0_SYNC_IN_INV_SFT 21 +#define PCM0_SYNC_IN_INV_MASK 0x1 +#define PCM0_SYNC_IN_INV_MASK_SFT (0x1 << 21) +#define PCM0_BCLK_IN_INV_SFT 20 +#define PCM0_BCLK_IN_INV_MASK 0x1 +#define PCM0_BCLK_IN_INV_MASK_SFT (0x1 << 20) +#define PCM0_TX_LCH_RPT_SFT 19 +#define PCM0_TX_LCH_RPT_MASK 0x1 +#define PCM0_TX_LCH_RPT_MASK_SFT (0x1 << 19) +#define 
PCM0_VBT_16K_MODE_SFT 18 +#define PCM0_VBT_16K_MODE_MASK 0x1 +#define PCM0_VBT_16K_MODE_MASK_SFT (0x1 << 18) +#define PCM0_BIT_LENGTH_SFT 16 +#define PCM0_BIT_LENGTH_MASK 0x3 +#define PCM0_BIT_LENGTH_MASK_SFT (0x3 << 16) +#define PCM0_WLEN_SFT 14 +#define PCM0_WLEN_MASK 0x3 +#define PCM0_WLEN_MASK_SFT (0x3 << 14) +#define PCM0_SYNC_LENGTH_SFT 9 +#define PCM0_SYNC_LENGTH_MASK 0x1f +#define PCM0_SYNC_LENGTH_MASK_SFT (0x1f << 9) +#define PCM0_SYNC_TYPE_SFT 8 +#define PCM0_SYNC_TYPE_MASK 0x1 +#define PCM0_SYNC_TYPE_MASK_SFT (0x1 << 8) +#define PCM0_BYP_ASRC_SFT 7 +#define PCM0_BYP_ASRC_MASK 0x1 +#define PCM0_BYP_ASRC_MASK_SFT (0x1 << 7) +#define PCM0_SLAVE_SFT 6 +#define PCM0_SLAVE_MASK 0x1 +#define PCM0_SLAVE_MASK_SFT (0x1 << 6) +#define PCM0_MODE_SFT 3 +#define PCM0_MODE_MASK 0x7 +#define PCM0_MODE_MASK_SFT (0x7 << 3) +#define PCM0_FMT_SFT 1 +#define PCM0_FMT_MASK 0x3 +#define PCM0_FMT_MASK_SFT (0x3 << 1) +#define PCM0_EN_SFT 0 +#define PCM0_EN_MASK 0x1 +#define PCM0_EN_MASK_SFT (0x1 << 0) + +/* AFE_PCM0_INTF_CON1 */ +#define PCM0_TX_RX_LOOPBACK_SFT 31 +#define PCM0_TX_RX_LOOPBACK_MASK 0x1 +#define PCM0_TX_RX_LOOPBACK_MASK_SFT (0x1 << 31) +#define PCM0_BUFFER_LOOPBACK_SFT 30 +#define PCM0_BUFFER_LOOPBACK_MASK 0x1 +#define PCM0_BUFFER_LOOPBACK_MASK_SFT (0x1 << 30) +#define PCM0_PARALLEL_LOOPBACK_SFT 29 +#define PCM0_PARALLEL_LOOPBACK_MASK 0x1 +#define PCM0_PARALLEL_LOOPBACK_MASK_SFT (0x1 << 29) +#define PCM0_SERIAL_LOOPBACK_SFT 28 +#define PCM0_SERIAL_LOOPBACK_MASK 0x1 +#define PCM0_SERIAL_LOOPBACK_MASK_SFT (0x1 << 28) +#define PCM0_DAI_LOOPBACK_SFT 27 +#define PCM0_DAI_LOOPBACK_MASK 0x1 +#define PCM0_DAI_LOOPBACK_MASK_SFT (0x1 << 27) +#define PCM0_I2S_LOOPBACK_SFT 26 +#define PCM0_I2S_LOOPBACK_MASK 0x1 +#define PCM0_I2S_LOOPBACK_MASK_SFT (0x1 << 26) +#define PCM0_1X_EN_DOMAIN_SFT 23 +#define PCM0_1X_EN_DOMAIN_MASK 0x7 +#define PCM0_1X_EN_DOMAIN_MASK_SFT (0x7 << 23) +#define PCM0_1X_EN_MODE_SFT 18 +#define PCM0_1X_EN_MODE_MASK 0x1f +#define PCM0_1X_EN_MODE_MASK_SFT 
(0x1f << 18) +#define PCM0_TX3_RCH_DBG_MODE_SFT 17 +#define PCM0_TX3_RCH_DBG_MODE_MASK 0x1 +#define PCM0_TX3_RCH_DBG_MODE_MASK_SFT (0x1 << 17) +#define PCM0_PCM1_LOOPBACK_SFT 16 +#define PCM0_PCM1_LOOPBACK_MASK 0x1 +#define PCM0_PCM1_LOOPBACK_MASK_SFT (0x1 << 16) +#define PCM0_LOOPBACK_CH_SEL_SFT 12 +#define PCM0_LOOPBACK_CH_SEL_MASK 0x3 +#define PCM0_LOOPBACK_CH_SEL_MASK_SFT (0x3 << 12) +#define PCM0_BT_MODE_SFT 11 +#define PCM0_BT_MODE_MASK 0x1 +#define PCM0_BT_MODE_MASK_SFT (0x1 << 11) +#define PCM0_EXT_MODEM_SFT 10 +#define PCM0_EXT_MODEM_MASK 0x1 +#define PCM0_EXT_MODEM_MASK_SFT (0x1 << 10) +#define PCM0_USE_MD3_SFT 9 +#define PCM0_USE_MD3_MASK 0x1 +#define PCM0_USE_MD3_MASK_SFT (0x1 << 9) +#define PCM0_FIX_VALUE_SEL_SFT 8 +#define PCM0_FIX_VALUE_SEL_MASK 0x1 +#define PCM0_FIX_VALUE_SEL_MASK_SFT (0x1 << 8) +#define PCM0_TX_FIX_VALUE_SFT 0 +#define PCM0_TX_FIX_VALUE_MASK 0xff +#define PCM0_TX_FIX_VALUE_MASK_SFT (0xff << 0) + +/* AFE_PCM_INTF_MON */ +#define PCM0_TX_FIFO_OV_SFT 5 +#define PCM0_TX_FIFO_OV_MASK 0x1 +#define PCM0_TX_FIFO_OV_MASK_SFT (0x1 << 5) +#define PCM0_RX_FIFO_OV_SFT 4 +#define PCM0_RX_FIFO_OV_MASK 0x1 +#define PCM0_RX_FIFO_OV_MASK_SFT (0x1 << 4) +#define PCM1_TX_FIFO_OV_SFT 3 +#define PCM1_TX_FIFO_OV_MASK 0x1 +#define PCM1_TX_FIFO_OV_MASK_SFT (0x1 << 3) +#define PCM1_RX_FIFO_OV_SFT 2 +#define PCM1_RX_FIFO_OV_MASK 0x1 +#define PCM1_RX_FIFO_OV_MASK_SFT (0x1 << 2) +#define PCM0_SYNC_GLITCH_SFT 1 +#define PCM0_SYNC_GLITCH_MASK 0x1 +#define PCM0_SYNC_GLITCH_MASK_SFT (0x1 << 1) +#define PCM1_SYNC_GLITCH_SFT 0 +#define PCM1_SYNC_GLITCH_MASK 0x1 +#define PCM1_SYNC_GLITCH_MASK_SFT (0x1 << 0) + +/* AFE_PCM1_INTF_CON0 */ +#define PCM1_TX_FIX_VALUE_SFT 24 +#define PCM1_TX_FIX_VALUE_MASK 0xff +#define PCM1_TX_FIX_VALUE_MASK_SFT (0xff << 24) +#define PCM1_FIX_VALUE_SEL_SFT 23 +#define PCM1_FIX_VALUE_SEL_MASK 0x1 +#define PCM1_FIX_VALUE_SEL_MASK_SFT (0x1 << 23) +#define PCM1_BUFFER_LOOPBACK_SFT 22 +#define PCM1_BUFFER_LOOPBACK_MASK 0x1 +#define 
PCM1_BUFFER_LOOPBACK_MASK_SFT (0x1 << 22) +#define PCM1_PARALLEL_LOOPBACK_SFT 21 +#define PCM1_PARALLEL_LOOPBACK_MASK 0x1 +#define PCM1_PARALLEL_LOOPBACK_MASK_SFT (0x1 << 21) +#define PCM1_SERIAL_LOOPBACK_SFT 20 +#define PCM1_SERIAL_LOOPBACK_MASK 0x1 +#define PCM1_SERIAL_LOOPBACK_MASK_SFT (0x1 << 20) +#define PCM1_DAI_PCM1_LOOPBACK_SFT 19 +#define PCM1_DAI_PCM1_LOOPBACK_MASK 0x1 +#define PCM1_DAI_PCM1_LOOPBACK_MASK_SFT (0x1 << 19) +#define PCM1_I2S_PCM1_LOOPBACK_SFT 18 +#define PCM1_I2S_PCM1_LOOPBACK_MASK 0x1 +#define PCM1_I2S_PCM1_LOOPBACK_MASK_SFT (0x1 << 18) +#define PCM1_SYNC_DELSEL_SFT 17 +#define PCM1_SYNC_DELSEL_MASK 0x1 +#define PCM1_SYNC_DELSEL_MASK_SFT (0x1 << 17) +#define PCM1_TX_LR_SWAP_SFT 16 +#define PCM1_TX_LR_SWAP_MASK 0x1 +#define PCM1_TX_LR_SWAP_MASK_SFT (0x1 << 16) +#define PCM1_SYNC_IN_INV_SFT 15 +#define PCM1_SYNC_IN_INV_MASK 0x1 +#define PCM1_SYNC_IN_INV_MASK_SFT (0x1 << 15) +#define PCM1_BCLK_IN_INV_SFT 14 +#define PCM1_BCLK_IN_INV_MASK 0x1 +#define PCM1_BCLK_IN_INV_MASK_SFT (0x1 << 14) +#define PCM1_TX_LCH_RPT_SFT 13 +#define PCM1_TX_LCH_RPT_MASK 0x1 +#define PCM1_TX_LCH_RPT_MASK_SFT (0x1 << 13) +#define PCM1_VBT_16K_MODE_SFT 12 +#define PCM1_VBT_16K_MODE_MASK 0x1 +#define PCM1_VBT_16K_MODE_MASK_SFT (0x1 << 12) +#define PCM1_LOOPBACK_CH_SEL_SFT 10 +#define PCM1_LOOPBACK_CH_SEL_MASK 0x3 +#define PCM1_LOOPBACK_CH_SEL_MASK_SFT (0x3 << 10) +#define PCM1_TX2_BT_MODE_SFT 8 +#define PCM1_TX2_BT_MODE_MASK 0x1 +#define PCM1_TX2_BT_MODE_MASK_SFT (0x1 << 8) +#define PCM1_BT_MODE_SFT 7 +#define PCM1_BT_MODE_MASK 0x1 +#define PCM1_BT_MODE_MASK_SFT (0x1 << 7) +#define PCM1_AFIFO_SFT 6 +#define PCM1_AFIFO_MASK 0x1 +#define PCM1_AFIFO_MASK_SFT (0x1 << 6) +#define PCM1_WLEN_SFT 5 +#define PCM1_WLEN_MASK 0x1 +#define PCM1_WLEN_MASK_SFT (0x1 << 5) +#define PCM1_MODE_SFT 3 +#define PCM1_MODE_MASK 0x3 +#define PCM1_MODE_MASK_SFT (0x3 << 3) +#define PCM1_FMT_SFT 1 +#define PCM1_FMT_MASK 0x3 +#define PCM1_FMT_MASK_SFT (0x3 << 1) +#define PCM1_EN_SFT 0 +#define 
PCM1_EN_MASK 0x1 +#define PCM1_EN_MASK_SFT (0x1 << 0) + +/* AFE_PCM1_INTF_CON1 */ +#define PCM1_1X_EN_DOMAIN_SFT 23 +#define PCM1_1X_EN_DOMAIN_MASK 0x7 +#define PCM1_1X_EN_DOMAIN_MASK_SFT (0x7 << 23) +#define PCM1_1X_EN_MODE_SFT 18 +#define PCM1_1X_EN_MODE_MASK 0x1f +#define PCM1_1X_EN_MODE_MASK_SFT (0x1f << 18) + +/* AFE_PCM_TOP_IP_VERSION */ +#define AFE_PCM_TOP_IP_VERSION_SFT 0 +#define AFE_PCM_TOP_IP_VERSION_MASK 0xffffffff +#define AFE_PCM_TOP_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_IRQ_MCU_EN */ +#define AFE_IRQ_MCU_EN_SFT 0 +#define AFE_IRQ_MCU_EN_MASK 0xffffffff +#define AFE_IRQ_MCU_EN_MASK_SFT (0xffffffff << 0) + +/* AFE_IRQ_MCU_DSP_EN */ +#define AFE_IRQ_DSP_EN_SFT 0 +#define AFE_IRQ_DSP_EN_MASK 0xffffffff +#define AFE_IRQ_DSP_EN_MASK_SFT (0xffffffff << 0) + +/* AFE_IRQ_MCU_DSP2_EN */ +#define AFE_IRQ_DSP2_EN_SFT 0 +#define AFE_IRQ_DSP2_EN_MASK 0xffffffff +#define AFE_IRQ_DSP2_EN_MASK_SFT (0xffffffff << 0) + +/* AFE_IRQ_MCU_SCP_EN */ +#define IRQ31_MCU_SCP_EN_SFT 31 +#define IRQ30_MCU_SCP_EN_SFT 30 +#define IRQ29_MCU_SCP_EN_SFT 29 +#define IRQ28_MCU_SCP_EN_SFT 28 +#define IRQ27_MCU_SCP_EN_SFT 27 +#define IRQ26_MCU_SCP_EN_SFT 26 +#define IRQ25_MCU_SCP_EN_SFT 25 +#define IRQ24_MCU_SCP_EN_SFT 24 +#define IRQ23_MCU_SCP_EN_SFT 23 +#define IRQ22_MCU_SCP_EN_SFT 22 +#define IRQ21_MCU_SCP_EN_SFT 21 +#define IRQ20_MCU_SCP_EN_SFT 20 +#define IRQ19_MCU_SCP_EN_SFT 19 +#define IRQ18_MCU_SCP_EN_SFT 18 +#define IRQ17_MCU_SCP_EN_SFT 17 +#define IRQ16_MCU_SCP_EN_SFT 16 +#define IRQ15_MCU_SCP_EN_SFT 15 +#define IRQ14_MCU_SCP_EN_SFT 14 +#define IRQ13_MCU_SCP_EN_SFT 13 +#define IRQ12_MCU_SCP_EN_SFT 12 +#define IRQ11_MCU_SCP_EN_SFT 11 +#define IRQ10_MCU_SCP_EN_SFT 10 +#define IRQ9_MCU_SCP_EN_SFT 9 +#define IRQ8_MCU_SCP_EN_SFT 8 +#define IRQ7_MCU_SCP_EN_SFT 7 +#define IRQ6_MCU_SCP_EN_SFT 6 +#define IRQ5_MCU_SCP_EN_SFT 5 +#define IRQ4_MCU_SCP_EN_SFT 4 +#define IRQ3_MCU_SCP_EN_SFT 3 +#define IRQ2_MCU_SCP_EN_SFT 2 +#define IRQ1_MCU_SCP_EN_SFT 1 +#define IRQ0_MCU_SCP_EN_SFT 
0 + +/* AFE_CUSTOM_IRQ_MCU_EN */ +#define AFE_CUSTOM_IRQ_MCU_EN_SFT 0 +#define AFE_CUSTOM_IRQ_MCU_EN_MASK 0xffffffff +#define AFE_CUSTOM_IRQ_MCU_EN_MASK_SFT (0xffffffff << 0) + +/* AFE_CUSTOM_IRQ_MCU_DSP_EN */ +#define AFE_CUSTOM_IRQ_DSP_EN_SFT 0 +#define AFE_CUSTOM_IRQ_DSP_EN_MASK 0xffffffff +#define AFE_CUSTOM_IRQ_DSP_EN_MASK_SFT (0xffffffff << 0) + +/* AFE_CUSTOM_IRQ_MCU_DSP2_EN */ +#define AFE_CUSTOM_IRQ_DSP2_EN_SFT 0 +#define AFE_CUSTOM_IRQ_DSP2_EN_MASK 0xffffffff +#define AFE_CUSTOM_IRQ_DSP2_EN_MASK_SFT (0xffffffff << 0) + +/* AFE_CUSTOM_IRQ_MCU_SCP_EN */ +#define AFE_CUSTOM_IRQ_SCP_EN_SFT 0 +#define AFE_CUSTOM_IRQ_SCP_EN_MASK 0xffffffff +#define AFE_CUSTOM_IRQ_SCP_EN_MASK_SFT (0xffffffff << 0) + +/* AFE_IRQ_MCU_STATUS */ +#define IRQ26_MCU_SFT 26 +#define IRQ26_MCU_MASK 0x1 +#define IRQ26_MCU_MASK_SFT (0x1 << 26) +#define IRQ25_MCU_SFT 25 +#define IRQ25_MCU_MASK 0x1 +#define IRQ25_MCU_MASK_SFT (0x1 << 25) +#define IRQ24_MCU_SFT 24 +#define IRQ24_MCU_MASK 0x1 +#define IRQ24_MCU_MASK_SFT (0x1 << 24) +#define IRQ23_MCU_SFT 23 +#define IRQ23_MCU_MASK 0x1 +#define IRQ23_MCU_MASK_SFT (0x1 << 23) +#define IRQ22_MCU_SFT 22 +#define IRQ22_MCU_MASK 0x1 +#define IRQ22_MCU_MASK_SFT (0x1 << 22) +#define IRQ21_MCU_SFT 21 +#define IRQ21_MCU_MASK 0x1 +#define IRQ21_MCU_MASK_SFT (0x1 << 21) +#define IRQ20_MCU_SFT 20 +#define IRQ20_MCU_MASK 0x1 +#define IRQ20_MCU_MASK_SFT (0x1 << 20) +#define IRQ19_MCU_SFT 19 +#define IRQ19_MCU_MASK 0x1 +#define IRQ19_MCU_MASK_SFT (0x1 << 19) +#define IRQ18_MCU_SFT 18 +#define IRQ18_MCU_MASK 0x1 +#define IRQ18_MCU_MASK_SFT (0x1 << 18) +#define IRQ17_MCU_SFT 17 +#define IRQ17_MCU_MASK 0x1 +#define IRQ17_MCU_MASK_SFT (0x1 << 17) +#define IRQ16_MCU_SFT 16 +#define IRQ16_MCU_MASK 0x1 +#define IRQ16_MCU_MASK_SFT (0x1 << 16) +#define IRQ15_MCU_SFT 15 +#define IRQ15_MCU_MASK 0x1 +#define IRQ15_MCU_MASK_SFT (0x1 << 15) +#define IRQ14_MCU_SFT 14 +#define IRQ14_MCU_MASK 0x1 +#define IRQ14_MCU_MASK_SFT (0x1 << 14) +#define IRQ13_MCU_SFT 13 +#define 
IRQ13_MCU_MASK 0x1 +#define IRQ13_MCU_MASK_SFT (0x1 << 13) +#define IRQ12_MCU_SFT 12 +#define IRQ12_MCU_MASK 0x1 +#define IRQ12_MCU_MASK_SFT (0x1 << 12) +#define IRQ11_MCU_SFT 11 +#define IRQ11_MCU_MASK 0x1 +#define IRQ11_MCU_MASK_SFT (0x1 << 11) +#define IRQ10_MCU_SFT 10 +#define IRQ10_MCU_MASK 0x1 +#define IRQ10_MCU_MASK_SFT (0x1 << 10) +#define IRQ9_MCU_SFT 9 +#define IRQ9_MCU_MASK 0x1 +#define IRQ9_MCU_MASK_SFT (0x1 << 9) +#define IRQ8_MCU_SFT 8 +#define IRQ8_MCU_MASK 0x1 +#define IRQ8_MCU_MASK_SFT (0x1 << 8) +#define IRQ7_MCU_SFT 7 +#define IRQ7_MCU_MASK 0x1 +#define IRQ7_MCU_MASK_SFT (0x1 << 7) +#define IRQ6_MCU_SFT 6 +#define IRQ6_MCU_MASK 0x1 +#define IRQ6_MCU_MASK_SFT (0x1 << 6) +#define IRQ5_MCU_SFT 5 +#define IRQ5_MCU_MASK 0x1 +#define IRQ5_MCU_MASK_SFT (0x1 << 5) +#define IRQ4_MCU_SFT 4 +#define IRQ4_MCU_MASK 0x1 +#define IRQ4_MCU_MASK_SFT (0x1 << 4) +#define IRQ3_MCU_SFT 3 +#define IRQ3_MCU_MASK 0x1 +#define IRQ3_MCU_MASK_SFT (0x1 << 3) +#define IRQ2_MCU_SFT 2 +#define IRQ2_MCU_MASK 0x1 +#define IRQ2_MCU_MASK_SFT (0x1 << 2) +#define IRQ1_MCU_SFT 1 +#define IRQ1_MCU_MASK 0x1 +#define IRQ1_MCU_MASK_SFT (0x1 << 1) +#define IRQ0_MCU_SFT 0 +#define IRQ0_MCU_MASK 0x1 +#define IRQ0_MCU_MASK_SFT (0x1 << 0) + +/* AFE_CUSTOM_IRQ_MCU_STATUS */ +#define CUSTOM_IRQ21_MCU_SFT 21 +#define CUSTOM_IRQ21_MCU_MASK 0x1 +#define CUSTOM_IRQ21_MCU_MASK_SFT (0x1 << 21) +#define CUSTOM_IRQ20_MCU_SFT 20 +#define CUSTOM_IRQ20_MCU_MASK 0x1 +#define CUSTOM_IRQ20_MCU_MASK_SFT (0x1 << 20) +#define CUSTOM_IRQ19_MCU_SFT 19 +#define CUSTOM_IRQ19_MCU_MASK 0x1 +#define CUSTOM_IRQ19_MCU_MASK_SFT (0x1 << 19) +#define CUSTOM_IRQ18_MCU_SFT 18 +#define CUSTOM_IRQ18_MCU_MASK 0x1 +#define CUSTOM_IRQ18_MCU_MASK_SFT (0x1 << 18) +#define CUSTOM_IRQ17_MCU_SFT 17 +#define CUSTOM_IRQ17_MCU_MASK 0x1 +#define CUSTOM_IRQ17_MCU_MASK_SFT (0x1 << 17) +#define CUSTOM_IRQ16_MCU_SFT 16 +#define CUSTOM_IRQ16_MCU_MASK 0x1 +#define CUSTOM_IRQ16_MCU_MASK_SFT (0x1 << 16) +#define CUSTOM_IRQ9_MCU_SFT 9 +#define 
CUSTOM_IRQ9_MCU_MASK 0x1 +#define CUSTOM_IRQ9_MCU_MASK_SFT (0x1 << 9) +#define CUSTOM_IRQ8_MCU_SFT 8 +#define CUSTOM_IRQ8_MCU_MASK 0x1 +#define CUSTOM_IRQ8_MCU_MASK_SFT (0x1 << 8) +#define CUSTOM_IRQ7_MCU_SFT 7 +#define CUSTOM_IRQ7_MCU_MASK 0x1 +#define CUSTOM_IRQ7_MCU_MASK_SFT (0x1 << 7) +#define CUSTOM_IRQ6_MCU_SFT 6 +#define CUSTOM_IRQ6_MCU_MASK 0x1 +#define CUSTOM_IRQ6_MCU_MASK_SFT (0x1 << 6) +#define CUSTOM_IRQ5_MCU_SFT 5 +#define CUSTOM_IRQ5_MCU_MASK 0x1 +#define CUSTOM_IRQ5_MCU_MASK_SFT (0x1 << 5) +#define CUSTOM_IRQ4_MCU_SFT 4 +#define CUSTOM_IRQ4_MCU_MASK 0x1 +#define CUSTOM_IRQ4_MCU_MASK_SFT (0x1 << 4) +#define CUSTOM_IRQ3_MCU_SFT 3 +#define CUSTOM_IRQ3_MCU_MASK 0x1 +#define CUSTOM_IRQ3_MCU_MASK_SFT (0x1 << 3) +#define CUSTOM_IRQ2_MCU_SFT 2 +#define CUSTOM_IRQ2_MCU_MASK 0x1 +#define CUSTOM_IRQ2_MCU_MASK_SFT (0x1 << 2) +#define CUSTOM_IRQ1_MCU_SFT 1 +#define CUSTOM_IRQ1_MCU_MASK 0x1 +#define CUSTOM_IRQ1_MCU_MASK_SFT (0x1 << 1) +#define CUSTOM_IRQ0_MCU_SFT 0 +#define CUSTOM_IRQ0_MCU_MASK 0x1 +#define CUSTOM_IRQ0_MCU_MASK_SFT (0x1 << 0) + +/* AFE_IRQ_MCU_CFG */ +#define AFE_IRQ_CLR_CFG_SFT 31 +#define AFE_IRQ_CLR_CFG_MASK 0x1 +#define AFE_IRQ_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ_MCU_CNT_SFT 0 +#define AFE_IRQ_MCU_CNT_MASK 0xffffff +#define AFE_IRQ_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ0_MCU_CFG0 */ +#define AFE_IRQ0_MCU_DOMAIN_SFT 9 +#define AFE_IRQ0_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ0_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ0_MCU_FS_SFT 4 +#define AFE_IRQ0_MCU_FS_MASK 0x1f +#define AFE_IRQ0_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ0_MCU_ON_SFT 0 +#define AFE_IRQ0_MCU_ON_MASK 0x1 +#define AFE_IRQ0_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ0_MCU_CFG1 */ +#define AFE_IRQ0_CLR_CFG_SFT 31 +#define AFE_IRQ0_CLR_CFG_MASK 0x1 +#define AFE_IRQ0_CLR_CFG_MASK_SFT (0x1 << 31) +#define 
AFE_IRQ0_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ0_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ0_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ0_MCU_CNT_SFT 0 +#define AFE_IRQ0_MCU_CNT_MASK 0xffffff +#define AFE_IRQ0_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ1_MCU_CFG0 */ +#define AFE_IRQ1_MCU_DOMAIN_SFT 9 +#define AFE_IRQ1_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ1_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ1_MCU_FS_SFT 4 +#define AFE_IRQ1_MCU_FS_MASK 0x1f +#define AFE_IRQ1_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ1_MCU_ON_SFT 0 +#define AFE_IRQ1_MCU_ON_MASK 0x1 +#define AFE_IRQ1_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ1_MCU_CFG1 */ +#define AFE_IRQ1_CLR_CFG_SFT 31 +#define AFE_IRQ1_CLR_CFG_MASK 0x1 +#define AFE_IRQ1_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ1_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ1_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ1_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ1_MCU_CNT_SFT 0 +#define AFE_IRQ1_MCU_CNT_MASK 0xffffff +#define AFE_IRQ1_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ2_MCU_CFG0 */ +#define AFE_IRQ2_MCU_DOMAIN_SFT 9 +#define AFE_IRQ2_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ2_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ2_MCU_FS_SFT 4 +#define AFE_IRQ2_MCU_FS_MASK 0x1f +#define AFE_IRQ2_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ2_MCU_ON_SFT 0 +#define AFE_IRQ2_MCU_ON_MASK 0x1 +#define AFE_IRQ2_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ2_MCU_CFG1 */ +#define AFE_IRQ2_CLR_CFG_SFT 31 +#define AFE_IRQ2_CLR_CFG_MASK 0x1 +#define AFE_IRQ2_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ2_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ2_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ2_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ2_MCU_CNT_SFT 0 +#define AFE_IRQ2_MCU_CNT_MASK 0xffffff +#define AFE_IRQ2_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ3_MCU_CFG0 */ +#define AFE_IRQ3_MCU_DOMAIN_SFT 9 +#define AFE_IRQ3_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ3_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ3_MCU_FS_SFT 
4 +#define AFE_IRQ3_MCU_FS_MASK 0x1f +#define AFE_IRQ3_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ3_MCU_ON_SFT 0 +#define AFE_IRQ3_MCU_ON_MASK 0x1 +#define AFE_IRQ3_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ3_MCU_CFG1 */ +#define AFE_IRQ3_CLR_CFG_SFT 31 +#define AFE_IRQ3_CLR_CFG_MASK 0x1 +#define AFE_IRQ3_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ3_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ3_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ3_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ3_MCU_CNT_SFT 0 +#define AFE_IRQ3_MCU_CNT_MASK 0xffffff +#define AFE_IRQ3_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ4_MCU_CFG0 */ +#define AFE_IRQ4_MCU_DOMAIN_SFT 9 +#define AFE_IRQ4_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ4_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ4_MCU_FS_SFT 4 +#define AFE_IRQ4_MCU_FS_MASK 0x1f +#define AFE_IRQ4_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ4_MCU_ON_SFT 0 +#define AFE_IRQ4_MCU_ON_MASK 0x1 +#define AFE_IRQ4_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ4_MCU_CFG1 */ +#define AFE_IRQ4_CLR_CFG_SFT 31 +#define AFE_IRQ4_CLR_CFG_MASK 0x1 +#define AFE_IRQ4_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ4_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ4_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ4_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ4_MCU_CNT_SFT 0 +#define AFE_IRQ4_MCU_CNT_MASK 0xffffff +#define AFE_IRQ4_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ5_MCU_CFG0 */ +#define AFE_IRQ5_MCU_DOMAIN_SFT 9 +#define AFE_IRQ5_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ5_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ5_MCU_FS_SFT 4 +#define AFE_IRQ5_MCU_FS_MASK 0x1f +#define AFE_IRQ5_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ5_MCU_ON_SFT 0 +#define AFE_IRQ5_MCU_ON_MASK 0x1 +#define AFE_IRQ5_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ5_MCU_CFG1 */ +#define AFE_IRQ5_CLR_CFG_SFT 31 +#define AFE_IRQ5_CLR_CFG_MASK 0x1 +#define AFE_IRQ5_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ5_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ5_MISS_FLAG_CLR_CFG_MASK 0x1 +#define 
AFE_IRQ5_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ5_MCU_CNT_SFT 0 +#define AFE_IRQ5_MCU_CNT_MASK 0xffffff +#define AFE_IRQ5_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ6_MCU_CFG0 */ +#define AFE_IRQ6_MCU_DOMAIN_SFT 9 +#define AFE_IRQ6_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ6_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ6_MCU_FS_SFT 4 +#define AFE_IRQ6_MCU_FS_MASK 0x1f +#define AFE_IRQ6_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ6_MCU_ON_SFT 0 +#define AFE_IRQ6_MCU_ON_MASK 0x1 +#define AFE_IRQ6_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ6_MCU_CFG1 */ +#define AFE_IRQ6_CLR_CFG_SFT 31 +#define AFE_IRQ6_CLR_CFG_MASK 0x1 +#define AFE_IRQ6_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ6_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ6_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ6_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ6_MCU_CNT_SFT 0 +#define AFE_IRQ6_MCU_CNT_MASK 0xffffff +#define AFE_IRQ6_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ7_MCU_CFG0 */ +#define AFE_IRQ7_MCU_DOMAIN_SFT 9 +#define AFE_IRQ7_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ7_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ7_MCU_FS_SFT 4 +#define AFE_IRQ7_MCU_FS_MASK 0x1f +#define AFE_IRQ7_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ7_MCU_ON_SFT 0 +#define AFE_IRQ7_MCU_ON_MASK 0x1 +#define AFE_IRQ7_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ7_MCU_CFG1 */ +#define AFE_IRQ7_CLR_CFG_SFT 31 +#define AFE_IRQ7_CLR_CFG_MASK 0x1 +#define AFE_IRQ7_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ7_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ7_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ7_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ7_MCU_CNT_SFT 0 +#define AFE_IRQ7_MCU_CNT_MASK 0xffffff +#define AFE_IRQ7_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ8_MCU_CFG0 */ +#define AFE_IRQ8_MCU_DOMAIN_SFT 9 +#define AFE_IRQ8_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ8_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ8_MCU_FS_SFT 4 +#define AFE_IRQ8_MCU_FS_MASK 0x1f +#define AFE_IRQ8_MCU_FS_MASK_SFT (0x1f << 4) 
+#define AFE_IRQ8_MCU_ON_SFT 0 +#define AFE_IRQ8_MCU_ON_MASK 0x1 +#define AFE_IRQ8_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ8_MCU_CFG1 */ +#define AFE_IRQ8_CLR_CFG_SFT 31 +#define AFE_IRQ8_CLR_CFG_MASK 0x1 +#define AFE_IRQ8_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ8_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ8_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ8_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ8_MCU_CNT_SFT 0 +#define AFE_IRQ8_MCU_CNT_MASK 0xffffff +#define AFE_IRQ8_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ9_MCU_CFG0 */ +#define AFE_IRQ9_MCU_DOMAIN_SFT 9 +#define AFE_IRQ9_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ9_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ9_MCU_FS_SFT 4 +#define AFE_IRQ9_MCU_FS_MASK 0x1f +#define AFE_IRQ9_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ9_MCU_ON_SFT 0 +#define AFE_IRQ9_MCU_ON_MASK 0x1 +#define AFE_IRQ9_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ9_MCU_CFG1 */ +#define AFE_IRQ9_CLR_CFG_SFT 31 +#define AFE_IRQ9_CLR_CFG_MASK 0x1 +#define AFE_IRQ9_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ9_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ9_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ9_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ9_MCU_CNT_SFT 0 +#define AFE_IRQ9_MCU_CNT_MASK 0xffffff +#define AFE_IRQ9_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ10_MCU_CFG0 */ +#define AFE_IRQ10_MCU_DOMAIN_SFT 9 +#define AFE_IRQ10_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ10_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ10_MCU_FS_SFT 4 +#define AFE_IRQ10_MCU_FS_MASK 0x1f +#define AFE_IRQ10_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ10_MCU_ON_SFT 0 +#define AFE_IRQ10_MCU_ON_MASK 0x1 +#define AFE_IRQ10_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ10_MCU_CFG1 */ +#define AFE_IRQ10_CLR_CFG_SFT 31 +#define AFE_IRQ10_CLR_CFG_MASK 0x1 +#define AFE_IRQ10_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ10_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ10_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ10_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define 
AFE_IRQ10_MCU_CNT_SFT 0 +#define AFE_IRQ10_MCU_CNT_MASK 0xffffff +#define AFE_IRQ10_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ11_MCU_CFG0 */ +#define AFE_IRQ11_MCU_DOMAIN_SFT 9 +#define AFE_IRQ11_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ11_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ11_MCU_FS_SFT 4 +#define AFE_IRQ11_MCU_FS_MASK 0x1f +#define AFE_IRQ11_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ11_MCU_ON_SFT 0 +#define AFE_IRQ11_MCU_ON_MASK 0x1 +#define AFE_IRQ11_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ11_MCU_CFG1 */ +#define AFE_IRQ11_CLR_CFG_SFT 31 +#define AFE_IRQ11_CLR_CFG_MASK 0x1 +#define AFE_IRQ11_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ11_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ11_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ11_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ11_MCU_CNT_SFT 0 +#define AFE_IRQ11_MCU_CNT_MASK 0xffffff +#define AFE_IRQ11_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ12_MCU_CFG0 */ +#define AFE_IRQ12_MCU_DOMAIN_SFT 9 +#define AFE_IRQ12_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ12_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ12_MCU_FS_SFT 4 +#define AFE_IRQ12_MCU_FS_MASK 0x1f +#define AFE_IRQ12_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ12_MCU_ON_SFT 0 +#define AFE_IRQ12_MCU_ON_MASK 0x1 +#define AFE_IRQ12_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ12_MCU_CFG1 */ +#define AFE_IRQ12_CLR_CFG_SFT 31 +#define AFE_IRQ12_CLR_CFG_MASK 0x1 +#define AFE_IRQ12_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ12_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ12_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ12_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ12_MCU_CNT_SFT 0 +#define AFE_IRQ12_MCU_CNT_MASK 0xffffff +#define AFE_IRQ12_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ13_MCU_CFG0 */ +#define AFE_IRQ13_MCU_DOMAIN_SFT 9 +#define AFE_IRQ13_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ13_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ13_MCU_FS_SFT 4 +#define AFE_IRQ13_MCU_FS_MASK 0x1f +#define AFE_IRQ13_MCU_FS_MASK_SFT (0x1f << 4) +#define 
AFE_IRQ13_MCU_ON_SFT 0 +#define AFE_IRQ13_MCU_ON_MASK 0x1 +#define AFE_IRQ13_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ13_MCU_CFG1 */ +#define AFE_IRQ13_CLR_CFG_SFT 31 +#define AFE_IRQ13_CLR_CFG_MASK 0x1 +#define AFE_IRQ13_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ13_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ13_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ13_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ13_MCU_CNT_SFT 0 +#define AFE_IRQ13_MCU_CNT_MASK 0xffffff +#define AFE_IRQ13_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ14_MCU_CFG0 */ +#define AFE_IRQ14_MCU_DOMAIN_SFT 9 +#define AFE_IRQ14_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ14_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ14_MCU_FS_SFT 4 +#define AFE_IRQ14_MCU_FS_MASK 0x1f +#define AFE_IRQ14_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ14_MCU_ON_SFT 0 +#define AFE_IRQ14_MCU_ON_MASK 0x1 +#define AFE_IRQ14_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ14_MCU_CFG1 */ +#define AFE_IRQ14_CLR_CFG_SFT 31 +#define AFE_IRQ14_CLR_CFG_MASK 0x1 +#define AFE_IRQ14_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ14_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ14_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ14_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ14_MCU_CNT_SFT 0 +#define AFE_IRQ14_MCU_CNT_MASK 0xffffff +#define AFE_IRQ14_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ15_MCU_CFG0 */ +#define AFE_IRQ15_MCU_DOMAIN_SFT 9 +#define AFE_IRQ15_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ15_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ15_MCU_FS_SFT 4 +#define AFE_IRQ15_MCU_FS_MASK 0x1f +#define AFE_IRQ15_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ15_MCU_ON_SFT 0 +#define AFE_IRQ15_MCU_ON_MASK 0x1 +#define AFE_IRQ15_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ15_MCU_CFG1 */ +#define AFE_IRQ15_CLR_CFG_SFT 31 +#define AFE_IRQ15_CLR_CFG_MASK 0x1 +#define AFE_IRQ15_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ15_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ15_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ15_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) 
+#define AFE_IRQ15_MCU_CNT_SFT 0 +#define AFE_IRQ15_MCU_CNT_MASK 0xffffff +#define AFE_IRQ15_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ16_MCU_CFG0 */ +#define AFE_IRQ16_MCU_DOMAIN_SFT 9 +#define AFE_IRQ16_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ16_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ16_MCU_FS_SFT 4 +#define AFE_IRQ16_MCU_FS_MASK 0x1f +#define AFE_IRQ16_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ16_MCU_ON_SFT 0 +#define AFE_IRQ16_MCU_ON_MASK 0x1 +#define AFE_IRQ16_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ16_MCU_CFG1 */ +#define AFE_IRQ16_CLR_CFG_SFT 31 +#define AFE_IRQ16_CLR_CFG_MASK 0x1 +#define AFE_IRQ16_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ16_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ16_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ16_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ16_MCU_CNT_SFT 0 +#define AFE_IRQ16_MCU_CNT_MASK 0xffffff +#define AFE_IRQ16_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ17_MCU_CFG0 */ +#define AFE_IRQ17_MCU_DOMAIN_SFT 9 +#define AFE_IRQ17_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ17_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ17_MCU_FS_SFT 4 +#define AFE_IRQ17_MCU_FS_MASK 0x1f +#define AFE_IRQ17_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ17_MCU_ON_SFT 0 +#define AFE_IRQ17_MCU_ON_MASK 0x1 +#define AFE_IRQ17_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ17_MCU_CFG1 */ +#define AFE_IRQ17_CLR_CFG_SFT 31 +#define AFE_IRQ17_CLR_CFG_MASK 0x1 +#define AFE_IRQ17_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ17_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ17_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ17_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ17_MCU_CNT_SFT 0 +#define AFE_IRQ17_MCU_CNT_MASK 0xffffff +#define AFE_IRQ17_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ18_MCU_CFG0 */ +#define AFE_IRQ18_MCU_DOMAIN_SFT 9 +#define AFE_IRQ18_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ18_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ18_MCU_FS_SFT 4 +#define AFE_IRQ18_MCU_FS_MASK 0x1f +#define AFE_IRQ18_MCU_FS_MASK_SFT (0x1f << 4) 
+#define AFE_IRQ18_MCU_ON_SFT 0 +#define AFE_IRQ18_MCU_ON_MASK 0x1 +#define AFE_IRQ18_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ18_MCU_CFG1 */ +#define AFE_IRQ18_CLR_CFG_SFT 31 +#define AFE_IRQ18_CLR_CFG_MASK 0x1 +#define AFE_IRQ18_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ18_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ18_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ18_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ18_MCU_CNT_SFT 0 +#define AFE_IRQ18_MCU_CNT_MASK 0xffffff +#define AFE_IRQ18_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ19_MCU_CFG0 */ +#define AFE_IRQ19_MCU_DOMAIN_SFT 9 +#define AFE_IRQ19_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ19_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ19_MCU_FS_SFT 4 +#define AFE_IRQ19_MCU_FS_MASK 0x1f +#define AFE_IRQ19_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ19_MCU_ON_SFT 0 +#define AFE_IRQ19_MCU_ON_MASK 0x1 +#define AFE_IRQ19_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ19_MCU_CFG1 */ +#define AFE_IRQ19_CLR_CFG_SFT 31 +#define AFE_IRQ19_CLR_CFG_MASK 0x1 +#define AFE_IRQ19_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ19_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ19_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ19_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ19_MCU_CNT_SFT 0 +#define AFE_IRQ19_MCU_CNT_MASK 0xffffff +#define AFE_IRQ19_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ20_MCU_CFG0 */ +#define AFE_IRQ20_MCU_DOMAIN_SFT 9 +#define AFE_IRQ20_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ20_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ20_MCU_FS_SFT 4 +#define AFE_IRQ20_MCU_FS_MASK 0x1f +#define AFE_IRQ20_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ20_MCU_ON_SFT 0 +#define AFE_IRQ20_MCU_ON_MASK 0x1 +#define AFE_IRQ20_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ20_MCU_CFG1 */ +#define AFE_IRQ20_CLR_CFG_SFT 31 +#define AFE_IRQ20_CLR_CFG_MASK 0x1 +#define AFE_IRQ20_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ20_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ20_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ20_MISS_FLAG_CLR_CFG_MASK_SFT 
(0x1 << 30) +#define AFE_IRQ20_MCU_CNT_SFT 0 +#define AFE_IRQ20_MCU_CNT_MASK 0xffffff +#define AFE_IRQ20_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ21_MCU_CFG0 */ +#define AFE_IRQ21_MCU_DOMAIN_SFT 9 +#define AFE_IRQ21_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ21_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ21_MCU_FS_SFT 4 +#define AFE_IRQ21_MCU_FS_MASK 0x1f +#define AFE_IRQ21_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ21_MCU_ON_SFT 0 +#define AFE_IRQ21_MCU_ON_MASK 0x1 +#define AFE_IRQ21_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ21_MCU_CFG1 */ +#define AFE_IRQ21_CLR_CFG_SFT 31 +#define AFE_IRQ21_CLR_CFG_MASK 0x1 +#define AFE_IRQ21_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ21_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ21_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ21_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ21_MCU_CNT_SFT 0 +#define AFE_IRQ21_MCU_CNT_MASK 0xffffff +#define AFE_IRQ21_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ22_MCU_CFG0 */ +#define AFE_IRQ22_MCU_DOMAIN_SFT 9 +#define AFE_IRQ22_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ22_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ22_MCU_FS_SFT 4 +#define AFE_IRQ22_MCU_FS_MASK 0x1f +#define AFE_IRQ22_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ22_MCU_ON_SFT 0 +#define AFE_IRQ22_MCU_ON_MASK 0x1 +#define AFE_IRQ22_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ22_MCU_CFG1 */ +#define AFE_IRQ22_CLR_CFG_SFT 31 +#define AFE_IRQ22_CLR_CFG_MASK 0x1 +#define AFE_IRQ22_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ22_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ22_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ22_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ22_MCU_CNT_SFT 0 +#define AFE_IRQ22_MCU_CNT_MASK 0xffffff +#define AFE_IRQ22_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ23_MCU_CFG0 */ +#define AFE_IRQ23_MCU_DOMAIN_SFT 9 +#define AFE_IRQ23_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ23_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ23_MCU_FS_SFT 4 +#define AFE_IRQ23_MCU_FS_MASK 0x1f +#define AFE_IRQ23_MCU_FS_MASK_SFT 
(0x1f << 4) +#define AFE_IRQ23_MCU_ON_SFT 0 +#define AFE_IRQ23_MCU_ON_MASK 0x1 +#define AFE_IRQ23_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ23_MCU_CFG1 */ +#define AFE_IRQ23_CLR_CFG_SFT 31 +#define AFE_IRQ23_CLR_CFG_MASK 0x1 +#define AFE_IRQ23_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ23_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ23_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ23_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ23_MCU_CNT_SFT 0 +#define AFE_IRQ23_MCU_CNT_MASK 0xffffff +#define AFE_IRQ23_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ24_MCU_CFG0 */ +#define AFE_IRQ24_MCU_DOMAIN_SFT 9 +#define AFE_IRQ24_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ24_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ24_MCU_FS_SFT 4 +#define AFE_IRQ24_MCU_FS_MASK 0x1f +#define AFE_IRQ24_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ24_MCU_ON_SFT 0 +#define AFE_IRQ24_MCU_ON_MASK 0x1 +#define AFE_IRQ24_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ24_MCU_CFG1 */ +#define AFE_IRQ24_CLR_CFG_SFT 31 +#define AFE_IRQ24_CLR_CFG_MASK 0x1 +#define AFE_IRQ24_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ24_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ24_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ24_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ24_MCU_CNT_SFT 0 +#define AFE_IRQ24_MCU_CNT_MASK 0xffffff +#define AFE_IRQ24_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ25_MCU_CFG0 */ +#define AFE_IRQ25_MCU_DOMAIN_SFT 9 +#define AFE_IRQ25_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ25_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ25_MCU_FS_SFT 4 +#define AFE_IRQ25_MCU_FS_MASK 0x1f +#define AFE_IRQ25_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ25_MCU_ON_SFT 0 +#define AFE_IRQ25_MCU_ON_MASK 0x1 +#define AFE_IRQ25_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ25_MCU_CFG1 */ +#define AFE_IRQ25_CLR_CFG_SFT 31 +#define AFE_IRQ25_CLR_CFG_MASK 0x1 +#define AFE_IRQ25_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ25_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ25_MISS_FLAG_CLR_CFG_MASK 0x1 +#define 
AFE_IRQ25_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ25_MCU_CNT_SFT 0 +#define AFE_IRQ25_MCU_CNT_MASK 0xffffff +#define AFE_IRQ25_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ26_MCU_CFG0 */ +#define AFE_IRQ26_MCU_DOMAIN_SFT 9 +#define AFE_IRQ26_MCU_DOMAIN_MASK 0x7 +#define AFE_IRQ26_MCU_DOMAIN_MASK_SFT (0x7 << 9) +#define AFE_IRQ26_MCU_FS_SFT 4 +#define AFE_IRQ26_MCU_FS_MASK 0x1f +#define AFE_IRQ26_MCU_FS_MASK_SFT (0x1f << 4) +#define AFE_IRQ26_MCU_ON_SFT 0 +#define AFE_IRQ26_MCU_ON_MASK 0x1 +#define AFE_IRQ26_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ26_MCU_CFG1 */ +#define AFE_IRQ26_CLR_CFG_SFT 31 +#define AFE_IRQ26_CLR_CFG_MASK 0x1 +#define AFE_IRQ26_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_IRQ26_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_IRQ26_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_IRQ26_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_IRQ26_MCU_CNT_SFT 0 +#define AFE_IRQ26_MCU_CNT_MASK 0xffffff +#define AFE_IRQ26_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_CUSTOM_IRQ0_MCU_CFG0 */ +#define AFE_CUSTOM_IRQ0_MCU_ON_SFT 0 +#define AFE_CUSTOM_IRQ0_MCU_ON_MASK 0x1 +#define AFE_CUSTOM_IRQ0_MCU_ON_MASK_SFT (0x1 << 0) + +/* AFE_IRQ_MCU_MON0 */ +#define AFE_IRQ26_MISS_FLAG_SFT 26 +#define AFE_IRQ26_MISS_FLAG_MASK 0x1 +#define AFE_IRQ26_MISS_FLAG_MASK_SFT (0x1 << 26) +#define AFE_IRQ25_MISS_FLAG_SFT 25 +#define AFE_IRQ25_MISS_FLAG_MASK 0x1 +#define AFE_IRQ25_MISS_FLAG_MASK_SFT (0x1 << 25) +#define AFE_IRQ24_MISS_FLAG_SFT 24 +#define AFE_IRQ24_MISS_FLAG_MASK 0x1 +#define AFE_IRQ24_MISS_FLAG_MASK_SFT (0x1 << 24) +#define AFE_IRQ23_MISS_FLAG_SFT 23 +#define AFE_IRQ23_MISS_FLAG_MASK 0x1 +#define AFE_IRQ23_MISS_FLAG_MASK_SFT (0x1 << 23) +#define AFE_IRQ22_MISS_FLAG_SFT 22 +#define AFE_IRQ22_MISS_FLAG_MASK 0x1 +#define AFE_IRQ22_MISS_FLAG_MASK_SFT (0x1 << 22) +#define AFE_IRQ21_MISS_FLAG_SFT 21 +#define AFE_IRQ21_MISS_FLAG_MASK 0x1 +#define AFE_IRQ21_MISS_FLAG_MASK_SFT (0x1 << 21) +#define AFE_IRQ20_MISS_FLAG_SFT 20 +#define AFE_IRQ20_MISS_FLAG_MASK 0x1 +#define 
AFE_IRQ20_MISS_FLAG_MASK_SFT (0x1 << 20) +#define AFE_IRQ19_MISS_FLAG_SFT 19 +#define AFE_IRQ19_MISS_FLAG_MASK 0x1 +#define AFE_IRQ19_MISS_FLAG_MASK_SFT (0x1 << 19) +#define AFE_IRQ18_MISS_FLAG_SFT 18 +#define AFE_IRQ18_MISS_FLAG_MASK 0x1 +#define AFE_IRQ18_MISS_FLAG_MASK_SFT (0x1 << 18) +#define AFE_IRQ17_MISS_FLAG_SFT 17 +#define AFE_IRQ17_MISS_FLAG_MASK 0x1 +#define AFE_IRQ17_MISS_FLAG_MASK_SFT (0x1 << 17) +#define AFE_IRQ16_MISS_FLAG_SFT 16 +#define AFE_IRQ16_MISS_FLAG_MASK 0x1 +#define AFE_IRQ16_MISS_FLAG_MASK_SFT (0x1 << 16) +#define AFE_IRQ15_MISS_FLAG_SFT 15 +#define AFE_IRQ15_MISS_FLAG_MASK 0x1 +#define AFE_IRQ15_MISS_FLAG_MASK_SFT (0x1 << 15) +#define AFE_IRQ14_MISS_FLAG_SFT 14 +#define AFE_IRQ14_MISS_FLAG_MASK 0x1 +#define AFE_IRQ14_MISS_FLAG_MASK_SFT (0x1 << 14) +#define AFE_IRQ13_MISS_FLAG_SFT 13 +#define AFE_IRQ13_MISS_FLAG_MASK 0x1 +#define AFE_IRQ13_MISS_FLAG_MASK_SFT (0x1 << 13) +#define AFE_IRQ12_MISS_FLAG_SFT 12 +#define AFE_IRQ12_MISS_FLAG_MASK 0x1 +#define AFE_IRQ12_MISS_FLAG_MASK_SFT (0x1 << 12) +#define AFE_IRQ11_MISS_FLAG_SFT 11 +#define AFE_IRQ11_MISS_FLAG_MASK 0x1 +#define AFE_IRQ11_MISS_FLAG_MASK_SFT (0x1 << 11) +#define AFE_IRQ10_MISS_FLAG_SFT 10 +#define AFE_IRQ10_MISS_FLAG_MASK 0x1 +#define AFE_IRQ10_MISS_FLAG_MASK_SFT (0x1 << 10) +#define AFE_IRQ9_MISS_FLAG_SFT 9 +#define AFE_IRQ9_MISS_FLAG_MASK 0x1 +#define AFE_IRQ9_MISS_FLAG_MASK_SFT (0x1 << 9) +#define AFE_IRQ8_MISS_FLAG_SFT 8 +#define AFE_IRQ8_MISS_FLAG_MASK 0x1 +#define AFE_IRQ8_MISS_FLAG_MASK_SFT (0x1 << 8) +#define AFE_IRQ7_MISS_FLAG_SFT 7 +#define AFE_IRQ7_MISS_FLAG_MASK 0x1 +#define AFE_IRQ7_MISS_FLAG_MASK_SFT (0x1 << 7) +#define AFE_IRQ6_MISS_FLAG_SFT 6 +#define AFE_IRQ6_MISS_FLAG_MASK 0x1 +#define AFE_IRQ6_MISS_FLAG_MASK_SFT (0x1 << 6) +#define AFE_IRQ5_MISS_FLAG_SFT 5 +#define AFE_IRQ5_MISS_FLAG_MASK 0x1 +#define AFE_IRQ5_MISS_FLAG_MASK_SFT (0x1 << 5) +#define AFE_IRQ4_MISS_FLAG_SFT 4 +#define AFE_IRQ4_MISS_FLAG_MASK 0x1 +#define AFE_IRQ4_MISS_FLAG_MASK_SFT (0x1 << 4) 
+#define AFE_IRQ3_MISS_FLAG_SFT 3 +#define AFE_IRQ3_MISS_FLAG_MASK 0x1 +#define AFE_IRQ3_MISS_FLAG_MASK_SFT (0x1 << 3) +#define AFE_IRQ2_MISS_FLAG_SFT 2 +#define AFE_IRQ2_MISS_FLAG_MASK 0x1 +#define AFE_IRQ2_MISS_FLAG_MASK_SFT (0x1 << 2) +#define AFE_IRQ1_MISS_FLAG_SFT 1 +#define AFE_IRQ1_MISS_FLAG_MASK 0x1 +#define AFE_IRQ1_MISS_FLAG_MASK_SFT (0x1 << 1) +#define AFE_IRQ0_MISS_FLAG_SFT 0 +#define AFE_IRQ0_MISS_FLAG_MASK 0x1 +#define AFE_IRQ0_MISS_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_IRQ_MCU_MON1 */ +#define AFE_CUSTOM_IRQ21_MISS_FLAG_SFT 21 +#define AFE_CUSTOM_IRQ21_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ21_MISS_FLAG_MASK_SFT (0x1 << 21) +#define AFE_CUSTOM_IRQ20_MISS_FLAG_SFT 20 +#define AFE_CUSTOM_IRQ20_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ20_MISS_FLAG_MASK_SFT (0x1 << 20) +#define AFE_CUSTOM_IRQ19_MISS_FLAG_SFT 19 +#define AFE_CUSTOM_IRQ19_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ19_MISS_FLAG_MASK_SFT (0x1 << 19) +#define AFE_CUSTOM_IRQ18_MISS_FLAG_SFT 18 +#define AFE_CUSTOM_IRQ18_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ18_MISS_FLAG_MASK_SFT (0x1 << 18) +#define AFE_CUSTOM_IRQ17_MISS_FLAG_SFT 17 +#define AFE_CUSTOM_IRQ17_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ17_MISS_FLAG_MASK_SFT (0x1 << 17) +#define AFE_CUSTOM_IRQ16_MISS_FLAG_SFT 16 +#define AFE_CUSTOM_IRQ16_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ16_MISS_FLAG_MASK_SFT (0x1 << 16) +#define AFE_CUSTOM_IRQ9_MISS_FLAG_SFT 9 +#define AFE_CUSTOM_IRQ9_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ9_MISS_FLAG_MASK_SFT (0x1 << 9) +#define AFE_CUSTOM_IRQ8_MISS_FLAG_SFT 8 +#define AFE_CUSTOM_IRQ8_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ8_MISS_FLAG_MASK_SFT (0x1 << 8) +#define AFE_CUSTOM_IRQ7_MISS_FLAG_SFT 7 +#define AFE_CUSTOM_IRQ7_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ7_MISS_FLAG_MASK_SFT (0x1 << 7) +#define AFE_CUSTOM_IRQ6_MISS_FLAG_SFT 6 +#define AFE_CUSTOM_IRQ6_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ6_MISS_FLAG_MASK_SFT (0x1 << 6) +#define AFE_CUSTOM_IRQ5_MISS_FLAG_SFT 5 +#define 
AFE_CUSTOM_IRQ5_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ5_MISS_FLAG_MASK_SFT (0x1 << 5) +#define AFE_CUSTOM_IRQ4_MISS_FLAG_SFT 4 +#define AFE_CUSTOM_IRQ4_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ4_MISS_FLAG_MASK_SFT (0x1 << 4) +#define AFE_CUSTOM_IRQ3_MISS_FLAG_SFT 3 +#define AFE_CUSTOM_IRQ3_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ3_MISS_FLAG_MASK_SFT (0x1 << 3) +#define AFE_CUSTOM_IRQ2_MISS_FLAG_SFT 2 +#define AFE_CUSTOM_IRQ2_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ2_MISS_FLAG_MASK_SFT (0x1 << 2) +#define AFE_CUSTOM_IRQ1_MISS_FLAG_SFT 1 +#define AFE_CUSTOM_IRQ1_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ1_MISS_FLAG_MASK_SFT (0x1 << 1) +#define AFE_CUSTOM_IRQ0_MISS_FLAG_SFT 0 +#define AFE_CUSTOM_IRQ0_MISS_FLAG_MASK 0x1 +#define AFE_CUSTOM_IRQ0_MISS_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_IRQ_MCU_MON2 */ +#define AFE_IRQ_B_R_CNT_SFT 8 +#define AFE_IRQ_B_R_CNT_MASK 0xff +#define AFE_IRQ_B_R_CNT_MASK_SFT (0xff << 8) +#define AFE_IRQ_B_F_CNT_SFT 0 +#define AFE_IRQ_B_F_CNT_MASK 0xff +#define AFE_IRQ_B_F_CNT_MASK_SFT (0xff << 0) + +/* AFE_IRQ0_CNT_MON */ +#define AFE_IRQ0_CNT_MON_SFT 0 +#define AFE_IRQ0_CNT_MON_MASK 0xffffff +#define AFE_IRQ0_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ1_CNT_MON */ +#define AFE_IRQ1_CNT_MON_SFT 0 +#define AFE_IRQ1_CNT_MON_MASK 0xffffff +#define AFE_IRQ1_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ2_CNT_MON */ +#define AFE_IRQ2_CNT_MON_SFT 0 +#define AFE_IRQ2_CNT_MON_MASK 0xffffff +#define AFE_IRQ2_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ3_CNT_MON */ +#define AFE_IRQ3_CNT_MON_SFT 0 +#define AFE_IRQ3_CNT_MON_MASK 0xffffff +#define AFE_IRQ3_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ4_CNT_MON */ +#define AFE_IRQ4_CNT_MON_SFT 0 +#define AFE_IRQ4_CNT_MON_MASK 0xffffff +#define AFE_IRQ4_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ5_CNT_MON */ +#define AFE_IRQ5_CNT_MON_SFT 0 +#define AFE_IRQ5_CNT_MON_MASK 0xffffff +#define AFE_IRQ5_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ6_CNT_MON */ +#define AFE_IRQ6_CNT_MON_SFT 0 
+#define AFE_IRQ6_CNT_MON_MASK 0xffffff +#define AFE_IRQ6_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ7_CNT_MON */ +#define AFE_IRQ7_CNT_MON_SFT 0 +#define AFE_IRQ7_CNT_MON_MASK 0xffffff +#define AFE_IRQ7_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ8_CNT_MON */ +#define AFE_IRQ8_CNT_MON_SFT 0 +#define AFE_IRQ8_CNT_MON_MASK 0xffffff +#define AFE_IRQ8_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ9_CNT_MON */ +#define AFE_IRQ9_CNT_MON_SFT 0 +#define AFE_IRQ9_CNT_MON_MASK 0xffffff +#define AFE_IRQ9_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ10_CNT_MON */ +#define AFE_IRQ10_CNT_MON_SFT 0 +#define AFE_IRQ10_CNT_MON_MASK 0xffffff +#define AFE_IRQ10_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ11_CNT_MON */ +#define AFE_IRQ11_CNT_MON_SFT 0 +#define AFE_IRQ11_CNT_MON_MASK 0xffffff +#define AFE_IRQ11_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ12_CNT_MON */ +#define AFE_IRQ12_CNT_MON_SFT 0 +#define AFE_IRQ12_CNT_MON_MASK 0xffffff +#define AFE_IRQ12_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ13_CNT_MON */ +#define AFE_IRQ13_CNT_MON_SFT 0 +#define AFE_IRQ13_CNT_MON_MASK 0xffffff +#define AFE_IRQ13_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ14_CNT_MON */ +#define AFE_IRQ14_CNT_MON_SFT 0 +#define AFE_IRQ14_CNT_MON_MASK 0xffffff +#define AFE_IRQ14_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ15_CNT_MON */ +#define AFE_IRQ15_CNT_MON_SFT 0 +#define AFE_IRQ15_CNT_MON_MASK 0xffffff +#define AFE_IRQ15_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ16_CNT_MON */ +#define AFE_IRQ16_CNT_MON_SFT 0 +#define AFE_IRQ16_CNT_MON_MASK 0xffffff +#define AFE_IRQ16_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ17_CNT_MON */ +#define AFE_IRQ17_CNT_MON_SFT 0 +#define AFE_IRQ17_CNT_MON_MASK 0xffffff +#define AFE_IRQ17_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ18_CNT_MON */ +#define AFE_IRQ18_CNT_MON_SFT 0 +#define AFE_IRQ18_CNT_MON_MASK 0xffffff +#define AFE_IRQ18_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ19_CNT_MON */ +#define AFE_IRQ19_CNT_MON_SFT 0 +#define 
AFE_IRQ19_CNT_MON_MASK 0xffffff +#define AFE_IRQ19_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ20_CNT_MON */ +#define AFE_IRQ20_CNT_MON_SFT 0 +#define AFE_IRQ20_CNT_MON_MASK 0xffffff +#define AFE_IRQ20_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ21_CNT_MON */ +#define AFE_IRQ21_CNT_MON_SFT 0 +#define AFE_IRQ21_CNT_MON_MASK 0xffffff +#define AFE_IRQ21_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ22_CNT_MON */ +#define AFE_IRQ22_CNT_MON_SFT 0 +#define AFE_IRQ22_CNT_MON_MASK 0xffffff +#define AFE_IRQ22_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ23_CNT_MON */ +#define AFE_IRQ23_CNT_MON_SFT 0 +#define AFE_IRQ23_CNT_MON_MASK 0xffffff +#define AFE_IRQ23_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ24_CNT_MON */ +#define AFE_IRQ24_CNT_MON_SFT 0 +#define AFE_IRQ24_CNT_MON_MASK 0xffffff +#define AFE_IRQ24_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ25_CNT_MON */ +#define AFE_IRQ25_CNT_MON_SFT 0 +#define AFE_IRQ25_CNT_MON_MASK 0xffffff +#define AFE_IRQ25_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_IRQ26_CNT_MON */ +#define AFE_IRQ26_CNT_MON_SFT 0 +#define AFE_IRQ26_CNT_MON_MASK 0xffffff +#define AFE_IRQ26_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_CUSTOM_IRQ0_CNT_MON */ +#define AFE_CUSTOM_IRQ0_CNT_MON_SFT 0 +#define AFE_CUSTOM_IRQ0_CNT_MON_MASK 0xffffff +#define AFE_CUSTOM_IRQ0_CNT_MON_MASK_SFT (0xffffff << 0) + +/* AFE_CUSTOM_IRQ0_MCU_CFG1 */ +#define AFE_CUSTOM_IRQ0_CLR_CFG_SFT 31 +#define AFE_CUSTOM_IRQ0_CLR_CFG_MASK 0x1 +#define AFE_CUSTOM_IRQ0_CLR_CFG_MASK_SFT (0x1 << 31) +#define AFE_CUSTOM_IRQ0_MISS_FLAG_CLR_CFG_SFT 30 +#define AFE_CUSTOM_IRQ0_MISS_FLAG_CLR_CFG_MASK 0x1 +#define AFE_CUSTOM_IRQ0_MISS_FLAG_CLR_CFG_MASK_SFT (0x1 << 30) +#define AFE_CUSTOM_IRQ0_MCU_CNT_SFT 0 +#define AFE_CUSTOM_IRQ0_MCU_CNT_MASK 0xffffff +#define AFE_CUSTOM_IRQ0_MCU_CNT_MASK_SFT (0xffffff << 0) + +/* AFE_GAIN0_CON1_R */ +/* AFE_GAIN1_CON1_R */ +/* AFE_GAIN2_CON1_R */ +/* AFE_GAIN3_CON1_R */ +#define GAIN_TARGET_R_SFT 0 +#define GAIN_TARGET_R_MASK 0xffffffff +#define 
GAIN_TARGET_R_MASK_SFT (0xffffffff << 0) + +/* AFE_GAIN0_CON1_L */ +/* AFE_GAIN1_CON1_L */ +/* AFE_GAIN2_CON1_L */ +/* AFE_GAIN3_CON1_L */ +#define GAIN_TARGET_L_SFT 0 +#define GAIN_TARGET_L_MASK 0xffffffff +#define GAIN_TARGET_L_MASK_SFT (0xffffffff << 0) + +/* AFE_GAIN0_CON2 */ +#define GAIN0_DOWN_STEP_SFT 0 +#define GAIN0_DOWN_STEP_MASK 0x3fffff +#define GAIN0_DOWN_STEP_MASK_SFT (0x3fffff << 0) + +/* AFE_GAIN0_CON3 */ +#define GAIN0_UP_STEP_SFT 0 +#define GAIN0_UP_STEP_MASK 0x3fffff +#define GAIN0_UP_STEP_MASK_SFT (0x3fffff << 0) + +/* AFE_GAIN0_CUR_R */ +/* AFE_GAIN1_CUR_R */ +/* AFE_GAIN2_CUR_R */ +/* AFE_GAIN3_CUR_R */ +#define AFE_GAIN_CUR_R_SFT 0 +#define AFE_GAIN_CUR_R_MASK 0xffffffff +#define AFE_GAIN_CUR_R_MASK_SFT (0xffffffff << 0) + +/* AFE_GAIN0_CUR_L */ +/* AFE_GAIN1_CUR_L */ +/* AFE_GAIN2_CUR_L */ +/* AFE_GAIN3_CUR_L */ +#define AFE_GAIN_CUR_L_SFT 0 +#define AFE_GAIN_CUR_L_MASK 0xffffffff +#define AFE_GAIN_CUR_L_MASK_SFT (0xffffffff << 0) + +/* AFE_GAIN0_CON0 */ +/* AFE_GAIN1_CON0 */ +/* AFE_GAIN2_CON0 */ +/* AFE_GAIN3_CON0 */ +#define GAIN_TARGET_SYNC_ON_SFT 24 +#define GAIN_TARGET_SYNC_ON_MASK 0x1 +#define GAIN_TARGET_SYNC_ON_MASK_SFT (0x1 << 24) +#define GAIN_TIMEOUT_SFT 18 +#define GAIN_TIMEOUT_MASK 0x3f +#define GAIN_TIMEOUT_MASK_SFT (0x3f << 18) +#define GAIN_TRIG_SFT 17 +#define GAIN_TRIG_MASK 0x1 +#define GAIN_TRIG_MASK_SFT (0x1 << 17) +#define GAIN_ON_SFT 16 +#define GAIN_ON_MASK 0x1 +#define GAIN_ON_MASK_SFT (0x1 << 16) +#define GAIN_SAMPLE_PER_STEP_SFT 8 +#define GAIN_SAMPLE_PER_STEP_MASK 0xff +#define GAIN_SAMPLE_PER_STEP_MASK_SFT (0xff << 8) +#define GAIN_SEL_DOMAIN_SFT 5 +#define GAIN_SEL_DOMAIN_MASK 0x7 +#define GAIN_SEL_DOMAIN_MASK_SFT (0x7 << 5) +#define GAIN_SEL_FS_SFT 0 +#define GAIN_SEL_FS_MASK 0x1f +#define GAIN_SEL_FS_MASK_SFT (0x1f << 0) + +/* AFE_GAIN1_CON2 */ +#define GAIN1_DOWN_STEP_SFT 0 +#define GAIN1_DOWN_STEP_MASK 0x3fffff +#define GAIN1_DOWN_STEP_MASK_SFT (0x3fffff << 0) + +/* AFE_GAIN1_CON3 */ +#define 
GAIN1_UP_STEP_SFT 0 +#define GAIN1_UP_STEP_MASK 0x3fffff +#define GAIN1_UP_STEP_MASK_SFT (0x3fffff << 0) + +/* AFE_GAIN2_CON2 */ +#define GAIN2_DOWN_STEP_SFT 0 +#define GAIN2_DOWN_STEP_MASK 0x3fffff +#define GAIN2_DOWN_STEP_MASK_SFT (0x3fffff << 0) + +/* AFE_GAIN2_CON3 */ +#define GAIN2_UP_STEP_SFT 0 +#define GAIN2_UP_STEP_MASK 0x3fffff +#define GAIN2_UP_STEP_MASK_SFT (0x3fffff << 0) + +/* AFE_GAIN3_CON2 */ +#define GAIN3_DOWN_STEP_SFT 0 +#define GAIN3_DOWN_STEP_MASK 0x3fffff +#define GAIN3_DOWN_STEP_MASK_SFT (0x3fffff << 0) + +/* AFE_GAIN3_CON3 */ +#define GAIN3_UP_STEP_SFT 0 +#define GAIN3_UP_STEP_MASK 0x3fffff +#define GAIN3_UP_STEP_MASK_SFT (0x3fffff << 0) + +/* AFE_STF_CON0 */ +#define SLT_CNT_FLAG_RESET_SFT 28 +#define SLT_CNT_FLAG_RESET_MASK 0x1 +#define SLT_CNT_FLAG_RESET_MASK_SFT (0x1 << 28) +#define SLT_CNT_THD_SFT 16 +#define SLT_CNT_THD_MASK 0xfff +#define SLT_CNT_THD_MASK_SFT (0xfff << 16) +#define SIDE_TONE_HALF_TAP_NUM_SFT 4 +#define SIDE_TONE_HALF_TAP_NUM_MASK 0x7f +#define SIDE_TONE_HALF_TAP_NUM_MASK_SFT (0x7f << 4) +#define SIDE_TONE_ODD_MODE_SFT 1 +#define SIDE_TONE_ODD_MODE_MASK 0x1 +#define SIDE_TONE_ODD_MODE_MASK_SFT (0x1 << 1) +#define SIDE_TONE_ON_SFT 0 +#define SIDE_TONE_ON_MASK 0x1 +#define SIDE_TONE_ON_MASK_SFT (0x1 << 0) + +/* AFE_STF_CON1 */ +#define SIDE_TONE_IN_EN_SEL_DOMAIN_SFT 5 +#define SIDE_TONE_IN_EN_SEL_DOMAIN_MASK 0x7 +#define SIDE_TONE_IN_EN_SEL_DOMAIN_MASK_SFT (0x7 << 5) +#define SIDE_TONE_IN_EN_SEL_FS_SFT 0 +#define SIDE_TONE_IN_EN_SEL_FS_MASK 0x1f +#define SIDE_TONE_IN_EN_SEL_FS_MASK_SFT (0x1f << 0) + +/* AFE_STF_COEFF */ +#define SIDE_TONE_COEFFICIENT_R_W_SEL_SFT 24 +#define SIDE_TONE_COEFFICIENT_R_W_SEL_MASK 0x1 +#define SIDE_TONE_COEFFICIENT_R_W_SEL_MASK_SFT (0x1 << 24) +#define SIDE_TONE_COEFFICIENT_ADDR_SFT 16 +#define SIDE_TONE_COEFFICIENT_ADDR_MASK 0x1f +#define SIDE_TONE_COEFFICIENT_ADDR_MASK_SFT (0x1f << 16) +#define SIDE_TONE_COEFFICIENT_SFT 0 +#define SIDE_TONE_COEFFICIENT_MASK 0xffff +#define 
SIDE_TONE_COEFFICIENT_MASK_SFT (0xffff << 0) + +/* AFE_STF_GAIN */ +#define SIDE_TONE_POSITIVE_GAIN_SFT 16 +#define SIDE_TONE_POSITIVE_GAIN_MASK 0x7 +#define SIDE_TONE_POSITIVE_GAIN_MASK_SFT (0x7 << 16) +#define SIDE_TONE_GAIN_SFT 0 +#define SIDE_TONE_GAIN_MASK 0xffff +#define SIDE_TONE_GAIN_MASK_SFT (0xffff << 0) + +/* AFE_STF_MON */ +#define SIDE_TONE_R_RDY_SFT 30 +#define SIDE_TONE_R_RDY_MASK 0x1 +#define SIDE_TONE_R_RDY_MASK_SFT (0x1 << 30) +#define SIDE_TONE_W_RDY_SFT 29 +#define SIDE_TONE_W_RDY_MASK 0x1 +#define SIDE_TONE_W_RDY_MASK_SFT (0x1 << 29) +#define SLT_CNT_FLAG_SFT 28 +#define SLT_CNT_FLAG_MASK 0x1 +#define SLT_CNT_FLAG_MASK_SFT (0x1 << 28) +#define SLT_CNT_SFT 16 +#define SLT_CNT_MASK 0xfff +#define SLT_CNT_MASK_SFT (0xfff << 16) +#define SIDE_TONE_COEFF_SFT 0 +#define SIDE_TONE_COEFF_MASK 0xffff +#define SIDE_TONE_COEFF_MASK_SFT (0xffff << 0) + +/* AFE_STF_IP_VERSION */ +#define SIDE_TONE_IP_VERSION_SFT 0 +#define SIDE_TONE_IP_VERSION_MASK 0xffffffff +#define SIDE_TONE_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_CM_REG */ +#define AFE_CM_UPDATE_CNT_SFT 16 +#define AFE_CM_UPDATE_CNT_MASK 0x7fff +#define AFE_CM_UPDATE_CNT_MASK_SFT (0x7fff << 16) +#define AFE_CM_1X_EN_SEL_FS_SFT 8 +#define AFE_CM_1X_EN_SEL_FS_MASK 0x1f +#define AFE_CM_1X_EN_SEL_FS_MASK_SFT (0x1f << 8) +#define AFE_CM_CH_NUM_SFT 2 +#define AFE_CM_CH_NUM_MASK 0x1f +#define AFE_CM_CH_NUM_MASK_SFT (0x1f << 2) +#define AFE_CM_BYTE_SWAP_SFT 1 +#define AFE_CM_BYTE_SWAP_MASK 0x1 +#define AFE_CM_BYTE_SWAP_MASK_SFT (0x1 << 1) +#define AFE_CM_BYPASS_MODE_SFT 31 +#define AFE_CM_BYPASS_MODE_MASK 0x1 +#define AFE_CM_BYPASS_MODE_MASK_SFT (0x1 << 31) + +/* AFE_CM0_CON0 */ +#define AFE_CM0_BYPASS_MODE_SFT 31 +#define AFE_CM0_BYPASS_MODE_MASK 0x1 +#define AFE_CM0_BYPASS_MODE_MASK_SFT (0x1 << 31) +#define AFE_CM0_UPDATE_CNT_SFT 16 +#define AFE_CM0_UPDATE_CNT_MASK 0x7fff +#define AFE_CM0_UPDATE_CNT_MASK_SFT (0x7fff << 16) +#define AFE_CM0_1X_EN_SEL_DOMAIN_SFT 13 +#define AFE_CM0_1X_EN_SEL_DOMAIN_MASK 
0x7 +#define AFE_CM0_1X_EN_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define AFE_CM0_1X_EN_SEL_FS_SFT 8 +#define AFE_CM0_1X_EN_SEL_FS_MASK 0x1f +#define AFE_CM0_1X_EN_SEL_FS_MASK_SFT (0x1f << 8) +#define AFE_CM0_OUTPUT_MUX_SFT 7 +#define AFE_CM0_OUTPUT_MUX_MASK 0x1 +#define AFE_CM0_OUTPUT_MUX_MASK_SFT (0x1 << 7) +#define AFE_CM0_CH_NUM_SFT 2 +#define AFE_CM0_CH_NUM_MASK 0x1f +#define AFE_CM0_CH_NUM_MASK_SFT (0x1f << 2) +#define AFE_CM0_BYTE_SWAP_SFT 1 +#define AFE_CM0_BYTE_SWAP_MASK 0x1 +#define AFE_CM0_BYTE_SWAP_MASK_SFT (0x1 << 1) +#define AFE_CM0_ON_SFT 0 +#define AFE_CM0_ON_MASK 0x1 +#define AFE_CM0_ON_MASK_SFT (0x1 << 0) + +/* AFE_CM0_MON */ +#define AFE_CM0_BYPASS_MODE_MON_SFT 31 +#define AFE_CM0_BYPASS_MODE_MON_MASK 0x1 +#define AFE_CM0_BYPASS_MODE_MON_MASK_SFT (0x1 << 31) +#define AFE_CM0_OUTPUT_CNT_MON_SFT 16 +#define AFE_CM0_OUTPUT_CNT_MON_MASK 0x7fff +#define AFE_CM0_OUTPUT_CNT_MON_MASK_SFT (0x7fff << 16) +#define AFE_CM0_CUR_CHSET_MON_SFT 5 +#define AFE_CM0_CUR_CHSET_MON_MASK 0xf +#define AFE_CM0_CUR_CHSET_MON_MASK_SFT (0xf << 5) +#define AFE_CM0_ODD_FLAG_MON_SFT 4 +#define AFE_CM0_ODD_FLAG_MON_MASK 0x1 +#define AFE_CM0_ODD_FLAG_MON_MASK_SFT (0x1 << 4) +#define AFE_CM0_BYTE_SWAP_MON_SFT 1 +#define AFE_CM0_BYTE_SWAP_MON_MASK 0x1 +#define AFE_CM0_BYTE_SWAP_MON_MASK_SFT (0x1 << 1) +#define AFE_CM0_ON_MON_SFT 0 +#define AFE_CM0_ON_MON_MASK 0x1 +#define AFE_CM0_ON_MON_MASK_SFT (0x1 << 0) + +/* AFE_CM0_IP_VERSION */ +#define AFE_CM0_IP_VERSION_SFT 0 +#define AFE_CM0_IP_VERSION_MASK 0xffffffff +#define AFE_CM0_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_CM1_CON0 */ +#define AFE_CM1_BYPASS_MODE_SFT 31 +#define AFE_CM1_BYPASS_MODE_MASK 0x1 +#define AFE_CM1_BYPASS_MODE_MASK_SFT (0x1 << 31) +#define AFE_CM1_UPDATE_CNT_SFT 16 +#define AFE_CM1_UPDATE_CNT_MASK 0x7fff +#define AFE_CM1_UPDATE_CNT_MASK_SFT (0x7fff << 16) +#define AFE_CM1_1X_EN_SEL_DOMAIN_SFT 13 +#define AFE_CM1_1X_EN_SEL_DOMAIN_MASK 0x7 +#define AFE_CM1_1X_EN_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define 
AFE_CM1_1X_EN_SEL_FS_SFT 8 +#define AFE_CM1_1X_EN_SEL_FS_MASK 0x1f +#define AFE_CM1_1X_EN_SEL_FS_MASK_SFT (0x1f << 8) +#define AFE_CM1_OUTPUT_MUX_SFT 7 +#define AFE_CM1_OUTPUT_MUX_MASK 0x1 +#define AFE_CM1_OUTPUT_MUX_MASK_SFT (0x1 << 7) +#define AFE_CM1_CH_NUM_SFT 2 +#define AFE_CM1_CH_NUM_MASK 0x1f +#define AFE_CM1_CH_NUM_MASK_SFT (0x1f << 2) +#define AFE_CM1_BYTE_SWAP_SFT 1 +#define AFE_CM1_BYTE_SWAP_MASK 0x1 +#define AFE_CM1_BYTE_SWAP_MASK_SFT (0x1 << 1) +#define AFE_CM1_ON_SFT 0 +#define AFE_CM1_ON_MASK 0x1 +#define AFE_CM1_ON_MASK_SFT (0x1 << 0) + +/* AFE_CM1_MON */ +#define AFE_CM1_BYPASS_MODE_MON_SFT 31 +#define AFE_CM1_BYPASS_MODE_MON_MASK 0x1 +#define AFE_CM1_BYPASS_MODE_MON_MASK_SFT (0x1 << 31) +#define AFE_CM1_OUTPUT_CNT_MON_SFT 16 +#define AFE_CM1_OUTPUT_CNT_MON_MASK 0x7fff +#define AFE_CM1_OUTPUT_CNT_MON_MASK_SFT (0x7fff << 16) +#define AFE_CM1_CUR_CHSET_MON_SFT 5 +#define AFE_CM1_CUR_CHSET_MON_MASK 0xf +#define AFE_CM1_CUR_CHSET_MON_MASK_SFT (0xf << 5) +#define AFE_CM1_ODD_FLAG_MON_SFT 4 +#define AFE_CM1_ODD_FLAG_MON_MASK 0x1 +#define AFE_CM1_ODD_FLAG_MON_MASK_SFT (0x1 << 4) +#define AFE_CM1_BYTE_SWAP_MON_SFT 1 +#define AFE_CM1_BYTE_SWAP_MON_MASK 0x1 +#define AFE_CM1_BYTE_SWAP_MON_MASK_SFT (0x1 << 1) +#define AFE_CM1_ON_MON_SFT 0 +#define AFE_CM1_ON_MON_MASK 0x1 +#define AFE_CM1_ON_MON_MASK_SFT (0x1 << 0) + +/* AFE_CM1_IP_VERSION */ +#define AFE_CM1_IP_VERSION_SFT 0 +#define AFE_CM1_IP_VERSION_MASK 0xffffffff +#define AFE_CM1_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_CM2_CON0 */ +#define AFE_CM2_BYPASS_MODE_SFT 31 +#define AFE_CM2_BYPASS_MODE_MASK 0x1 +#define AFE_CM2_BYPASS_MODE_MASK_SFT (0x1 << 31) +#define AFE_CM2_UPDATE_CNT_SFT 16 +#define AFE_CM2_UPDATE_CNT_MASK 0x7fff +#define AFE_CM2_UPDATE_CNT_MASK_SFT (0x7fff << 16) +#define AFE_CM2_1X_EN_SEL_DOMAIN_SFT 13 +#define AFE_CM2_1X_EN_SEL_DOMAIN_MASK 0x7 +#define AFE_CM2_1X_EN_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define AFE_CM2_1X_EN_SEL_FS_SFT 8 +#define AFE_CM2_1X_EN_SEL_FS_MASK 0x1f +#define 
AFE_CM2_1X_EN_SEL_FS_MASK_SFT (0x1f << 8) +#define AFE_CM2_OUTPUT_MUX_SFT 7 +#define AFE_CM2_OUTPUT_MUX_MASK 0x1 +#define AFE_CM2_OUTPUT_MUX_MASK_SFT (0x1 << 7) +#define AFE_CM2_CH_NUM_SFT 2 +#define AFE_CM2_CH_NUM_MASK 0x1f +#define AFE_CM2_CH_NUM_MASK_SFT (0x1f << 2) +#define AFE_CM2_BYTE_SWAP_SFT 1 +#define AFE_CM2_BYTE_SWAP_MASK 0x1 +#define AFE_CM2_BYTE_SWAP_MASK_SFT (0x1 << 1) +#define AFE_CM2_ON_SFT 0 +#define AFE_CM2_ON_MASK 0x1 +#define AFE_CM2_ON_MASK_SFT (0x1 << 0) + +/* AFE_CM2_MON */ +#define AFE_CM2_BYPASS_MODE_MON_SFT 31 +#define AFE_CM2_BYPASS_MODE_MON_MASK 0x1 +#define AFE_CM2_BYPASS_MODE_MON_MASK_SFT (0x1 << 31) +#define AFE_CM2_OUTPUT_CNT_MON_SFT 16 +#define AFE_CM2_OUTPUT_CNT_MON_MASK 0x7fff +#define AFE_CM2_OUTPUT_CNT_MON_MASK_SFT (0x7fff << 16) +#define AFE_CM2_CUR_CHSET_MON_SFT 5 +#define AFE_CM2_CUR_CHSET_MON_MASK 0xf +#define AFE_CM2_CUR_CHSET_MON_MASK_SFT (0xf << 5) +#define AFE_CM2_ODD_FLAG_MON_SFT 4 +#define AFE_CM2_ODD_FLAG_MON_MASK 0x1 +#define AFE_CM2_ODD_FLAG_MON_MASK_SFT (0x1 << 4) +#define AFE_CM2_BYTE_SWAP_MON_SFT 1 +#define AFE_CM2_BYTE_SWAP_MON_MASK 0x1 +#define AFE_CM2_BYTE_SWAP_MON_MASK_SFT (0x1 << 1) +#define AFE_CM2_ON_MON_SFT 0 +#define AFE_CM2_ON_MON_MASK 0x1 +#define AFE_CM2_ON_MON_MASK_SFT (0x1 << 0) + +/* AFE_CM2_IP_VERSION */ +#define AFE_CM2_IP_VERSION_SFT 0 +#define AFE_CM2_IP_VERSION_MASK 0xffffffff +#define AFE_CM2_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_SRC_CON0 */ +#define ULCF_CFG_EN_CTL_SFT 31 +#define ULCF_CFG_EN_CTL_MASK 0x1 +#define ULCF_CFG_EN_CTL_MASK_SFT (0x1 << 31) +#define UL_DMIC_PHASE_SEL_CH1_SFT 27 +#define UL_DMIC_PHASE_SEL_CH1_MASK 0x7 +#define UL_DMIC_PHASE_SEL_CH1_MASK_SFT (0x7 << 27) +#define UL_DMIC_PHASE_SEL_CH2_SFT 24 +#define UL_DMIC_PHASE_SEL_CH2_MASK 0x7 +#define UL_DMIC_PHASE_SEL_CH2_MASK_SFT (0x7 << 24) +#define UL_DMIC_TWO_WIRE_CTL_SFT 23 +#define UL_DMIC_TWO_WIRE_CTL_MASK 0x1 +#define UL_DMIC_TWO_WIRE_CTL_MASK_SFT (0x1 << 23) +#define UL_MODE_3P25M_CH2_CTL_SFT 22 
+#define UL_MODE_3P25M_CH2_CTL_MASK 0x1 +#define UL_MODE_3P25M_CH2_CTL_MASK_SFT (0x1 << 22) +#define UL_MODE_3P25M_CH1_CTL_SFT 21 +#define UL_MODE_3P25M_CH1_CTL_MASK 0x1 +#define UL_MODE_3P25M_CH1_CTL_MASK_SFT (0x1 << 21) +#define UL_VOICE_MODE_CH1_CH2_CTL_SFT 17 +#define UL_VOICE_MODE_CH1_CH2_CTL_MASK 0x7 +#define UL_VOICE_MODE_CH1_CH2_CTL_MASK_SFT (0x7 << 17) +#define UL_AP_DMIC_ON_SFT 16 +#define UL_AP_DMIC_ON_MASK 0x1 +#define UL_AP_DMIC_ON_MASK_SFT (0x1 << 16) +#define DMIC_LOW_POWER_MODE_CTL_SFT 14 +#define DMIC_LOW_POWER_MODE_CTL_MASK 0x3 +#define DMIC_LOW_POWER_MODE_CTL_MASK_SFT (0x3 << 14) +#define UL_DISABLE_HW_CG_CTL_SFT 12 +#define UL_DISABLE_HW_CG_CTL_MASK 0x1 +#define UL_DISABLE_HW_CG_CTL_MASK_SFT (0x1 << 12) +#define AMIC_26M_SEL_CTL_SFT 11 +#define AMIC_26M_SEL_CTL_MASK 0x1 +#define AMIC_26M_SEL_CTL_MASK_SFT (0x1 << 11) +#define UL_IIR_ON_TMP_CTL_SFT 10 +#define UL_IIR_ON_TMP_CTL_MASK 0x1 +#define UL_IIR_ON_TMP_CTL_MASK_SFT (0x1 << 10) +#define UL_IIRMODE_CTL_SFT 7 +#define UL_IIRMODE_CTL_MASK 0x7 +#define UL_IIRMODE_CTL_MASK_SFT (0x7 << 7) +#define DIGMIC_4P33M_SEL_SFT 6 +#define DIGMIC_4P33M_SEL_MASK 0x1 +#define DIGMIC_4P33M_SEL_MASK_SFT (0x1 << 6) +#define DIGMIC_3P25M_1P625M_SEL_CTL_SFT 5 +#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK 0x1 +#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK_SFT (0x1 << 5) +#define AMIC_6P5M_SEL_CTL_SFT 4 +#define AMIC_6P5M_SEL_CTL_MASK 0x1 +#define AMIC_6P5M_SEL_CTL_MASK_SFT (0x1 << 4) +#define AMIC_1P625M_SEL_CTL_SFT 3 +#define AMIC_1P625M_SEL_CTL_MASK 0x1 +#define AMIC_1P625M_SEL_CTL_MASK_SFT (0x1 << 3) +#define UL_LOOP_BACK_MODE_CTL_SFT 2 +#define UL_LOOP_BACK_MODE_CTL_MASK 0x1 +#define UL_LOOP_BACK_MODE_CTL_MASK_SFT (0x1 << 2) +#define UL_SDM_3_LEVEL_CTL_SFT 1 +#define UL_SDM_3_LEVEL_CTL_MASK 0x1 +#define UL_SDM_3_LEVEL_CTL_MASK_SFT (0x1 << 1) +#define UL_SRC_ON_TMP_CTL_SFT 0 +#define UL_SRC_ON_TMP_CTL_MASK 0x1 +#define UL_SRC_ON_TMP_CTL_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_UL0_SRC_CON1 */ +#define ADDA_UL_GAIN_VALUE_SFT 16 
+#define ADDA_UL_GAIN_VALUE_MASK 0xffff +#define ADDA_UL_GAIN_VALUE_MASK_SFT (0xffff << 16) +#define ADDA_UL_POSTIVEGAIN_SFT 12 +#define ADDA_UL_POSTIVEGAIN_MASK 0x7 +#define ADDA_UL_POSTIVEGAIN_MASK_SFT (0x7 << 12) +#define ADDA_UL_ODDTAP_MODE_SFT 11 +#define ADDA_UL_ODDTAP_MODE_MASK 0x1 +#define ADDA_UL_ODDTAP_MODE_MASK_SFT (0x1 << 11) +#define ADDA_UL_HALF_TAP_NUM_SFT 5 +#define ADDA_UL_HALF_TAP_NUM_MASK 0x3f +#define ADDA_UL_HALF_TAP_NUM_MASK_SFT (0x3f << 5) +#define FIFO_SOFT_RST_SFT 4 +#define FIFO_SOFT_RST_MASK 0x1 +#define FIFO_SOFT_RST_MASK_SFT (0x1 << 4) +#define FIFO_SOFT_RST_EN_SFT 3 +#define FIFO_SOFT_RST_EN_MASK 0x1 +#define FIFO_SOFT_RST_EN_MASK_SFT (0x1 << 3) +#define LR_SWAP_SFT 2 +#define LR_SWAP_MASK 0x1 +#define LR_SWAP_MASK_SFT (0x1 << 2) +#define GAIN_MODE_SFT 0 +#define GAIN_MODE_MASK 0x3 +#define GAIN_MODE_MASK_SFT (0x3 << 0) + +/* AFE_ADDA_UL0_SRC_CON2 */ +#define C_DAC_EN_CTL_SFT 27 +#define C_DAC_EN_CTL_MASK 0x1 +#define C_DAC_EN_CTL_MASK_SFT (0x1 << 27) +#define C_MUTE_SW_CTL_SFT 26 +#define C_MUTE_SW_CTL_MASK 0x1 +#define C_MUTE_SW_CTL_MASK_SFT (0x1 << 26) +#define C_AMP_DIV_CH2_CTL_SFT 21 +#define C_AMP_DIV_CH2_CTL_MASK 0x7 +#define C_AMP_DIV_CH2_CTL_MASK_SFT (0x7 << 21) +#define C_FREQ_DIV_CH2_CTL_SFT 16 +#define C_FREQ_DIV_CH2_CTL_MASK 0x1f +#define C_FREQ_DIV_CH2_CTL_MASK_SFT (0x1f << 16) +#define C_SINE_MODE_CH2_CTL_SFT 12 +#define C_SINE_MODE_CH2_CTL_MASK 0xf +#define C_SINE_MODE_CH2_CTL_MASK_SFT (0xf << 12) +#define C_AMP_DIV_CH1_CTL_SFT 9 +#define C_AMP_DIV_CH1_CTL_MASK 0x7 +#define C_AMP_DIV_CH1_CTL_MASK_SFT (0x7 << 9) +#define C_FREQ_DIV_CH1_CTL_SFT 4 +#define C_FREQ_DIV_CH1_CTL_MASK 0x1f +#define C_FREQ_DIV_CH1_CTL_MASK_SFT (0x1f << 4) +#define C_SINE_MODE_CH1_CTL_SFT 0 +#define C_SINE_MODE_CH1_CTL_MASK 0xf +#define C_SINE_MODE_CH1_CTL_MASK_SFT (0xf << 0) + +/* AFE_ADDA_UL0_SRC_DEBUG */ +#define UL_SLT_CNT_FLAG_RESET_CTL_SFT 16 +#define UL_SLT_CNT_FLAG_RESET_CTL_MASK 0x1 +#define UL_SLT_CNT_FLAG_RESET_CTL_MASK_SFT (0x1 << 16) 
+#define FIFO_DIGMIC_TESTIN_SFT 12 +#define FIFO_DIGMIC_TESTIN_MASK 0x3 +#define FIFO_DIGMIC_TESTIN_MASK_SFT (0x3 << 12) +#define FIFO_DIGMIC_WDATA_TESTEN_SFT 11 +#define FIFO_DIGMIC_WDATA_TESTEN_MASK 0x1 +#define FIFO_DIGMIC_WDATA_TESTEN_MASK_SFT (0x1 << 11) +#define SLT_CNT_THD_CTL_SFT 0 +#define SLT_CNT_THD_CTL_MASK 0x7ff +#define SLT_CNT_THD_CTL_MASK_SFT (0x7ff << 0) + +/* AFE_ADDA_UL0_SRC_DEBUG_MON0 */ +#define SLT_CNT_FLAG_CTL_SFT 16 +#define SLT_CNT_FLAG_CTL_MASK 0x1 +#define SLT_CNT_FLAG_CTL_MASK_SFT (0x1 << 16) +#define SLT_COUNTER_CTL_SFT 0 +#define SLT_COUNTER_CTL_MASK 0x7ff +#define SLT_COUNTER_CTL_MASK_SFT (0x7ff << 0) + +/* AFE_ADDA_UL0_IIR_COEF_02_01 */ +#define ADDA_IIR_COEF_02_01_SFT 0 +#define ADDA_IIR_COEF_02_01_MASK 0xffffffff +#define ADDA_IIR_COEF_02_01_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_IIR_COEF_04_03 */ +#define ADDA_IIR_COEF_04_03_SFT 0 +#define ADDA_IIR_COEF_04_03_MASK 0xffffffff +#define ADDA_IIR_COEF_04_03_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_IIR_COEF_06_05 */ +#define ADDA_IIR_COEF_06_05_SFT 0 +#define ADDA_IIR_COEF_06_05_MASK 0xffffffff +#define ADDA_IIR_COEF_06_05_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_IIR_COEF_08_07 */ +#define ADDA_IIR_COEF_08_07_SFT 0 +#define ADDA_IIR_COEF_08_07_MASK 0xffffffff +#define ADDA_IIR_COEF_08_07_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_IIR_COEF_10_09 */ +#define ADDA_IIR_COEF_10_09_SFT 0 +#define ADDA_IIR_COEF_10_09_MASK 0xffffffff +#define ADDA_IIR_COEF_10_09_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_02_01 */ +#define ADDA_ULCF_CFG_02_01_SFT 0 +#define ADDA_ULCF_CFG_02_01_MASK 0xffffffff +#define ADDA_ULCF_CFG_02_01_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_04_03 */ +#define ADDA_ULCF_CFG_04_03_SFT 0 +#define ADDA_ULCF_CFG_04_03_MASK 0xffffffff +#define ADDA_ULCF_CFG_04_03_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_06_05 */ +#define ADDA_ULCF_CFG_06_05_SFT 0 +#define ADDA_ULCF_CFG_06_05_MASK 0xffffffff +#define 
ADDA_ULCF_CFG_06_05_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_08_07 */ +#define ADDA_ULCF_CFG_08_07_SFT 0 +#define ADDA_ULCF_CFG_08_07_MASK 0xffffffff +#define ADDA_ULCF_CFG_08_07_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_10_09 */ +#define ADDA_ULCF_CFG_10_09_SFT 0 +#define ADDA_ULCF_CFG_10_09_MASK 0xffffffff +#define ADDA_ULCF_CFG_10_09_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_12_11 */ +#define ADDA_ULCF_CFG_12_11_SFT 0 +#define ADDA_ULCF_CFG_12_11_MASK 0xffffffff +#define ADDA_ULCF_CFG_12_11_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_14_13 */ +#define ADDA_ULCF_CFG_14_13_SFT 0 +#define ADDA_ULCF_CFG_14_13_MASK 0xffffffff +#define ADDA_ULCF_CFG_14_13_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_16_15 */ +#define ADDA_ULCF_CFG_16_15_SFT 0 +#define ADDA_ULCF_CFG_16_15_MASK 0xffffffff +#define ADDA_ULCF_CFG_16_15_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_18_17 */ +#define ADDA_ULCF_CFG_18_17_SFT 0 +#define ADDA_ULCF_CFG_18_17_MASK 0xffffffff +#define ADDA_ULCF_CFG_18_17_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_20_19 */ +#define ADDA_ULCF_CFG_20_19_SFT 0 +#define ADDA_ULCF_CFG_20_19_MASK 0xffffffff +#define ADDA_ULCF_CFG_20_19_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_22_21 */ +#define ADDA_ULCF_CFG_22_21_SFT 0 +#define ADDA_ULCF_CFG_22_21_MASK 0xffffffff +#define ADDA_ULCF_CFG_22_21_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_24_23 */ +#define ADDA_ULCF_CFG_24_23_SFT 0 +#define ADDA_ULCF_CFG_24_23_MASK 0xffffffff +#define ADDA_ULCF_CFG_24_23_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_26_25 */ +#define ADDA_ULCF_CFG_26_25_SFT 0 +#define ADDA_ULCF_CFG_26_25_MASK 0xffffffff +#define ADDA_ULCF_CFG_26_25_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_28_27 */ +#define ADDA_ULCF_CFG_28_27_SFT 0 +#define ADDA_ULCF_CFG_28_27_MASK 0xffffffff +#define ADDA_ULCF_CFG_28_27_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_30_29 */ +#define 
ADDA_ULCF_CFG_30_29_SFT 0 +#define ADDA_ULCF_CFG_30_29_MASK 0xffffffff +#define ADDA_ULCF_CFG_30_29_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_ULCF_CFG_32_31 */ +#define ADDA_ULCF_CFG_32_31_SFT 0 +#define ADDA_ULCF_CFG_32_31_MASK 0xffffffff +#define ADDA_ULCF_CFG_32_31_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_IP_VERSION */ +#define ADDA_ULCF_IP_VERSION_SFT 0 +#define ADDA_ULCF_IP_VERSION_MASK 0xffffffff +#define ADDA_ULCF_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_SRC_CON0 */ +#define ULCF_CFG_EN_CTL_SFT 31 +#define ULCF_CFG_EN_CTL_MASK 0x1 +#define ULCF_CFG_EN_CTL_MASK_SFT (0x1 << 31) +#define UL_DMIC_PHASE_SEL_CH1_SFT 27 +#define UL_DMIC_PHASE_SEL_CH1_MASK 0x7 +#define UL_DMIC_PHASE_SEL_CH1_MASK_SFT (0x7 << 27) +#define UL_DMIC_PHASE_SEL_CH2_SFT 24 +#define UL_DMIC_PHASE_SEL_CH2_MASK 0x7 +#define UL_DMIC_PHASE_SEL_CH2_MASK_SFT (0x7 << 24) +#define UL_DMIC_TWO_WIRE_CTL_SFT 23 +#define UL_DMIC_TWO_WIRE_CTL_MASK 0x1 +#define UL_DMIC_TWO_WIRE_CTL_MASK_SFT (0x1 << 23) +#define UL_MODE_3P25M_CH2_CTL_SFT 22 +#define UL_MODE_3P25M_CH2_CTL_MASK 0x1 +#define UL_MODE_3P25M_CH2_CTL_MASK_SFT (0x1 << 22) +#define UL_MODE_3P25M_CH1_CTL_SFT 21 +#define UL_MODE_3P25M_CH1_CTL_MASK 0x1 +#define UL_MODE_3P25M_CH1_CTL_MASK_SFT (0x1 << 21) +#define UL_VOICE_MODE_CH1_CH2_CTL_SFT 17 +#define UL_VOICE_MODE_CH1_CH2_CTL_MASK 0x7 +#define UL_VOICE_MODE_CH1_CH2_CTL_MASK_SFT (0x7 << 17) +#define UL_AP_DMIC_ON_SFT 16 +#define UL_AP_DMIC_ON_MASK 0x1 +#define UL_AP_DMIC_ON_MASK_SFT (0x1 << 16) +#define DMIC_LOW_POWER_MODE_CTL_SFT 14 +#define DMIC_LOW_POWER_MODE_CTL_MASK 0x3 +#define DMIC_LOW_POWER_MODE_CTL_MASK_SFT (0x3 << 14) +#define UL_DISABLE_HW_CG_CTL_SFT 12 +#define UL_DISABLE_HW_CG_CTL_MASK 0x1 +#define UL_DISABLE_HW_CG_CTL_MASK_SFT (0x1 << 12) +#define AMIC_26M_SEL_CTL_SFT 11 +#define AMIC_26M_SEL_CTL_MASK 0x1 +#define AMIC_26M_SEL_CTL_MASK_SFT (0x1 << 11) +#define UL_IIR_ON_TMP_CTL_SFT 10 +#define UL_IIR_ON_TMP_CTL_MASK 0x1 +#define UL_IIR_ON_TMP_CTL_MASK_SFT (0x1 
<< 10) +#define UL_IIRMODE_CTL_SFT 7 +#define UL_IIRMODE_CTL_MASK 0x7 +#define UL_IIRMODE_CTL_MASK_SFT (0x7 << 7) +#define DIGMIC_4P33M_SEL_SFT 6 +#define DIGMIC_4P33M_SEL_MASK 0x1 +#define DIGMIC_4P33M_SEL_MASK_SFT (0x1 << 6) +#define DIGMIC_3P25M_1P625M_SEL_CTL_SFT 5 +#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK 0x1 +#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK_SFT (0x1 << 5) +#define AMIC_6P5M_SEL_CTL_SFT 4 +#define AMIC_6P5M_SEL_CTL_MASK 0x1 +#define AMIC_6P5M_SEL_CTL_MASK_SFT (0x1 << 4) +#define AMIC_1P625M_SEL_CTL_SFT 3 +#define AMIC_1P625M_SEL_CTL_MASK 0x1 +#define AMIC_1P625M_SEL_CTL_MASK_SFT (0x1 << 3) +#define UL_LOOP_BACK_MODE_CTL_SFT 2 +#define UL_LOOP_BACK_MODE_CTL_MASK 0x1 +#define UL_LOOP_BACK_MODE_CTL_MASK_SFT (0x1 << 2) +#define UL_SDM_3_LEVEL_CTL_SFT 1 +#define UL_SDM_3_LEVEL_CTL_MASK 0x1 +#define UL_SDM_3_LEVEL_CTL_MASK_SFT (0x1 << 1) +#define UL_SRC_ON_TMP_CTL_SFT 0 +#define UL_SRC_ON_TMP_CTL_MASK 0x1 +#define UL_SRC_ON_TMP_CTL_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_UL1_SRC_CON1 */ +#define ADDA_UL_GAIN_VALUE_SFT 16 +#define ADDA_UL_GAIN_VALUE_MASK 0xffff +#define ADDA_UL_GAIN_VALUE_MASK_SFT (0xffff << 16) +#define ADDA_UL_POSTIVEGAIN_SFT 12 +#define ADDA_UL_POSTIVEGAIN_MASK 0x7 +#define ADDA_UL_POSTIVEGAIN_MASK_SFT (0x7 << 12) +#define ADDA_UL_ODDTAP_MODE_SFT 11 +#define ADDA_UL_ODDTAP_MODE_MASK 0x1 +#define ADDA_UL_ODDTAP_MODE_MASK_SFT (0x1 << 11) +#define ADDA_UL_HALF_TAP_NUM_SFT 5 +#define ADDA_UL_HALF_TAP_NUM_MASK 0x3f +#define ADDA_UL_HALF_TAP_NUM_MASK_SFT (0x3f << 5) +#define FIFO_SOFT_RST_SFT 4 +#define FIFO_SOFT_RST_MASK 0x1 +#define FIFO_SOFT_RST_MASK_SFT (0x1 << 4) +#define FIFO_SOFT_RST_EN_SFT 3 +#define FIFO_SOFT_RST_EN_MASK 0x1 +#define FIFO_SOFT_RST_EN_MASK_SFT (0x1 << 3) +#define LR_SWAP_SFT 2 +#define LR_SWAP_MASK 0x1 +#define LR_SWAP_MASK_SFT (0x1 << 2) +#define GAIN_MODE_SFT 0 +#define GAIN_MODE_MASK 0x3 +#define GAIN_MODE_MASK_SFT (0x3 << 0) + +/* AFE_ADDA_UL1_SRC_CON2 */ +#define C_DAC_EN_CTL_SFT 27 +#define C_DAC_EN_CTL_MASK 0x1 
+#define C_DAC_EN_CTL_MASK_SFT (0x1 << 27) +#define C_MUTE_SW_CTL_SFT 26 +#define C_MUTE_SW_CTL_MASK 0x1 +#define C_MUTE_SW_CTL_MASK_SFT (0x1 << 26) +#define C_AMP_DIV_CH2_CTL_SFT 21 +#define C_AMP_DIV_CH2_CTL_MASK 0x7 +#define C_AMP_DIV_CH2_CTL_MASK_SFT (0x7 << 21) +#define C_FREQ_DIV_CH2_CTL_SFT 16 +#define C_FREQ_DIV_CH2_CTL_MASK 0x1f +#define C_FREQ_DIV_CH2_CTL_MASK_SFT (0x1f << 16) +#define C_SINE_MODE_CH2_CTL_SFT 12 +#define C_SINE_MODE_CH2_CTL_MASK 0xf +#define C_SINE_MODE_CH2_CTL_MASK_SFT (0xf << 12) +#define C_AMP_DIV_CH1_CTL_SFT 9 +#define C_AMP_DIV_CH1_CTL_MASK 0x7 +#define C_AMP_DIV_CH1_CTL_MASK_SFT (0x7 << 9) +#define C_FREQ_DIV_CH1_CTL_SFT 4 +#define C_FREQ_DIV_CH1_CTL_MASK 0x1f +#define C_FREQ_DIV_CH1_CTL_MASK_SFT (0x1f << 4) +#define C_SINE_MODE_CH1_CTL_SFT 0 +#define C_SINE_MODE_CH1_CTL_MASK 0xf +#define C_SINE_MODE_CH1_CTL_MASK_SFT (0xf << 0) + +/* AFE_ADDA_UL1_SRC_DEBUG */ +#define UL_SLT_CNT_FLAG_RESET_CTL_SFT 16 +#define UL_SLT_CNT_FLAG_RESET_CTL_MASK 0x1 +#define UL_SLT_CNT_FLAG_RESET_CTL_MASK_SFT (0x1 << 16) +#define FIFO_DIGMIC_TESTIN_SFT 12 +#define FIFO_DIGMIC_TESTIN_MASK 0x3 +#define FIFO_DIGMIC_TESTIN_MASK_SFT (0x3 << 12) +#define FIFO_DIGMIC_WDATA_TESTEN_SFT 11 +#define FIFO_DIGMIC_WDATA_TESTEN_MASK 0x1 +#define FIFO_DIGMIC_WDATA_TESTEN_MASK_SFT (0x1 << 11) +#define SLT_CNT_THD_CTL_SFT 0 +#define SLT_CNT_THD_CTL_MASK 0x7ff +#define SLT_CNT_THD_CTL_MASK_SFT (0x7ff << 0) + +/* AFE_ADDA_UL1_SRC_DEBUG_MON0 */ +#define SLT_CNT_FLAG_CTL_SFT 16 +#define SLT_CNT_FLAG_CTL_MASK 0x1 +#define SLT_CNT_FLAG_CTL_MASK_SFT (0x1 << 16) +#define SLT_COUNTER_CTL_SFT 0 +#define SLT_COUNTER_CTL_MASK 0x7ff +#define SLT_COUNTER_CTL_MASK_SFT (0x7ff << 0) + +/* AFE_ADDA_UL1_IIR_COEF_02_01 */ +#define ADDA_IIR_COEF_02_01_SFT 0 +#define ADDA_IIR_COEF_02_01_MASK 0xffffffff +#define ADDA_IIR_COEF_02_01_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_IIR_COEF_04_03 */ +#define ADDA_IIR_COEF_04_03_SFT 0 +#define ADDA_IIR_COEF_04_03_MASK 0xffffffff +#define 
ADDA_IIR_COEF_04_03_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_IIR_COEF_06_05 */ +#define ADDA_IIR_COEF_06_05_SFT 0 +#define ADDA_IIR_COEF_06_05_MASK 0xffffffff +#define ADDA_IIR_COEF_06_05_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_IIR_COEF_08_07 */ +#define ADDA_IIR_COEF_08_07_SFT 0 +#define ADDA_IIR_COEF_08_07_MASK 0xffffffff +#define ADDA_IIR_COEF_08_07_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_IIR_COEF_10_09 */ +#define ADDA_IIR_COEF_10_09_SFT 0 +#define ADDA_IIR_COEF_10_09_MASK 0xffffffff +#define ADDA_IIR_COEF_10_09_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_02_01 */ +#define ADDA_ULCF_CFG_02_01_SFT 0 +#define ADDA_ULCF_CFG_02_01_MASK 0xffffffff +#define ADDA_ULCF_CFG_02_01_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_04_03 */ +#define ADDA_ULCF_CFG_04_03_SFT 0 +#define ADDA_ULCF_CFG_04_03_MASK 0xffffffff +#define ADDA_ULCF_CFG_04_03_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_06_05 */ +#define ADDA_ULCF_CFG_06_05_SFT 0 +#define ADDA_ULCF_CFG_06_05_MASK 0xffffffff +#define ADDA_ULCF_CFG_06_05_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_08_07 */ +#define ADDA_ULCF_CFG_08_07_SFT 0 +#define ADDA_ULCF_CFG_08_07_MASK 0xffffffff +#define ADDA_ULCF_CFG_08_07_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_10_09 */ +#define ADDA_ULCF_CFG_10_09_SFT 0 +#define ADDA_ULCF_CFG_10_09_MASK 0xffffffff +#define ADDA_ULCF_CFG_10_09_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_12_11 */ +#define ADDA_ULCF_CFG_12_11_SFT 0 +#define ADDA_ULCF_CFG_12_11_MASK 0xffffffff +#define ADDA_ULCF_CFG_12_11_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_14_13 */ +#define ADDA_ULCF_CFG_14_13_SFT 0 +#define ADDA_ULCF_CFG_14_13_MASK 0xffffffff +#define ADDA_ULCF_CFG_14_13_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_16_15 */ +#define ADDA_ULCF_CFG_16_15_SFT 0 +#define ADDA_ULCF_CFG_16_15_MASK 0xffffffff +#define ADDA_ULCF_CFG_16_15_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_18_17 */ +#define 
ADDA_ULCF_CFG_18_17_SFT 0 +#define ADDA_ULCF_CFG_18_17_MASK 0xffffffff +#define ADDA_ULCF_CFG_18_17_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_20_19 */ +#define ADDA_ULCF_CFG_20_19_SFT 0 +#define ADDA_ULCF_CFG_20_19_MASK 0xffffffff +#define ADDA_ULCF_CFG_20_19_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_22_21 */ +#define ADDA_ULCF_CFG_22_21_SFT 0 +#define ADDA_ULCF_CFG_22_21_MASK 0xffffffff +#define ADDA_ULCF_CFG_22_21_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_24_23 */ +#define ADDA_ULCF_CFG_24_23_SFT 0 +#define ADDA_ULCF_CFG_24_23_MASK 0xffffffff +#define ADDA_ULCF_CFG_24_23_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_26_25 */ +#define ADDA_ULCF_CFG_26_25_SFT 0 +#define ADDA_ULCF_CFG_26_25_MASK 0xffffffff +#define ADDA_ULCF_CFG_26_25_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_28_27 */ +#define ADDA_ULCF_CFG_28_27_SFT 0 +#define ADDA_ULCF_CFG_28_27_MASK 0xffffffff +#define ADDA_ULCF_CFG_28_27_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_30_29 */ +#define ADDA_ULCF_CFG_30_29_SFT 0 +#define ADDA_ULCF_CFG_30_29_MASK 0xffffffff +#define ADDA_ULCF_CFG_30_29_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_ULCF_CFG_32_31 */ +#define ADDA_ULCF_CFG_32_31_SFT 0 +#define ADDA_ULCF_CFG_32_31_MASK 0xffffffff +#define ADDA_ULCF_CFG_32_31_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_IP_VERSION */ +#define ADDA_ULCF_IP_VERSION_SFT 0 +#define ADDA_ULCF_IP_VERSION_MASK 0xffffffff +#define ADDA_ULCF_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_SRC_CON0 */ +#define ULCF_CFG_EN_CTL_SFT 31 +#define ULCF_CFG_EN_CTL_MASK 0x1 +#define ULCF_CFG_EN_CTL_MASK_SFT (0x1 << 31) +#define UL_DMIC_PHASE_SEL_CH1_SFT 27 +#define UL_DMIC_PHASE_SEL_CH1_MASK 0x7 +#define UL_DMIC_PHASE_SEL_CH1_MASK_SFT (0x7 << 27) +#define UL_DMIC_PHASE_SEL_CH2_SFT 24 +#define UL_DMIC_PHASE_SEL_CH2_MASK 0x7 +#define UL_DMIC_PHASE_SEL_CH2_MASK_SFT (0x7 << 24) +#define UL_DMIC_TWO_WIRE_CTL_SFT 23 +#define UL_DMIC_TWO_WIRE_CTL_MASK 0x1 +#define 
UL_DMIC_TWO_WIRE_CTL_MASK_SFT (0x1 << 23) +#define UL_MODE_3P25M_CH2_CTL_SFT 22 +#define UL_MODE_3P25M_CH2_CTL_MASK 0x1 +#define UL_MODE_3P25M_CH2_CTL_MASK_SFT (0x1 << 22) +#define UL_MODE_3P25M_CH1_CTL_SFT 21 +#define UL_MODE_3P25M_CH1_CTL_MASK 0x1 +#define UL_MODE_3P25M_CH1_CTL_MASK_SFT (0x1 << 21) +#define UL_VOICE_MODE_CH1_CH2_CTL_SFT 17 +#define UL_VOICE_MODE_CH1_CH2_CTL_MASK 0x7 +#define UL_VOICE_MODE_CH1_CH2_CTL_MASK_SFT (0x7 << 17) +#define UL_AP_DMIC_ON_SFT 16 +#define UL_AP_DMIC_ON_MASK 0x1 +#define UL_AP_DMIC_ON_MASK_SFT (0x1 << 16) +#define DMIC_LOW_POWER_MODE_CTL_SFT 14 +#define DMIC_LOW_POWER_MODE_CTL_MASK 0x3 +#define DMIC_LOW_POWER_MODE_CTL_MASK_SFT (0x3 << 14) +#define UL_DISABLE_HW_CG_CTL_SFT 12 +#define UL_DISABLE_HW_CG_CTL_MASK 0x1 +#define UL_DISABLE_HW_CG_CTL_MASK_SFT (0x1 << 12) +#define AMIC_26M_SEL_CTL_SFT 11 +#define AMIC_26M_SEL_CTL_MASK 0x1 +#define AMIC_26M_SEL_CTL_MASK_SFT (0x1 << 11) +#define UL_IIR_ON_TMP_CTL_SFT 10 +#define UL_IIR_ON_TMP_CTL_MASK 0x1 +#define UL_IIR_ON_TMP_CTL_MASK_SFT (0x1 << 10) +#define UL_IIRMODE_CTL_SFT 7 +#define UL_IIRMODE_CTL_MASK 0x7 +#define UL_IIRMODE_CTL_MASK_SFT (0x7 << 7) +#define DIGMIC_4P33M_SEL_SFT 6 +#define DIGMIC_4P33M_SEL_MASK 0x1 +#define DIGMIC_4P33M_SEL_MASK_SFT (0x1 << 6) +#define DIGMIC_3P25M_1P625M_SEL_CTL_SFT 5 +#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK 0x1 +#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK_SFT (0x1 << 5) +#define AMIC_6P5M_SEL_CTL_SFT 4 +#define AMIC_6P5M_SEL_CTL_MASK 0x1 +#define AMIC_6P5M_SEL_CTL_MASK_SFT (0x1 << 4) +#define AMIC_1P625M_SEL_CTL_SFT 3 +#define AMIC_1P625M_SEL_CTL_MASK 0x1 +#define AMIC_1P625M_SEL_CTL_MASK_SFT (0x1 << 3) +#define UL_LOOP_BACK_MODE_CTL_SFT 2 +#define UL_LOOP_BACK_MODE_CTL_MASK 0x1 +#define UL_LOOP_BACK_MODE_CTL_MASK_SFT (0x1 << 2) +#define UL_SDM_3_LEVEL_CTL_SFT 1 +#define UL_SDM_3_LEVEL_CTL_MASK 0x1 +#define UL_SDM_3_LEVEL_CTL_MASK_SFT (0x1 << 1) +#define UL_SRC_ON_TMP_CTL_SFT 0 +#define UL_SRC_ON_TMP_CTL_MASK 0x1 +#define 
UL_SRC_ON_TMP_CTL_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_UL2_SRC_CON1 */ +#define ADDA_UL_GAIN_VALUE_SFT 16 +#define ADDA_UL_GAIN_VALUE_MASK 0xffff +#define ADDA_UL_GAIN_VALUE_MASK_SFT (0xffff << 16) +#define ADDA_UL_POSTIVEGAIN_SFT 12 +#define ADDA_UL_POSTIVEGAIN_MASK 0x7 +#define ADDA_UL_POSTIVEGAIN_MASK_SFT (0x7 << 12) +#define ADDA_UL_ODDTAP_MODE_SFT 11 +#define ADDA_UL_ODDTAP_MODE_MASK 0x1 +#define ADDA_UL_ODDTAP_MODE_MASK_SFT (0x1 << 11) +#define ADDA_UL_HALF_TAP_NUM_SFT 5 +#define ADDA_UL_HALF_TAP_NUM_MASK 0x3f +#define ADDA_UL_HALF_TAP_NUM_MASK_SFT (0x3f << 5) +#define FIFO_SOFT_RST_SFT 4 +#define FIFO_SOFT_RST_MASK 0x1 +#define FIFO_SOFT_RST_MASK_SFT (0x1 << 4) +#define FIFO_SOFT_RST_EN_SFT 3 +#define FIFO_SOFT_RST_EN_MASK 0x1 +#define FIFO_SOFT_RST_EN_MASK_SFT (0x1 << 3) +#define LR_SWAP_SFT 2 +#define LR_SWAP_MASK 0x1 +#define LR_SWAP_MASK_SFT (0x1 << 2) +#define GAIN_MODE_SFT 0 +#define GAIN_MODE_MASK 0x3 +#define GAIN_MODE_MASK_SFT (0x3 << 0) + +/* AFE_ADDA_UL2_SRC_CON2 */ +#define C_DAC_EN_CTL_SFT 27 +#define C_DAC_EN_CTL_MASK 0x1 +#define C_DAC_EN_CTL_MASK_SFT (0x1 << 27) +#define C_MUTE_SW_CTL_SFT 26 +#define C_MUTE_SW_CTL_MASK 0x1 +#define C_MUTE_SW_CTL_MASK_SFT (0x1 << 26) +#define C_AMP_DIV_CH2_CTL_SFT 21 +#define C_AMP_DIV_CH2_CTL_MASK 0x7 +#define C_AMP_DIV_CH2_CTL_MASK_SFT (0x7 << 21) +#define C_FREQ_DIV_CH2_CTL_SFT 16 +#define C_FREQ_DIV_CH2_CTL_MASK 0x1f +#define C_FREQ_DIV_CH2_CTL_MASK_SFT (0x1f << 16) +#define C_SINE_MODE_CH2_CTL_SFT 12 +#define C_SINE_MODE_CH2_CTL_MASK 0xf +#define C_SINE_MODE_CH2_CTL_MASK_SFT (0xf << 12) +#define C_AMP_DIV_CH1_CTL_SFT 9 +#define C_AMP_DIV_CH1_CTL_MASK 0x7 +#define C_AMP_DIV_CH1_CTL_MASK_SFT (0x7 << 9) +#define C_FREQ_DIV_CH1_CTL_SFT 4 +#define C_FREQ_DIV_CH1_CTL_MASK 0x1f +#define C_FREQ_DIV_CH1_CTL_MASK_SFT (0x1f << 4) +#define C_SINE_MODE_CH1_CTL_SFT 0 +#define C_SINE_MODE_CH1_CTL_MASK 0xf +#define C_SINE_MODE_CH1_CTL_MASK_SFT (0xf << 0) + +/* AFE_ADDA_UL2_SRC_DEBUG */ +#define 
UL_SLT_CNT_FLAG_RESET_CTL_SFT 16 +#define UL_SLT_CNT_FLAG_RESET_CTL_MASK 0x1 +#define UL_SLT_CNT_FLAG_RESET_CTL_MASK_SFT (0x1 << 16) +#define FIFO_DIGMIC_TESTIN_SFT 12 +#define FIFO_DIGMIC_TESTIN_MASK 0x3 +#define FIFO_DIGMIC_TESTIN_MASK_SFT (0x3 << 12) +#define FIFO_DIGMIC_WDATA_TESTEN_SFT 11 +#define FIFO_DIGMIC_WDATA_TESTEN_MASK 0x1 +#define FIFO_DIGMIC_WDATA_TESTEN_MASK_SFT (0x1 << 11) +#define SLT_CNT_THD_CTL_SFT 0 +#define SLT_CNT_THD_CTL_MASK 0x7ff +#define SLT_CNT_THD_CTL_MASK_SFT (0x7ff << 0) + +/* AFE_ADDA_UL2_SRC_DEBUG_MON0 */ +#define SLT_CNT_FLAG_CTL_SFT 16 +#define SLT_CNT_FLAG_CTL_MASK 0x1 +#define SLT_CNT_FLAG_CTL_MASK_SFT (0x1 << 16) +#define SLT_COUNTER_CTL_SFT 0 +#define SLT_COUNTER_CTL_MASK 0x7ff +#define SLT_COUNTER_CTL_MASK_SFT (0x7ff << 0) + +/* AFE_ADDA_UL2_IIR_COEF_02_01 */ +#define ADDA_IIR_COEF_02_01_SFT 0 +#define ADDA_IIR_COEF_02_01_MASK 0xffffffff +#define ADDA_IIR_COEF_02_01_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_IIR_COEF_04_03 */ +#define ADDA_IIR_COEF_04_03_SFT 0 +#define ADDA_IIR_COEF_04_03_MASK 0xffffffff +#define ADDA_IIR_COEF_04_03_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_IIR_COEF_06_05 */ +#define ADDA_IIR_COEF_06_05_SFT 0 +#define ADDA_IIR_COEF_06_05_MASK 0xffffffff +#define ADDA_IIR_COEF_06_05_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_IIR_COEF_08_07 */ +#define ADDA_IIR_COEF_08_07_SFT 0 +#define ADDA_IIR_COEF_08_07_MASK 0xffffffff +#define ADDA_IIR_COEF_08_07_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_IIR_COEF_10_09 */ +#define ADDA_IIR_COEF_10_09_SFT 0 +#define ADDA_IIR_COEF_10_09_MASK 0xffffffff +#define ADDA_IIR_COEF_10_09_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_02_01 */ +#define ADDA_ULCF_CFG_02_01_SFT 0 +#define ADDA_ULCF_CFG_02_01_MASK 0xffffffff +#define ADDA_ULCF_CFG_02_01_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_04_03 */ +#define ADDA_ULCF_CFG_04_03_SFT 0 +#define ADDA_ULCF_CFG_04_03_MASK 0xffffffff +#define ADDA_ULCF_CFG_04_03_MASK_SFT (0xffffffff << 0) + +/* 
AFE_ADDA_UL2_ULCF_CFG_06_05 */ +#define ADDA_ULCF_CFG_06_05_SFT 0 +#define ADDA_ULCF_CFG_06_05_MASK 0xffffffff +#define ADDA_ULCF_CFG_06_05_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_08_07 */ +#define ADDA_ULCF_CFG_08_07_SFT 0 +#define ADDA_ULCF_CFG_08_07_MASK 0xffffffff +#define ADDA_ULCF_CFG_08_07_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_10_09 */ +#define ADDA_ULCF_CFG_10_09_SFT 0 +#define ADDA_ULCF_CFG_10_09_MASK 0xffffffff +#define ADDA_ULCF_CFG_10_09_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_12_11 */ +#define ADDA_ULCF_CFG_12_11_SFT 0 +#define ADDA_ULCF_CFG_12_11_MASK 0xffffffff +#define ADDA_ULCF_CFG_12_11_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_14_13 */ +#define ADDA_ULCF_CFG_14_13_SFT 0 +#define ADDA_ULCF_CFG_14_13_MASK 0xffffffff +#define ADDA_ULCF_CFG_14_13_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_16_15 */ +#define ADDA_ULCF_CFG_16_15_SFT 0 +#define ADDA_ULCF_CFG_16_15_MASK 0xffffffff +#define ADDA_ULCF_CFG_16_15_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_18_17 */ +#define ADDA_ULCF_CFG_18_17_SFT 0 +#define ADDA_ULCF_CFG_18_17_MASK 0xffffffff +#define ADDA_ULCF_CFG_18_17_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_20_19 */ +#define ADDA_ULCF_CFG_20_19_SFT 0 +#define ADDA_ULCF_CFG_20_19_MASK 0xffffffff +#define ADDA_ULCF_CFG_20_19_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_22_21 */ +#define ADDA_ULCF_CFG_22_21_SFT 0 +#define ADDA_ULCF_CFG_22_21_MASK 0xffffffff +#define ADDA_ULCF_CFG_22_21_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_24_23 */ +#define ADDA_ULCF_CFG_24_23_SFT 0 +#define ADDA_ULCF_CFG_24_23_MASK 0xffffffff +#define ADDA_ULCF_CFG_24_23_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_26_25 */ +#define ADDA_ULCF_CFG_26_25_SFT 0 +#define ADDA_ULCF_CFG_26_25_MASK 0xffffffff +#define ADDA_ULCF_CFG_26_25_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_28_27 */ +#define ADDA_ULCF_CFG_28_27_SFT 0 +#define 
ADDA_ULCF_CFG_28_27_MASK 0xffffffff +#define ADDA_ULCF_CFG_28_27_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_30_29 */ +#define ADDA_ULCF_CFG_30_29_SFT 0 +#define ADDA_ULCF_CFG_30_29_MASK 0xffffffff +#define ADDA_ULCF_CFG_30_29_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_ULCF_CFG_32_31 */ +#define ADDA_ULCF_CFG_32_31_SFT 0 +#define ADDA_ULCF_CFG_32_31_MASK 0xffffffff +#define ADDA_ULCF_CFG_32_31_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_IP_VERSION */ +#define ADDA_ULCF_IP_VERSION_SFT 0 +#define ADDA_ULCF_IP_VERSION_MASK 0xffffffff +#define ADDA_ULCF_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_PROXIMITY_CON0 */ +#define PROXIMITY_CH1_ON_SFT 12 +#define PROXIMITY_CH1_ON_MASK 0x1 +#define PROXIMITY_CH1_ON_MASK_SFT (0x1 << 12) +#define PROXIMITY_CH1_SEL_SFT 8 +#define PROXIMITY_CH1_SEL_MASK 0xf +#define PROXIMITY_CH1_SEL_MASK_SFT (0xf << 8) +#define PROXIMITY_CH2_ON_SFT 4 +#define PROXIMITY_CH2_ON_MASK 0x1 +#define PROXIMITY_CH2_ON_MASK_SFT (0x1 << 4) +#define PROXIMITY_CH2_SEL_SFT 0 +#define PROXIMITY_CH2_SEL_MASK 0xf +#define PROXIMITY_CH2_SEL_MASK_SFT (0xf << 0) + +/* AFE_ADDA_ULSRC_PHASE_CON0 */ +#define DMIC1_PHASE_FCLK_SEL_SFT 30 +#define DMIC1_PHASE_FCLK_SEL_MASK 0x3 +#define DMIC1_PHASE_FCLK_SEL_MASK_SFT (0x3 << 30) +#define DMIC0_PHASE_FCLK_SEL_SFT 28 +#define DMIC0_PHASE_FCLK_SEL_MASK 0x3 +#define DMIC0_PHASE_FCLK_SEL_MASK_SFT (0x3 << 28) +#define UL3_PHASE_FCLK_SEL_SFT 26 +#define UL3_PHASE_FCLK_SEL_MASK 0x3 +#define UL3_PHASE_FCLK_SEL_MASK_SFT (0x3 << 26) +#define UL2_PHASE_FCLK_SEL_SFT 24 +#define UL2_PHASE_FCLK_SEL_MASK 0x3 +#define UL2_PHASE_FCLK_SEL_MASK_SFT (0x3 << 24) +#define UL1_PHASE_FCLK_SEL_SFT 22 +#define UL1_PHASE_FCLK_SEL_MASK 0x3 +#define UL1_PHASE_FCLK_SEL_MASK_SFT (0x3 << 22) +#define UL0_PHASE_FCLK_SEL_SFT 20 +#define UL0_PHASE_FCLK_SEL_MASK 0x3 +#define UL0_PHASE_FCLK_SEL_MASK_SFT (0x3 << 20) +#define UL_PHASE_SYNC_FCLK_2_ON_SFT 18 +#define UL_PHASE_SYNC_FCLK_2_ON_MASK 0x1 +#define UL_PHASE_SYNC_FCLK_2_ON_MASK_SFT 
(0x1 << 18) +#define UL_PHASE_SYNC_FCLK_1_ON_SFT 17 +#define UL_PHASE_SYNC_FCLK_1_ON_MASK 0x1 +#define UL_PHASE_SYNC_FCLK_1_ON_MASK_SFT (0x1 << 17) +#define UL_PHASE_SYNC_FCLK_0_ON_SFT 16 +#define UL_PHASE_SYNC_FCLK_0_ON_MASK 0x1 +#define UL_PHASE_SYNC_FCLK_0_ON_MASK_SFT (0x1 << 16) +#define DMIC1_PHASE_HCLK_SEL_SFT 14 +#define DMIC1_PHASE_HCLK_SEL_MASK 0x3 +#define DMIC1_PHASE_HCLK_SEL_MASK_SFT (0x3 << 14) +#define DMIC0_PHASE_HCLK_SEL_SFT 12 +#define DMIC0_PHASE_HCLK_SEL_MASK 0x3 +#define DMIC0_PHASE_HCLK_SEL_MASK_SFT (0x3 << 12) +#define UL3_PHASE_HCLK_SEL_SFT 10 +#define UL3_PHASE_HCLK_SEL_MASK 0x3 +#define UL3_PHASE_HCLK_SEL_MASK_SFT (0x3 << 10) +#define UL2_PHASE_HCLK_SEL_SFT 8 +#define UL2_PHASE_HCLK_SEL_MASK 0x3 +#define UL2_PHASE_HCLK_SEL_MASK_SFT (0x3 << 8) +#define UL1_PHASE_HCLK_SEL_SFT 6 +#define UL1_PHASE_HCLK_SEL_MASK 0x3 +#define UL1_PHASE_HCLK_SEL_MASK_SFT (0x3 << 6) +#define UL0_PHASE_HCLK_SEL_SFT 4 +#define UL0_PHASE_HCLK_SEL_MASK 0x3 +#define UL0_PHASE_HCLK_SEL_MASK_SFT (0x3 << 4) +#define UL_PHASE_SYNC_HCLK_2_ON_SFT 2 +#define UL_PHASE_SYNC_HCLK_2_ON_MASK 0x1 +#define UL_PHASE_SYNC_HCLK_2_ON_MASK_SFT (0x1 << 2) +#define UL_PHASE_SYNC_HCLK_1_ON_SFT 1 +#define UL_PHASE_SYNC_HCLK_1_ON_MASK 0x1 +#define UL_PHASE_SYNC_HCLK_1_ON_MASK_SFT (0x1 << 1) +#define UL_PHASE_SYNC_HCLK_0_ON_SFT 0 +#define UL_PHASE_SYNC_HCLK_0_ON_MASK 0x1 +#define UL_PHASE_SYNC_HCLK_0_ON_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_ULSRC_PHASE_CON1 */ +#define DMIC_CLK_PHASE_SYNC_SET_SFT 31 +#define DMIC_CLK_PHASE_SYNC_SET_MASK 0x1 +#define DMIC_CLK_PHASE_SYNC_SET_MASK_SFT (0x1 << 31) +#define DMIC1_PHASE_SYNC_FCLK_SET_SFT 11 +#define DMIC1_PHASE_SYNC_FCLK_SET_MASK 0x1 +#define DMIC1_PHASE_SYNC_FCLK_SET_MASK_SFT (0x1 << 11) +#define DMIC1_PHASE_SYNC_HCLK_SET_SFT 10 +#define DMIC1_PHASE_SYNC_HCLK_SET_MASK 0x1 +#define DMIC1_PHASE_SYNC_HCLK_SET_MASK_SFT (0x1 << 10) +#define DMIC0_PHASE_SYNC_FCLK_SET_SFT 9 +#define DMIC0_PHASE_SYNC_FCLK_SET_MASK 0x1 +#define 
DMIC0_PHASE_SYNC_FCLK_SET_MASK_SFT (0x1 << 9) +#define DMIC0_PHASE_SYNC_HCLK_SET_SFT 8 +#define DMIC0_PHASE_SYNC_HCLK_SET_MASK 0x1 +#define DMIC0_PHASE_SYNC_HCLK_SET_MASK_SFT (0x1 << 8) +#define UL3_PHASE_SYNC_FCLK_SET_SFT 7 +#define UL3_PHASE_SYNC_FCLK_SET_MASK 0x1 +#define UL3_PHASE_SYNC_FCLK_SET_MASK_SFT (0x1 << 7) +#define UL3_PHASE_SYNC_HCLK_SET_SFT 6 +#define UL3_PHASE_SYNC_HCLK_SET_MASK 0x1 +#define UL3_PHASE_SYNC_HCLK_SET_MASK_SFT (0x1 << 6) +#define UL2_PHASE_SYNC_FCLK_SET_SFT 5 +#define UL2_PHASE_SYNC_FCLK_SET_MASK 0x1 +#define UL2_PHASE_SYNC_FCLK_SET_MASK_SFT (0x1 << 5) +#define UL2_PHASE_SYNC_HCLK_SET_SFT 4 +#define UL2_PHASE_SYNC_HCLK_SET_MASK 0x1 +#define UL2_PHASE_SYNC_HCLK_SET_MASK_SFT (0x1 << 4) +#define UL1_PHASE_SYNC_FCLK_SET_SFT 3 +#define UL1_PHASE_SYNC_FCLK_SET_MASK 0x1 +#define UL1_PHASE_SYNC_FCLK_SET_MASK_SFT (0x1 << 3) +#define UL1_PHASE_SYNC_HCLK_SET_SFT 2 +#define UL1_PHASE_SYNC_HCLK_SET_MASK 0x1 +#define UL1_PHASE_SYNC_HCLK_SET_MASK_SFT (0x1 << 2) +#define UL0_PHASE_SYNC_FCLK_SET_SFT 1 +#define UL0_PHASE_SYNC_FCLK_SET_MASK 0x1 +#define UL0_PHASE_SYNC_FCLK_SET_MASK_SFT (0x1 << 1) +#define UL0_PHASE_SYNC_HCLK_SET_SFT 0 +#define UL0_PHASE_SYNC_HCLK_SET_MASK 0x1 +#define UL0_PHASE_SYNC_HCLK_SET_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_ULSRC_PHASE_CON2 */ +#define DMIC1_PHASE_SYNC_1X_EN_SEL_SFT 26 +#define DMIC1_PHASE_SYNC_1X_EN_SEL_MASK 0x3 +#define DMIC1_PHASE_SYNC_1X_EN_SEL_MASK_SFT (0x3 << 26) +#define DMIC0_PHASE_SYNC_1X_EN_SEL_SFT 24 +#define DMIC0_PHASE_SYNC_1X_EN_SEL_MASK 0x3 +#define DMIC0_PHASE_SYNC_1X_EN_SEL_MASK_SFT (0x3 << 24) +#define UL3_PHASE_SYNC_1X_EN_SEL_SFT 22 +#define UL3_PHASE_SYNC_1X_EN_SEL_MASK 0x3 +#define UL3_PHASE_SYNC_1X_EN_SEL_MASK_SFT (0x3 << 22) +#define UL2_PHASE_SYNC_1X_EN_SEL_SFT 20 +#define UL2_PHASE_SYNC_1X_EN_SEL_MASK 0x3 +#define UL2_PHASE_SYNC_1X_EN_SEL_MASK_SFT (0x3 << 20) +#define UL1_PHASE_SYNC_1X_EN_SEL_SFT 18 +#define UL1_PHASE_SYNC_1X_EN_SEL_MASK 0x3 +#define UL1_PHASE_SYNC_1X_EN_SEL_MASK_SFT (0x3 << 18) 
+#define UL0_PHASE_SYNC_1X_EN_SEL_SFT 16 +#define UL0_PHASE_SYNC_1X_EN_SEL_MASK 0x3 +#define UL0_PHASE_SYNC_1X_EN_SEL_MASK_SFT (0x3 << 16) +#define UL_PHASE_SYNC_FCLK_1X_EN_2_ON_SFT 5 +#define UL_PHASE_SYNC_FCLK_1X_EN_2_ON_MASK 0x1 +#define UL_PHASE_SYNC_FCLK_1X_EN_2_ON_MASK_SFT (0x1 << 5) +#define UL_PHASE_SYNC_FCLK_1X_EN_1_ON_SFT 4 +#define UL_PHASE_SYNC_FCLK_1X_EN_1_ON_MASK 0x1 +#define UL_PHASE_SYNC_FCLK_1X_EN_1_ON_MASK_SFT (0x1 << 4) +#define UL_PHASE_SYNC_FCLK_1X_EN_0_ON_SFT 3 +#define UL_PHASE_SYNC_FCLK_1X_EN_0_ON_MASK 0x1 +#define UL_PHASE_SYNC_FCLK_1X_EN_0_ON_MASK_SFT (0x1 << 3) +#define UL_PHASE_SYNC_HCLK_1X_EN_2_ON_SFT 2 +#define UL_PHASE_SYNC_HCLK_1X_EN_2_ON_MASK 0x1 +#define UL_PHASE_SYNC_HCLK_1X_EN_2_ON_MASK_SFT (0x1 << 2) +#define UL_PHASE_SYNC_HCLK_1X_EN_1_ON_SFT 1 +#define UL_PHASE_SYNC_HCLK_1X_EN_1_ON_MASK 0x1 +#define UL_PHASE_SYNC_HCLK_1X_EN_1_ON_MASK_SFT (0x1 << 1) +#define UL_PHASE_SYNC_HCLK_1X_EN_0_ON_SFT 0 +#define UL_PHASE_SYNC_HCLK_1X_EN_0_ON_MASK 0x1 +#define UL_PHASE_SYNC_HCLK_1X_EN_0_ON_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_ULSRC_PHASE_CON3 */ +#define DMIC1_PHASE_SYNC_SOFT_RST_SEL_SFT 26 +#define DMIC1_PHASE_SYNC_SOFT_RST_SEL_MASK 0x3 +#define DMIC1_PHASE_SYNC_SOFT_RST_SEL_MASK_SFT (0x3 << 26) +#define DMIC0_PHASE_SYNC_SOFT_RST_SEL_SFT 24 +#define DMIC0_PHASE_SYNC_SOFT_RST_SEL_MASK 0x3 +#define DMIC0_PHASE_SYNC_SOFT_RST_SEL_MASK_SFT (0x3 << 24) +#define UL3_PHASE_SYNC_SOFT_RST_SEL_SFT 22 +#define UL3_PHASE_SYNC_SOFT_RST_SEL_MASK 0x3 +#define UL3_PHASE_SYNC_SOFT_RST_SEL_MASK_SFT (0x3 << 22) +#define UL2_PHASE_SYNC_SOFT_RST_SEL_SFT 20 +#define UL2_PHASE_SYNC_SOFT_RST_SEL_MASK 0x3 +#define UL2_PHASE_SYNC_SOFT_RST_SEL_MASK_SFT (0x3 << 20) +#define UL1_PHASE_SYNC_SOFT_RST_SEL_SFT 18 +#define UL1_PHASE_SYNC_SOFT_RST_SEL_MASK 0x3 +#define UL1_PHASE_SYNC_SOFT_RST_SEL_MASK_SFT (0x3 << 18) +#define UL0_PHASE_SYNC_SOFT_RST_SEL_SFT 16 +#define UL0_PHASE_SYNC_SOFT_RST_SEL_MASK 0x3 +#define UL0_PHASE_SYNC_SOFT_RST_SEL_MASK_SFT (0x3 << 16) +#define 
DMIC1_PHASE_SYNC_CH1_FIFO_SEL_SFT 13 +#define DMIC1_PHASE_SYNC_CH1_FIFO_SEL_MASK 0x1 +#define DMIC1_PHASE_SYNC_CH1_FIFO_SEL_MASK_SFT (0x1 << 13) +#define DMIC0_PHASE_SYNC_CH1_FIFO_SEL_SFT 12 +#define DMIC0_PHASE_SYNC_CH1_FIFO_SEL_MASK 0x1 +#define DMIC0_PHASE_SYNC_CH1_FIFO_SEL_MASK_SFT (0x1 << 12) +#define UL3_PHASE_SYNC_CH1_FIFO_SEL_SFT 11 +#define UL3_PHASE_SYNC_CH1_FIFO_SEL_MASK 0x1 +#define UL3_PHASE_SYNC_CH1_FIFO_SEL_MASK_SFT (0x1 << 11) +#define UL2_PHASE_SYNC_CH1_FIFO_SEL_SFT 10 +#define UL2_PHASE_SYNC_CH1_FIFO_SEL_MASK 0x1 +#define UL2_PHASE_SYNC_CH1_FIFO_SEL_MASK_SFT (0x1 << 10) +#define UL1_PHASE_SYNC_CH1_FIFO_SEL_SFT 9 +#define UL1_PHASE_SYNC_CH1_FIFO_SEL_MASK 0x1 +#define UL1_PHASE_SYNC_CH1_FIFO_SEL_MASK_SFT (0x1 << 9) +#define UL0_PHASE_SYNC_CH1_FIFO_SEL_SFT 8 +#define UL0_PHASE_SYNC_CH1_FIFO_SEL_MASK 0x1 +#define UL0_PHASE_SYNC_CH1_FIFO_SEL_MASK_SFT (0x1 << 8) +#define UL_PHASE_SYNC_SOFT_RST_EN_2_ON_SFT 5 +#define UL_PHASE_SYNC_SOFT_RST_EN_2_ON_MASK 0x1 +#define UL_PHASE_SYNC_SOFT_RST_EN_2_ON_MASK_SFT (0x1 << 5) +#define UL_PHASE_SYNC_SOFT_RST_EN_1_ON_SFT 4 +#define UL_PHASE_SYNC_SOFT_RST_EN_1_ON_MASK 0x1 +#define UL_PHASE_SYNC_SOFT_RST_EN_1_ON_MASK_SFT (0x1 << 4) +#define UL_PHASE_SYNC_SOFT_RST_EN_0_ON_SFT 3 +#define UL_PHASE_SYNC_SOFT_RST_EN_0_ON_MASK 0x1 +#define UL_PHASE_SYNC_SOFT_RST_EN_0_ON_MASK_SFT (0x1 << 3) +#define UL_PHASE_SYNC_SOFT_RST_2_ON_SFT 2 +#define UL_PHASE_SYNC_SOFT_RST_2_ON_MASK 0x1 +#define UL_PHASE_SYNC_SOFT_RST_2_ON_MASK_SFT (0x1 << 2) +#define UL_PHASE_SYNC_SOFT_RST_1_ON_SFT 1 +#define UL_PHASE_SYNC_SOFT_RST_1_ON_MASK 0x1 +#define UL_PHASE_SYNC_SOFT_RST_1_ON_MASK_SFT (0x1 << 1) +#define UL_PHASE_SYNC_SOFT_RST_0_ON_SFT 0 +#define UL_PHASE_SYNC_SOFT_RST_0_ON_MASK 0x1 +#define UL_PHASE_SYNC_SOFT_RST_0_ON_MASK_SFT (0x1 << 0) + +/* AFE_MTKAIF_IPM_VER_MON */ +#define RG_MTKAIF_IPM_VER_MON_SFT 0 +#define RG_MTKAIF_IPM_VER_MON_MASK 0xffffffff +#define RG_MTKAIF_IPM_VER_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_MTKAIF_MON_SEL */ 
+#define RG_MTKAIF_MON_SEL_SFT 0 +#define RG_MTKAIF_MON_SEL_MASK 0xff +#define RG_MTKAIF_MON_SEL_MASK_SFT (0xff << 0) + +/* AFE_MTKAIF_MON */ +#define RG_MTKAIF_MON_SFT 0 +#define RG_MTKAIF_MON_MASK 0xffffffff +#define RG_MTKAIF_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_MTKAIF0_CFG0 */ +#define RG_MTKAIF0_RXIF_CLKINV_SFT 31 +#define RG_MTKAIF0_RXIF_CLKINV_MASK 0x1 +#define RG_MTKAIF0_RXIF_CLKINV_MASK_SFT (0x1 << 31) +#define RG_MTKAIF0_RXIF_BYPASS_SRC_SFT 17 +#define RG_MTKAIF0_RXIF_BYPASS_SRC_MASK 0x1 +#define RG_MTKAIF0_RXIF_BYPASS_SRC_MASK_SFT (0x1 << 17) +#define RG_MTKAIF0_RXIF_PROTOCOL2_SFT 16 +#define RG_MTKAIF0_RXIF_PROTOCOL2_MASK 0x1 +#define RG_MTKAIF0_RXIF_PROTOCOL2_MASK_SFT (0x1 << 16) +#define RG_MTKAIF0_TXIF_NLE_DEBUG_SFT 8 +#define RG_MTKAIF0_TXIF_NLE_DEBUG_MASK 0x1 +#define RG_MTKAIF0_TXIF_NLE_DEBUG_MASK_SFT (0x1 << 8) +#define RG_MTKAIF0_TXIF_BYPASS_SRC_SFT 5 +#define RG_MTKAIF0_TXIF_BYPASS_SRC_MASK 0x1 +#define RG_MTKAIF0_TXIF_BYPASS_SRC_MASK_SFT (0x1 << 5) +#define RG_MTKAIF0_TXIF_PROTOCOL2_SFT 4 +#define RG_MTKAIF0_TXIF_PROTOCOL2_MASK 0x1 +#define RG_MTKAIF0_TXIF_PROTOCOL2_MASK_SFT (0x1 << 4) +#define RG_MTKAIF0_TXIF_8TO5_SFT 2 +#define RG_MTKAIF0_TXIF_8TO5_MASK 0x1 +#define RG_MTKAIF0_TXIF_8TO5_MASK_SFT (0x1 << 2) +#define RG_MTKAIF0_RXIF_8TO5_SFT 1 +#define RG_MTKAIF0_RXIF_8TO5_MASK 0x1 +#define RG_MTKAIF0_RXIF_8TO5_MASK_SFT (0x1 << 1) +#define RG_MTKAIF0_TX2RX_LOOPBACK1_SFT 0 +#define RG_MTKAIF0_TX2RX_LOOPBACK1_MASK 0x1 +#define RG_MTKAIF0_TX2RX_LOOPBACK1_MASK_SFT (0x1 << 0) + +/* AFE_MTKAIF0_TX_CFG0 */ +#define RG_MTKAIF0_TXIF_NLE_FIFO_SWAP_SFT 23 +#define RG_MTKAIF0_TXIF_NLE_FIFO_SWAP_MASK 0x1 +#define RG_MTKAIF0_TXIF_NLE_FIFO_SWAP_MASK_SFT (0x1 << 23) +#define RG_MTKAIF0_TXIF_NLE_FIFO_RSP_SFT 20 +#define RG_MTKAIF0_TXIF_NLE_FIFO_RSP_MASK 0x7 +#define RG_MTKAIF0_TXIF_NLE_FIFO_RSP_MASK_SFT (0x7 << 20) +#define RG_MTKAIF0_TXIF_FIFO_SWAP_SFT 15 +#define RG_MTKAIF0_TXIF_FIFO_SWAP_MASK 0x1 +#define RG_MTKAIF0_TXIF_FIFO_SWAP_MASK_SFT (0x1 << 15) 
+#define RG_MTKAIF0_TXIF_FIFO_RSP_SFT 12 +#define RG_MTKAIF0_TXIF_FIFO_RSP_MASK 0x7 +#define RG_MTKAIF0_TXIF_FIFO_RSP_MASK_SFT (0x7 << 12) +#define RG_MTKAIF0_TXIF_SYNC_WORD1_SFT 4 +#define RG_MTKAIF0_TXIF_SYNC_WORD1_MASK 0x7 +#define RG_MTKAIF0_TXIF_SYNC_WORD1_MASK_SFT (0x7 << 4) +#define RG_MTKAIF0_TXIF_SYNC_WORD0_SFT 0 +#define RG_MTKAIF0_TXIF_SYNC_WORD0_MASK 0x7 +#define RG_MTKAIF0_TXIF_SYNC_WORD0_MASK_SFT (0x7 << 0) + +/* AFE_MTKAIF0_RX_CFG0 */ +#define RG_MTKAIF0_RXIF_VOICE_MODE_SFT 20 +#define RG_MTKAIF0_RXIF_VOICE_MODE_MASK 0xf +#define RG_MTKAIF0_RXIF_VOICE_MODE_MASK_SFT (0xf << 20) +#define RG_MTKAIF0_RXIF_DETECT_ON_SFT 16 +#define RG_MTKAIF0_RXIF_DETECT_ON_MASK 0x1 +#define RG_MTKAIF0_RXIF_DETECT_ON_MASK_SFT (0x1 << 16) +#define RG_MTKAIF0_RXIF_DATA_BIT_SFT 8 +#define RG_MTKAIF0_RXIF_DATA_BIT_MASK 0x7 +#define RG_MTKAIF0_RXIF_DATA_BIT_MASK_SFT (0x7 << 8) +#define RG_MTKAIF0_RXIF_FIFO_RSP_SFT 4 +#define RG_MTKAIF0_RXIF_FIFO_RSP_MASK 0x7 +#define RG_MTKAIF0_RXIF_FIFO_RSP_MASK_SFT (0x7 << 4) +#define RG_MTKAIF0_RXIF_DATA_MODE_SFT 0 +#define RG_MTKAIF0_RXIF_DATA_MODE_MASK 0x1 +#define RG_MTKAIF0_RXIF_DATA_MODE_MASK_SFT (0x1 << 0) + +/* AFE_MTKAIF0_RX_CFG1 */ +#define RG_MTKAIF0_RXIF_CLEAR_SYNC_FAIL_SFT 28 +#define RG_MTKAIF0_RXIF_CLEAR_SYNC_FAIL_MASK 0x1 +#define RG_MTKAIF0_RXIF_CLEAR_SYNC_FAIL_MASK_SFT (0x1 << 28) +#define RG_MTKAIF0_RXIF_SYNC_CNT_TABLE_SFT 16 +#define RG_MTKAIF0_RXIF_SYNC_CNT_TABLE_MASK 0xfff +#define RG_MTKAIF0_RXIF_SYNC_CNT_TABLE_MASK_SFT (0xfff << 16) +#define RG_MTKAIF0_RXIF_SYNC_SEARCH_TABLE_SFT 12 +#define RG_MTKAIF0_RXIF_SYNC_SEARCH_TABLE_MASK 0xf +#define RG_MTKAIF0_RXIF_SYNC_SEARCH_TABLE_MASK_SFT (0xf << 12) +#define RG_MTKAIF0_RXIF_INVALID_SYNC_CHECK_ROUND_SFT 8 +#define RG_MTKAIF0_RXIF_INVALID_SYNC_CHECK_ROUND_MASK 0xf +#define RG_MTKAIF0_RXIF_INVALID_SYNC_CHECK_ROUND_MASK_SFT (0xf << 8) +#define RG_MTKAIF0_RXIF_SYNC_CHECK_ROUND_SFT 4 +#define RG_MTKAIF0_RXIF_SYNC_CHECK_ROUND_MASK 0xf +#define 
RG_MTKAIF0_RXIF_SYNC_CHECK_ROUND_MASK_SFT (0xf << 4) + +/* AFE_MTKAIF0_RX_CFG2 */ +#define RG_MTKAIF0_RXIF_SYNC_WORD1_DISABLE_SFT 27 +#define RG_MTKAIF0_RXIF_SYNC_WORD1_DISABLE_MASK 0x1 +#define RG_MTKAIF0_RXIF_SYNC_WORD1_DISABLE_MASK_SFT (0x1 << 27) +#define RG_MTKAIF0_RXIF_SYNC_WORD1_SFT 24 +#define RG_MTKAIF0_RXIF_SYNC_WORD1_MASK 0x7 +#define RG_MTKAIF0_RXIF_SYNC_WORD1_MASK_SFT (0x7 << 24) +#define RG_MTKAIF0_RXIF_SYNC_WORD0_DISABLE_SFT 23 +#define RG_MTKAIF0_RXIF_SYNC_WORD0_DISABLE_MASK 0x1 +#define RG_MTKAIF0_RXIF_SYNC_WORD0_DISABLE_MASK_SFT (0x1 << 23) +#define RG_MTKAIF0_RXIF_SYNC_WORD0_SFT 20 +#define RG_MTKAIF0_RXIF_SYNC_WORD0_MASK 0x7 +#define RG_MTKAIF0_RXIF_SYNC_WORD0_MASK_SFT (0x7 << 20) +#define RG_MTKAIF0_RXIF_DELAY_CYCLE_SFT 12 +#define RG_MTKAIF0_RXIF_DELAY_CYCLE_MASK 0xf +#define RG_MTKAIF0_RXIF_DELAY_CYCLE_MASK_SFT (0xf << 12) +#define RG_MTKAIF0_RXIF_DELAY_DATA_SFT 8 +#define RG_MTKAIF0_RXIF_DELAY_DATA_MASK 0x1 +#define RG_MTKAIF0_RXIF_DELAY_DATA_MASK_SFT (0x1 << 8) + +/* AFE_MTKAIF1_CFG0 */ +#define RG_MTKAIF1_RXIF_CLKINV_ADC_SFT 31 +#define RG_MTKAIF1_RXIF_CLKINV_ADC_MASK 0x1 +#define RG_MTKAIF1_RXIF_CLKINV_ADC_MASK_SFT (0x1 << 31) +#define RG_MTKAIF1_RXIF_BYPASS_SRC_SFT 17 +#define RG_MTKAIF1_RXIF_BYPASS_SRC_MASK 0x1 +#define RG_MTKAIF1_RXIF_BYPASS_SRC_MASK_SFT (0x1 << 17) +#define RG_MTKAIF1_RXIF_PROTOCOL2_SFT 16 +#define RG_MTKAIF1_RXIF_PROTOCOL2_MASK 0x1 +#define RG_MTKAIF1_RXIF_PROTOCOL2_MASK_SFT (0x1 << 16) +#define RG_MTKAIF1_TXIF_NLE_DEBUG_SFT 8 +#define RG_MTKAIF1_TXIF_NLE_DEBUG_MASK 0x1 +#define RG_MTKAIF1_TXIF_NLE_DEBUG_MASK_SFT (0x1 << 8) +#define RG_MTKAIF1_TXIF_BYPASS_SRC_SFT 5 +#define RG_MTKAIF1_TXIF_BYPASS_SRC_MASK 0x1 +#define RG_MTKAIF1_TXIF_BYPASS_SRC_MASK_SFT (0x1 << 5) +#define RG_MTKAIF1_TXIF_PROTOCOL2_SFT 4 +#define RG_MTKAIF1_TXIF_PROTOCOL2_MASK 0x1 +#define RG_MTKAIF1_TXIF_PROTOCOL2_MASK_SFT (0x1 << 4) +#define RG_MTKAIF1_TXIF_8TO5_SFT 2 +#define RG_MTKAIF1_TXIF_8TO5_MASK 0x1 +#define RG_MTKAIF1_TXIF_8TO5_MASK_SFT 
(0x1 << 2) +#define RG_MTKAIF1_RXIF_8TO5_SFT 1 +#define RG_MTKAIF1_RXIF_8TO5_MASK 0x1 +#define RG_MTKAIF1_RXIF_8TO5_MASK_SFT (0x1 << 1) +#define RG_MTKAIF1_IF_LOOPBACK1_SFT 0 +#define RG_MTKAIF1_IF_LOOPBACK1_MASK 0x1 +#define RG_MTKAIF1_IF_LOOPBACK1_MASK_SFT (0x1 << 0) + +/* AFE_MTKAIF1_TX_CFG0 */ +#define RG_MTKAIF1_TXIF_NLE_FIFO_SWAP_SFT 23 +#define RG_MTKAIF1_TXIF_NLE_FIFO_SWAP_MASK 0x1 +#define RG_MTKAIF1_TXIF_NLE_FIFO_SWAP_MASK_SFT (0x1 << 23) +#define RG_MTKAIF1_TXIF_NLE_FIFO_RSP_SFT 20 +#define RG_MTKAIF1_TXIF_NLE_FIFO_RSP_MASK 0x7 +#define RG_MTKAIF1_TXIF_NLE_FIFO_RSP_MASK_SFT (0x7 << 20) +#define RG_MTKAIF1_TXIF_FIFO_SWAP_SFT 15 +#define RG_MTKAIF1_TXIF_FIFO_SWAP_MASK 0x1 +#define RG_MTKAIF1_TXIF_FIFO_SWAP_MASK_SFT (0x1 << 15) +#define RG_MTKAIF1_TXIF_FIFO_RSP_SFT 12 +#define RG_MTKAIF1_TXIF_FIFO_RSP_MASK 0x7 +#define RG_MTKAIF1_TXIF_FIFO_RSP_MASK_SFT (0x7 << 12) +#define RG_MTKAIF1_TXIF_SYNC_WORD1_SFT 4 +#define RG_MTKAIF1_TXIF_SYNC_WORD1_MASK 0x7 +#define RG_MTKAIF1_TXIF_SYNC_WORD1_MASK_SFT (0x7 << 4) +#define RG_MTKAIF1_TXIF_SYNC_WORD0_SFT 0 +#define RG_MTKAIF1_TXIF_SYNC_WORD0_MASK 0x7 +#define RG_MTKAIF1_TXIF_SYNC_WORD0_MASK_SFT (0x7 << 0) + +/* AFE_MTKAIF1_RX_CFG0 */ +#define RG_MTKAIF1_RXIF_VOICE_MODE_SFT 20 +#define RG_MTKAIF1_RXIF_VOICE_MODE_MASK 0xf +#define RG_MTKAIF1_RXIF_VOICE_MODE_MASK_SFT (0xf << 20) +#define RG_MTKAIF1_RXIF_DETECT_ON_SFT 16 +#define RG_MTKAIF1_RXIF_DETECT_ON_MASK 0x1 +#define RG_MTKAIF1_RXIF_DETECT_ON_MASK_SFT (0x1 << 16) +#define RG_MTKAIF1_RXIF_DATA_BIT_SFT 8 +#define RG_MTKAIF1_RXIF_DATA_BIT_MASK 0x7 +#define RG_MTKAIF1_RXIF_DATA_BIT_MASK_SFT (0x7 << 8) +#define RG_MTKAIF1_RXIF_FIFO_RSP_SFT 4 +#define RG_MTKAIF1_RXIF_FIFO_RSP_MASK 0x7 +#define RG_MTKAIF1_RXIF_FIFO_RSP_MASK_SFT (0x7 << 4) +#define RG_MTKAIF1_RXIF_DATA_MODE_SFT 0 +#define RG_MTKAIF1_RXIF_DATA_MODE_MASK 0x1 +#define RG_MTKAIF1_RXIF_DATA_MODE_MASK_SFT (0x1 << 0) + +/* AFE_MTKAIF1_RX_CFG1 */ +#define RG_MTKAIF1_RXIF_CLEAR_SYNC_FAIL_SFT 28 +#define 
RG_MTKAIF1_RXIF_CLEAR_SYNC_FAIL_MASK 0x1 +#define RG_MTKAIF1_RXIF_CLEAR_SYNC_FAIL_MASK_SFT (0x1 << 28) +#define RG_MTKAIF1_RXIF_SYNC_CNT_TABLE_SFT 16 +#define RG_MTKAIF1_RXIF_SYNC_CNT_TABLE_MASK 0xfff +#define RG_MTKAIF1_RXIF_SYNC_CNT_TABLE_MASK_SFT (0xfff << 16) +#define RG_MTKAIF1_RXIF_SYNC_SEARCH_TABLE_SFT 12 +#define RG_MTKAIF1_RXIF_SYNC_SEARCH_TABLE_MASK 0xf +#define RG_MTKAIF1_RXIF_SYNC_SEARCH_TABLE_MASK_SFT (0xf << 12) +#define RG_MTKAIF1_RXIF_INVALID_SYNC_CHECK_ROUND_SFT 8 +#define RG_MTKAIF1_RXIF_INVALID_SYNC_CHECK_ROUND_MASK 0xf +#define RG_MTKAIF1_RXIF_INVALID_SYNC_CHECK_ROUND_MASK_SFT (0xf << 8) +#define RG_MTKAIF1_RXIF_SYNC_CHECK_ROUND_SFT 4 +#define RG_MTKAIF1_RXIF_SYNC_CHECK_ROUND_MASK 0xf +#define RG_MTKAIF1_RXIF_SYNC_CHECK_ROUND_MASK_SFT (0xf << 4) + +/* AFE_MTKAIF1_RX_CFG2 */ +#define RG_MTKAIF1_RXIF_SYNC_WORD1_DISABLE_SFT 27 +#define RG_MTKAIF1_RXIF_SYNC_WORD1_DISABLE_MASK 0x1 +#define RG_MTKAIF1_RXIF_SYNC_WORD1_DISABLE_MASK_SFT (0x1 << 27) +#define RG_MTKAIF1_RXIF_SYNC_WORD1_SFT 24 +#define RG_MTKAIF1_RXIF_SYNC_WORD1_MASK 0x7 +#define RG_MTKAIF1_RXIF_SYNC_WORD1_MASK_SFT (0x7 << 24) +#define RG_MTKAIF1_RXIF_SYNC_WORD0_DISABLE_SFT 23 +#define RG_MTKAIF1_RXIF_SYNC_WORD0_DISABLE_MASK 0x1 +#define RG_MTKAIF1_RXIF_SYNC_WORD0_DISABLE_MASK_SFT (0x1 << 23) +#define RG_MTKAIF1_RXIF_SYNC_WORD0_SFT 20 +#define RG_MTKAIF1_RXIF_SYNC_WORD0_MASK 0x7 +#define RG_MTKAIF1_RXIF_SYNC_WORD0_MASK_SFT (0x7 << 20) +#define RG_MTKAIF1_RXIF_DELAY_CYCLE_SFT 12 +#define RG_MTKAIF1_RXIF_DELAY_CYCLE_MASK 0xf +#define RG_MTKAIF1_RXIF_DELAY_CYCLE_MASK_SFT (0xf << 12) +#define RG_MTKAIF1_RXIF_DELAY_DATA_SFT 8 +#define RG_MTKAIF1_RXIF_DELAY_DATA_MASK 0x1 +#define RG_MTKAIF1_RXIF_DELAY_DATA_MASK_SFT (0x1 << 8) + +/* AFE_AUD_PAD_TOP_CFG0 */ +#define AUD_PAD_TOP_FIFO_RSP_SFT 4 +#define AUD_PAD_TOP_FIFO_RSP_MASK 0xf +#define AUD_PAD_TOP_FIFO_RSP_MASK_SFT (0xf << 4) +#define RG_RX_PROTOCOL2_SFT 3 +#define RG_RX_PROTOCOL2_MASK 0x1 +#define RG_RX_PROTOCOL2_MASK_SFT (0x1 << 3) +#define 
RG_RX_FIFO_ON_SFT 0 +#define RG_RX_FIFO_ON_MASK 0x1 +#define RG_RX_FIFO_ON_MASK_SFT (0x1 << 0) + +/* AFE_AUD_PAD_TOP_MON */ +#define AUD_PAD_TOP_MON_SFT 0 +#define AUD_PAD_TOP_MON_MASK 0xffff +#define AUD_PAD_TOP_MON_MASK_SFT (0xffff << 0) + +/* AFE_ADDA_MTKAIFV4_TX_CFG0 */ +#define MTKAIFV4_TXIF_EN_SEL_SFT 12 +#define MTKAIFV4_TXIF_EN_SEL_MASK 0x1 +#define MTKAIFV4_TXIF_EN_SEL_MASK_SFT (0x1 << 12) +#define MTKAIFV4_TXIF_V4_SFT 11 +#define MTKAIFV4_TXIF_V4_MASK 0x1 +#define MTKAIFV4_TXIF_V4_MASK_SFT (0x1 << 11) +#define MTKAIFV4_ADDA6_OUT_EN_SEL_SFT 10 +#define MTKAIFV4_ADDA6_OUT_EN_SEL_MASK 0x1 +#define MTKAIFV4_ADDA6_OUT_EN_SEL_MASK_SFT (0x1 << 10) +#define MTKAIFV4_ADDA_OUT_EN_SEL_SFT 9 +#define MTKAIFV4_ADDA_OUT_EN_SEL_MASK 0x1 +#define MTKAIFV4_ADDA_OUT_EN_SEL_MASK_SFT (0x1 << 9) +#define MTKAIFV4_TXIF_INPUT_MODE_SFT 4 +#define MTKAIFV4_TXIF_INPUT_MODE_MASK 0x1f +#define MTKAIFV4_TXIF_INPUT_MODE_MASK_SFT (0x1f << 4) +#define MTKAIFV4_TXIF_FOUR_CHANNEL_SFT 1 +#define MTKAIFV4_TXIF_FOUR_CHANNEL_MASK 0x1 +#define MTKAIFV4_TXIF_FOUR_CHANNEL_MASK_SFT (0x1 << 1) +#define MTKAIFV4_TXIF_AFE_ON_SFT 0 +#define MTKAIFV4_TXIF_AFE_ON_MASK 0x1 +#define MTKAIFV4_TXIF_AFE_ON_MASK_SFT (0x1 << 0) + +/* AFE_ADDA6_MTKAIFV4_TX_CFG0 */ +#define ADDA6_MTKAIFV4_TXIF_EN_SEL_SFT 12 +#define ADDA6_MTKAIFV4_TXIF_EN_SEL_MASK 0x1 +#define ADDA6_MTKAIFV4_TXIF_EN_SEL_MASK_SFT (0x1 << 12) +#define ADDA6_MTKAIFV4_TXIF_INPUT_MODE_SFT 4 +#define ADDA6_MTKAIFV4_TXIF_INPUT_MODE_MASK 0x1f +#define ADDA6_MTKAIFV4_TXIF_INPUT_MODE_MASK_SFT (0x1f << 4) +#define ADDA6_MTKAIFV4_TXIF_FOUR_CHANNEL_SFT 1 +#define ADDA6_MTKAIFV4_TXIF_FOUR_CHANNEL_MASK 0x1 +#define ADDA6_MTKAIFV4_TXIF_FOUR_CHANNEL_MASK_SFT (0x1 << 1) +#define ADDA6_MTKAIFV4_TXIF_AFE_ON_SFT 0 +#define ADDA6_MTKAIFV4_TXIF_AFE_ON_MASK 0x1 +#define ADDA6_MTKAIFV4_TXIF_AFE_ON_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_MTKAIFV4_RX_CFG0 */ +#define MTKAIFV4_RXIF_CLKINV_SFT 31 +#define MTKAIFV4_RXIF_CLKINV_MASK 0x1 +#define MTKAIFV4_RXIF_CLKINV_MASK_SFT (0x1 
<< 31) +#define MTKAIFV4_RXIF_LOOPBACK_MODE_SFT 28 +#define MTKAIFV4_RXIF_LOOPBACK_MODE_MASK 0x1 +#define MTKAIFV4_RXIF_LOOPBACK_MODE_MASK_SFT (0x1 << 28) +#define MTKAIFV4_UL_CH7CH8_IN_EN_SEL_SFT 19 +#define MTKAIFV4_UL_CH7CH8_IN_EN_SEL_MASK 0x1 +#define MTKAIFV4_UL_CH7CH8_IN_EN_SEL_MASK_SFT (0x1 << 19) +#define MTKAIFV4_UL_CH5CH6_IN_EN_SEL_SFT 18 +#define MTKAIFV4_UL_CH5CH6_IN_EN_SEL_MASK 0x1 +#define MTKAIFV4_UL_CH5CH6_IN_EN_SEL_MASK_SFT (0x1 << 18) +#define MTKAIFV4_UL_CH3CH4_IN_EN_SEL_SFT 17 +#define MTKAIFV4_UL_CH3CH4_IN_EN_SEL_MASK 0x1 +#define MTKAIFV4_UL_CH3CH4_IN_EN_SEL_MASK_SFT (0x1 << 17) +#define MTKAIFV4_UL_CH1CH2_IN_EN_SEL_SFT 16 +#define MTKAIFV4_UL_CH1CH2_IN_EN_SEL_MASK 0x1 +#define MTKAIFV4_UL_CH1CH2_IN_EN_SEL_MASK_SFT (0x1 << 16) +#define MTKAIFV4_RXIF_EN_SEL_SFT 12 +#define MTKAIFV4_RXIF_EN_SEL_MASK 0x1 +#define MTKAIFV4_RXIF_EN_SEL_MASK_SFT (0x1 << 12) +#define MTKAIFV4_RXIF_INPUT_MODE_SFT 4 +#define MTKAIFV4_RXIF_INPUT_MODE_MASK 0x1f +#define MTKAIFV4_RXIF_INPUT_MODE_MASK_SFT (0x1f << 4) +#define MTKAIFV4_RXIF_FOUR_CHANNEL_SFT 1 +#define MTKAIFV4_RXIF_FOUR_CHANNEL_MASK 0x1 +#define MTKAIFV4_RXIF_FOUR_CHANNEL_MASK_SFT (0x1 << 1) +#define MTKAIFV4_RXIF_AFE_ON_SFT 0 +#define MTKAIFV4_RXIF_AFE_ON_MASK 0x1 +#define MTKAIFV4_RXIF_AFE_ON_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_MTKAIFV4_RX_CFG1 */ +#define MTKAIFV4_RXIF_SYNC_CNT_TABLE_SFT 17 +#define MTKAIFV4_RXIF_SYNC_CNT_TABLE_MASK 0xfff +#define MTKAIFV4_RXIF_SYNC_CNT_TABLE_MASK_SFT (0xfff << 17) +#define MTKAIFV4_RXIF_SYNC_SEARCH_TABLE_SFT 12 +#define MTKAIFV4_RXIF_SYNC_SEARCH_TABLE_MASK 0x1f +#define MTKAIFV4_RXIF_SYNC_SEARCH_TABLE_MASK_SFT (0x1f << 12) +#define MTKAIFV4_RXIF_INVAILD_SYNC_CHECK_ROUND_SFT 8 +#define MTKAIFV4_RXIF_INVAILD_SYNC_CHECK_ROUND_MASK 0xf +#define MTKAIFV4_RXIF_INVAILD_SYNC_CHECK_ROUND_MASK_SFT (0xf << 8) +#define MTKAIFV4_RXIF_SYNC_CHECK_ROUND_SFT 4 +#define MTKAIFV4_RXIF_SYNC_CHECK_ROUND_MASK 0xf +#define MTKAIFV4_RXIF_SYNC_CHECK_ROUND_MASK_SFT (0xf << 4) +#define 
MTKAIFV4_RXIF_FIFO_RSP_SFT 1 +#define MTKAIFV4_RXIF_FIFO_RSP_MASK 0x7 +#define MTKAIFV4_RXIF_FIFO_RSP_MASK_SFT (0x7 << 1) +#define MTKAIFV4_RXIF_SELF_DEFINE_TABLE_SFT 0 +#define MTKAIFV4_RXIF_SELF_DEFINE_TABLE_MASK 0x1 +#define MTKAIFV4_RXIF_SELF_DEFINE_TABLE_MASK_SFT (0x1 << 0) + +/* AFE_ADDA6_MTKAIFV4_RX_CFG0 */ +#define ADDA6_MTKAIFV4_RXIF_CLKINV_SFT 31 +#define ADDA6_MTKAIFV4_RXIF_CLKINV_MASK 0x1 +#define ADDA6_MTKAIFV4_RXIF_CLKINV_MASK_SFT (0x1 << 31) +#define ADDA6_MTKAIFV4_RXIF_LOOPBACK_MODE_SFT 28 +#define ADDA6_MTKAIFV4_RXIF_LOOPBACK_MODE_MASK 0x1 +#define ADDA6_MTKAIFV4_RXIF_LOOPBACK_MODE_MASK_SFT (0x1 << 28) +#define ADDA6_MTKAIFV4_RXIF_EN_SEL_SFT 12 +#define ADDA6_MTKAIFV4_RXIF_EN_SEL_MASK 0x1 +#define ADDA6_MTKAIFV4_RXIF_EN_SEL_MASK_SFT (0x1 << 12) +#define ADDA6_MTKAIFV4_RXIF_INPUT_MODE_SFT 4 +#define ADDA6_MTKAIFV4_RXIF_INPUT_MODE_MASK 0x1f +#define ADDA6_MTKAIFV4_RXIF_INPUT_MODE_MASK_SFT (0x1f << 4) +#define ADDA6_MTKAIFV4_RXIF_FOUR_CHANNEL_SFT 1 +#define ADDA6_MTKAIFV4_RXIF_FOUR_CHANNEL_MASK 0x1 +#define ADDA6_MTKAIFV4_RXIF_FOUR_CHANNEL_MASK_SFT (0x1 << 1) +#define ADDA6_MTKAIFV4_RXIF_AFE_ON_SFT 0 +#define ADDA6_MTKAIFV4_RXIF_AFE_ON_MASK 0x1 +#define ADDA6_MTKAIFV4_RXIF_AFE_ON_MASK_SFT (0x1 << 0) + +/* AFE_ADDA6_MTKAIFV4_RX_CFG1 */ +#define ADDA6_MTKAIFV4_RXIF_SYNC_CNT_TABLE_SFT 17 +#define ADDA6_MTKAIFV4_RXIF_SYNC_CNT_TABLE_MASK 0xfff +#define ADDA6_MTKAIFV4_RXIF_SYNC_CNT_TABLE_MASK_SFT (0xfff << 17) +#define ADDA6_MTKAIFV4_RXIF_SYNC_SEARCH_TABLE_SFT 12 +#define ADDA6_MTKAIFV4_RXIF_SYNC_SEARCH_TABLE_MASK 0x1f +#define ADDA6_MTKAIFV4_RXIF_SYNC_SEARCH_TABLE_MASK_SFT (0x1f << 12) +#define ADDA6_MTKAIFV4_RXIF_INVAILD_SYNC_CHECK_ROUND_SFT 8 +#define ADDA6_MTKAIFV4_RXIF_INVAILD_SYNC_CHECK_ROUND_MASK 0xf +#define ADDA6_MTKAIFV4_RXIF_INVAILD_SYNC_CHECK_ROUND_MASK_SFT (0xf << 8) +#define ADDA6_MTKAIFV4_RXIF_SYNC_CHECK_ROUND_SFT 4 +#define ADDA6_MTKAIFV4_RXIF_SYNC_CHECK_ROUND_MASK 0xf +#define ADDA6_MTKAIFV4_RXIF_SYNC_CHECK_ROUND_MASK_SFT (0xf << 4) 
+#define ADDA6_MTKAIFV4_RXIF_FIFO_RSP_SFT 1 +#define ADDA6_MTKAIFV4_RXIF_FIFO_RSP_MASK 0x7 +#define ADDA6_MTKAIFV4_RXIF_FIFO_RSP_MASK_SFT (0x7 << 1) +#define ADDA6_MTKAIFV4_RXIF_SELF_DEFINE_TABLE_SFT 0 +#define ADDA6_MTKAIFV4_RXIF_SELF_DEFINE_TABLE_MASK 0x1 +#define ADDA6_MTKAIFV4_RXIF_SELF_DEFINE_TABLE_MASK_SFT (0x1 << 0) + +/* AFE_ADDA_MTKAIFV4_TX_SYNCWORD_CFG */ +#define ADDA6_MTKAIFV4_TXIF_SYNCWORD_SFT 16 +#define ADDA6_MTKAIFV4_TXIF_SYNCWORD_MASK 0xffff +#define ADDA6_MTKAIFV4_TXIF_SYNCWORD_MASK_SFT (0xffff << 16) +#define ADDA_MTKAIFV4_TXIF_SYNCWORD_SFT 0 +#define ADDA_MTKAIFV4_TXIF_SYNCWORD_MASK 0xffff +#define ADDA_MTKAIFV4_TXIF_SYNCWORD_MASK_SFT (0xffff << 0) + +/* AFE_ADDA_MTKAIFV4_RX_SYNCWORD_CFG */ +#define ADDA6_MTKAIFV4_RXIF_SYNCWORD_SFT 16 +#define ADDA6_MTKAIFV4_RXIF_SYNCWORD_MASK 0xffff +#define ADDA6_MTKAIFV4_RXIF_SYNCWORD_MASK_SFT (0xffff << 16) +#define ADDA_MTKAIFV4_RXIF_SYNCWORD_SFT 0 +#define ADDA_MTKAIFV4_RXIF_SYNCWORD_MASK 0xffff +#define ADDA_MTKAIFV4_RXIF_SYNCWORD_MASK_SFT (0xffff << 0) + +/* AFE_ADDA_MTKAIFV4_MON0 */ +#define MTKAIFV4_TXIF_SDATA_OUT_SFT 23 +#define MTKAIFV4_TXIF_SDATA_OUT_MASK 0x1 +#define MTKAIFV4_TXIF_SDATA_OUT_MASK_SFT (0x1 << 23) +#define MTKAIFV4_RXIF_SDATA_IN_SFT 22 +#define MTKAIFV4_RXIF_SDATA_IN_MASK 0x1 +#define MTKAIFV4_RXIF_SDATA_IN_MASK_SFT (0x1 << 22) +#define MTKAIFV4_RXIF_SEARCH_FAIL_FLAG_SFT 21 +#define MTKAIFV4_RXIF_SEARCH_FAIL_FLAG_MASK 0x1 +#define MTKAIFV4_RXIF_SEARCH_FAIL_FLAG_MASK_SFT (0x1 << 21) +#define MTKAIFV4_RXIF_ADC_FIFO_STATUS_SFT 0 +#define MTKAIFV4_RXIF_ADC_FIFO_STATUS_MASK 0xfff +#define MTKAIFV4_RXIF_ADC_FIFO_STATUS_MASK_SFT (0xfff << 0) + +/* AFE_ADDA_MTKAIFV4_MON1 */ +#define MTKAIFV4_RXIF_OUT_CH4_SFT 24 +#define MTKAIFV4_RXIF_OUT_CH4_MASK 0xff +#define MTKAIFV4_RXIF_OUT_CH4_MASK_SFT (0xff << 24) +#define MTKAIFV4_RXIF_OUT_CH3_SFT 16 +#define MTKAIFV4_RXIF_OUT_CH3_MASK 0xff +#define MTKAIFV4_RXIF_OUT_CH3_MASK_SFT (0xff << 16) +#define MTKAIFV4_RXIF_OUT_CH2_SFT 8 +#define 
MTKAIFV4_RXIF_OUT_CH2_MASK 0xff +#define MTKAIFV4_RXIF_OUT_CH2_MASK_SFT (0xff << 8) +#define MTKAIFV4_RXIF_OUT_CH1_SFT 0 +#define MTKAIFV4_RXIF_OUT_CH1_MASK 0xff +#define MTKAIFV4_RXIF_OUT_CH1_MASK_SFT (0xff << 0) + +/* AFE_ADDA6_MTKAIFV4_MON0 */ +#define ADDA6_MTKAIFV4_TXIF_SDATA_OUT_SFT 23 +#define ADDA6_MTKAIFV4_TXIF_SDATA_OUT_MASK 0x1 +#define ADDA6_MTKAIFV4_TXIF_SDATA_OUT_MASK_SFT (0x1 << 23) +#define ADDA6_MTKAIFV4_RXIF_SDATA_IN_SFT 22 +#define ADDA6_MTKAIFV4_RXIF_SDATA_IN_MASK 0x1 +#define ADDA6_MTKAIFV4_RXIF_SDATA_IN_MASK_SFT (0x1 << 22) +#define ADDA6_MTKAIFV4_RXIF_SEARCH_FAIL_FLAG_SFT 21 +#define ADDA6_MTKAIFV4_RXIF_SEARCH_FAIL_FLAG_MASK 0x1 +#define ADDA6_MTKAIFV4_RXIF_SEARCH_FAIL_FLAG_MASK_SFT (0x1 << 21) +#define ADDA6_MTKAIFV3P3_RXIF_ADC_FIFO_STATUS_SFT 0 +#define ADDA6_MTKAIFV3P3_RXIF_ADC_FIFO_STATUS_MASK 0xfff +#define ADDA6_MTKAIFV3P3_RXIF_ADC_FIFO_STATUS_MASK_SFT (0xfff << 0) + +/* ETDM_IN0_CON0 */ +#define REG_ETDM_IN_EN_SFT 0 +#define REG_ETDM_IN_EN_MASK 0x1 +#define REG_ETDM_IN_EN_MASK_SFT (0x1 << 0) +#define REG_SYNC_MODE_SFT 1 +#define REG_SYNC_MODE_MASK 0x1 +#define REG_SYNC_MODE_MASK_SFT (0x1 << 1) +#define REG_LSB_FIRST_SFT 3 +#define REG_LSB_FIRST_MASK 0x1 +#define REG_LSB_FIRST_MASK_SFT (0x1 << 3) +#define REG_SOFT_RST_SFT 4 +#define REG_SOFT_RST_MASK 0x1 +#define REG_SOFT_RST_MASK_SFT (0x1 << 4) +#define REG_SLAVE_MODE_SFT 5 +#define REG_SLAVE_MODE_MASK 0x1 +#define REG_SLAVE_MODE_MASK_SFT (0x1 << 5) +#define REG_FMT_SFT 6 +#define REG_FMT_MASK 0x7 +#define REG_FMT_MASK_SFT (0x7 << 6) +#define REG_LRCK_EDGE_SEL_SFT 10 +#define REG_LRCK_EDGE_SEL_MASK 0x1 +#define REG_LRCK_EDGE_SEL_MASK_SFT (0x1 << 10) +#define REG_BIT_LENGTH_SFT 11 +#define REG_BIT_LENGTH_MASK 0x1f +#define REG_BIT_LENGTH_MASK_SFT (0x1f << 11) +#define REG_WORD_LENGTH_SFT 16 +#define REG_WORD_LENGTH_MASK 0x1f +#define REG_WORD_LENGTH_MASK_SFT (0x1f << 16) +#define REG_CH_NUM_SFT 23 +#define REG_CH_NUM_MASK 0x1f +#define REG_CH_NUM_MASK_SFT (0x1f << 23) +#define 
REG_RELATCH_1X_EN_DOMAIN_SEL_SFT 28 +#define REG_RELATCH_1X_EN_DOMAIN_SEL_MASK 0x7 +#define REG_RELATCH_1X_EN_DOMAIN_SEL_MASK_SFT (0x7 << 28) +#define REG_VALID_TOGETHER_SFT 31 +#define REG_VALID_TOGETHER_MASK 0x1 +#define REG_VALID_TOGETHER_MASK_SFT (0x1 << 31) + +/* ETDM_IN0_CON1 */ +/* ETDM_IN1_CON1 */ +/* ETDM_IN2_CON1 */ +/* ETDM_IN3_CON1 */ +/* ETDM_IN4_CON1 */ +/* ETDM_IN5_CON1 */ +/* ETDM_IN6_CON1 */ +#define REG_INITIAL_COUNT_SFT 0 +#define REG_INITIAL_COUNT_MASK 0x1f +#define REG_INITIAL_COUNT_MASK_SFT (0x1f << 0) +#define REG_INITIAL_POINT_SFT 5 +#define REG_INITIAL_POINT_MASK 0x1f +#define REG_INITIAL_POINT_MASK_SFT (0x1f << 5) +#define REG_LRCK_AUTO_OFF_SFT 10 +#define REG_LRCK_AUTO_OFF_MASK 0x1 +#define REG_LRCK_AUTO_OFF_MASK_SFT (0x1 << 10) +#define REG_BCK_AUTO_OFF_SFT 11 +#define REG_BCK_AUTO_OFF_MASK 0x1 +#define REG_BCK_AUTO_OFF_MASK_SFT (0x1 << 11) +#define REG_INITIAL_LRCK_SFT 13 +#define REG_INITIAL_LRCK_MASK 0x1 +#define REG_INITIAL_LRCK_MASK_SFT (0x1 << 13) +#define REG_NO_ALIGN_1X_EN_SFT 14 +#define REG_NO_ALIGN_1X_EN_MASK 0x1 +#define REG_NO_ALIGN_1X_EN_MASK_SFT (0x1 << 14) +#define REG_LRCK_RESET_SFT 15 +#define REG_LRCK_RESET_MASK 0x1 +#define REG_LRCK_RESET_MASK_SFT (0x1 << 15) +#define PINMUX_MCLK_CTRL_OE_SFT 16 +#define PINMUX_MCLK_CTRL_OE_MASK 0x1 +#define PINMUX_MCLK_CTRL_OE_MASK_SFT (0x1 << 16) +#define REG_OUTPUT_CR_EN_SFT 18 +#define REG_OUTPUT_CR_EN_MASK 0x1 +#define REG_OUTPUT_CR_EN_MASK_SFT (0x1 << 18) +#define REG_LR_ALIGN_SFT 19 +#define REG_LR_ALIGN_MASK 0x1 +#define REG_LR_ALIGN_MASK_SFT (0x1 << 19) +#define REG_LRCK_WIDTH_SFT 20 +#define REG_LRCK_WIDTH_MASK 0x3ff +#define REG_LRCK_WIDTH_MASK_SFT (0x3ff << 20) +#define REG_DIRECT_INPUT_MASTER_BCK_SFT 30 +#define REG_DIRECT_INPUT_MASTER_BCK_MASK 0x1 +#define REG_DIRECT_INPUT_MASTER_BCK_MASK_SFT (0x1 << 30) +#define REG_LRCK_AUTO_MODE_SFT 31 +#define REG_LRCK_AUTO_MODE_MASK 0x1 +#define REG_LRCK_AUTO_MODE_MASK_SFT (0x1 << 31) + +/* ETDM_IN0_CON2 */ +/* ETDM_IN1_CON2 */ +/* 
ETDM_IN2_CON2 */ +/* ETDM_IN3_CON2 */ +/* ETDM_IN4_CON2 */ +/* ETDM_IN5_CON2 */ +/* ETDM_IN6_CON2 */ +#define REG_UPDATE_POINT_SFT 0 +#define REG_UPDATE_POINT_MASK 0x1f +#define REG_UPDATE_POINT_MASK_SFT (0x1f << 0) +#define REG_UPDATE_GAP_SFT 5 +#define REG_UPDATE_GAP_MASK 0x1f +#define REG_UPDATE_GAP_MASK_SFT (0x1f << 5) +#define REG_CLOCK_SOURCE_SEL_SFT 10 +#define REG_CLOCK_SOURCE_SEL_MASK 0x7 +#define REG_CLOCK_SOURCE_SEL_MASK_SFT (0x7 << 10) +#define REG_CK_EN_SEL_AUTO_SFT 14 +#define REG_CK_EN_SEL_AUTO_MASK 0x1 +#define REG_CK_EN_SEL_AUTO_MASK_SFT (0x1 << 14) +#define REG_MULTI_IP_TOTAL_CHNUM_SFT 15 +#define REG_MULTI_IP_TOTAL_CHNUM_MASK 0x1f +#define REG_MULTI_IP_TOTAL_CHNUM_MASK_SFT (0x1f << 15) +#define REG_MASK_AUTO_SFT 20 +#define REG_MASK_AUTO_MASK 0x1 +#define REG_MASK_AUTO_MASK_SFT (0x1 << 20) +#define REG_MASK_NUM_SFT 21 +#define REG_MASK_NUM_MASK 0x1f +#define REG_MASK_NUM_MASK_SFT (0x1f << 21) +#define REG_UPDATE_POINT_AUTO_SFT 26 +#define REG_UPDATE_POINT_AUTO_MASK 0x1 +#define REG_UPDATE_POINT_AUTO_MASK_SFT (0x1 << 26) +#define REG_SDATA_DELAY_0P5T_EN_SFT 27 +#define REG_SDATA_DELAY_0P5T_EN_MASK 0x1 +#define REG_SDATA_DELAY_0P5T_EN_MASK_SFT (0x1 << 27) +#define REG_SDATA_DELAY_BCK_INV_SFT 28 +#define REG_SDATA_DELAY_BCK_INV_MASK 0x1 +#define REG_SDATA_DELAY_BCK_INV_MASK_SFT (0x1 << 28) +#define REG_LRCK_DELAY_0P5T_EN_SFT 29 +#define REG_LRCK_DELAY_0P5T_EN_MASK 0x1 +#define REG_LRCK_DELAY_0P5T_EN_MASK_SFT (0x1 << 29) +#define REG_LRCK_DELAY_BCK_INV_SFT 30 +#define REG_LRCK_DELAY_BCK_INV_MASK 0x1 +#define REG_LRCK_DELAY_BCK_INV_MASK_SFT (0x1 << 30) +#define REG_MULTI_IP_MODE_SFT 31 +#define REG_MULTI_IP_MODE_MASK 0x1 +#define REG_MULTI_IP_MODE_MASK_SFT (0x1 << 31) + +/* ETDM_IN0_CON3 */ +/* ETDM_IN1_CON3 */ +/* ETDM_IN2_CON3 */ +/* ETDM_IN3_CON3 */ +/* ETDM_IN4_CON3 */ +/* ETDM_IN5_CON3 */ +/* ETDM_IN6_CON3 */ +#define REG_DISABLE_OUT_SFT 0 +#define REG_DISABLE_OUT_MASK 0xffff +#define REG_DISABLE_OUT_MASK_SFT (0xffff << 0) +#define 
REG_RJ_DATA_RIGHT_ALIGN_SFT 16 +#define REG_RJ_DATA_RIGHT_ALIGN_MASK 0x1 +#define REG_RJ_DATA_RIGHT_ALIGN_MASK_SFT (0x1 << 16) +#define REG_MONITOR_SEL_SFT 17 +#define REG_MONITOR_SEL_MASK 0x3 +#define REG_MONITOR_SEL_MASK_SFT (0x3 << 17) +#define REG_CNT_UPPER_LIMIT_SFT 19 +#define REG_CNT_UPPER_LIMIT_MASK 0x3f +#define REG_CNT_UPPER_LIMIT_MASK_SFT (0x3f << 19) +#define REG_COMPACT_SAMPLE_END_DIS_SFT 25 +#define REG_COMPACT_SAMPLE_END_DIS_MASK 0x1 +#define REG_COMPACT_SAMPLE_END_DIS_MASK_SFT (0x1 << 25) +#define REG_FS_TIMING_SEL_SFT 26 +#define REG_FS_TIMING_SEL_MASK 0x1f +#define REG_FS_TIMING_SEL_MASK_SFT (0x1f << 26) +#define REG_SAMPLE_END_MODE_SFT 31 +#define REG_SAMPLE_END_MODE_MASK 0x1 +#define REG_SAMPLE_END_MODE_MASK_SFT (0x1 << 31) + +/* ETDM_IN0_CON4 */ +/* ETDM_IN1_CON4 */ +/* ETDM_IN2_CON4 */ +/* ETDM_IN3_CON4 */ +/* ETDM_IN4_CON4 */ +/* ETDM_IN5_CON4 */ +/* ETDM_IN6_CON4 */ +#define REG_ALWAYS_OPEN_1X_EN_SFT 31 +#define REG_ALWAYS_OPEN_1X_EN_MASK 0x1 +#define REG_ALWAYS_OPEN_1X_EN_MASK_SFT (0x1 << 31) +#define REG_WAIT_LAST_SAMPLE_SFT 30 +#define REG_WAIT_LAST_SAMPLE_MASK 0x1 +#define REG_WAIT_LAST_SAMPLE_MASK_SFT (0x1 << 30) +#define REG_SAMPLE_END_POINT_SFT 25 +#define REG_SAMPLE_END_POINT_MASK 0x1f +#define REG_SAMPLE_END_POINT_MASK_SFT (0x1f << 25) +#define REG_RELATCH_1X_EN_SEL_SFT 20 +#define REG_RELATCH_1X_EN_SEL_MASK 0x1f +#define REG_RELATCH_1X_EN_SEL_MASK_SFT (0x1f << 20) +#define REG_MASTER_WS_INV_SFT 19 +#define REG_MASTER_WS_INV_MASK 0x1 +#define REG_MASTER_WS_INV_MASK_SFT (0x1 << 19) +#define REG_MASTER_BCK_INV_SFT 18 +#define REG_MASTER_BCK_INV_MASK 0x1 +#define REG_MASTER_BCK_INV_MASK_SFT (0x1 << 18) +#define REG_SLAVE_LRCK_INV_SFT 17 +#define REG_SLAVE_LRCK_INV_MASK 0x1 +#define REG_SLAVE_LRCK_INV_MASK_SFT (0x1 << 17) +#define REG_SLAVE_BCK_INV_SFT 16 +#define REG_SLAVE_BCK_INV_MASK 0x1 +#define REG_SLAVE_BCK_INV_MASK_SFT (0x1 << 16) +#define REG_REPACK_CHNUM_SFT 12 +#define REG_REPACK_CHNUM_MASK 0xf +#define 
REG_REPACK_CHNUM_MASK_SFT (0xf << 12) +#define REG_ASYNC_RESET_SFT 11 +#define REG_ASYNC_RESET_MASK 0x1 +#define REG_ASYNC_RESET_MASK_SFT (0x1 << 11) +#define REG_REPACK_WORD_LENGTH_SFT 9 +#define REG_REPACK_WORD_LENGTH_MASK 0x3 +#define REG_REPACK_WORD_LENGTH_MASK_SFT (0x3 << 9) +#define REG_REPACK_AUTO_MODE_SFT 8 +#define REG_REPACK_AUTO_MODE_MASK 0x1 +#define REG_REPACK_AUTO_MODE_MASK_SFT (0x1 << 8) +#define REG_REPACK_MODE_SFT 0 +#define REG_REPACK_MODE_MASK 0x3f +#define REG_REPACK_MODE_MASK_SFT (0x3f << 0) + +/* ETDM_IN0_CON5 */ +/* ETDM_IN1_CON5 */ +/* ETDM_IN2_CON5 */ +/* ETDM_IN3_CON5 */ +/* ETDM_IN4_CON5 */ +/* ETDM_IN5_CON5 */ +/* ETDM_IN6_CON5 */ +#define REG_LR_SWAP_SFT 16 +#define REG_LR_SWAP_MASK 0xffff +#define REG_LR_SWAP_MASK_SFT (0xffff << 16) +#define REG_ODD_FLAG_EN_SFT 0 +#define REG_ODD_FLAG_EN_MASK 0xffff +#define REG_ODD_FLAG_EN_MASK_SFT (0xffff << 0) + +/* ETDM_IN0_CON6 */ +/* ETDM_IN1_CON6 */ +/* ETDM_IN2_CON6 */ +/* ETDM_IN3_CON6 */ +/* ETDM_IN4_CON6 */ +/* ETDM_IN5_CON6 */ +/* ETDM_IN6_CON6 */ +#define LCH_DATA_REG_SFT 0 +#define LCH_DATA_REG_MASK 0xffffffff +#define LCH_DATA_REG_MASK_SFT (0xffffffff << 0) + +/* ETDM_IN0_CON7 */ +/* ETDM_IN1_CON7 */ +/* ETDM_IN2_CON7 */ +/* ETDM_IN3_CON7 */ +/* ETDM_IN4_CON7 */ +/* ETDM_IN5_CON7 */ +/* ETDM_IN6_CON7 */ +#define RCH_DATA_REG_SFT 0 +#define RCH_DATA_REG_MASK 0xffffffff +#define RCH_DATA_REG_MASK_SFT (0xffffffff << 0) + +/* ETDM_IN0_CON8 */ +/* ETDM_IN1_CON8 */ +/* ETDM_IN2_CON8 */ +/* ETDM_IN3_CON8 */ +/* ETDM_IN4_CON8 */ +/* ETDM_IN5_CON8 */ +/* ETDM_IN6_CON8 */ +#define REG_AFIFO_THRESHOLD_SFT 29 +#define REG_AFIFO_THRESHOLD_MASK 0x3 +#define REG_AFIFO_THRESHOLD_MASK_SFT (0x3 << 29) +#define REG_CK_EN_SEL_MANUAL_SFT 16 +#define REG_CK_EN_SEL_MANUAL_MASK 0x3ff +#define REG_CK_EN_SEL_MANUAL_MASK_SFT (0x3ff << 16) +#define REG_AFIFO_SW_RESET_SFT 15 +#define REG_AFIFO_SW_RESET_MASK 0x1 +#define REG_AFIFO_SW_RESET_MASK_SFT (0x1 << 15) +#define REG_AFIFO_RESET_SEL_SFT 14 +#define 
REG_AFIFO_RESET_SEL_MASK 0x1 +#define REG_AFIFO_RESET_SEL_MASK_SFT (0x1 << 14) +#define REG_AFIFO_AUTO_RESET_DIS_SFT 9 +#define REG_AFIFO_AUTO_RESET_DIS_MASK 0x1 +#define REG_AFIFO_AUTO_RESET_DIS_MASK_SFT (0x1 << 9) +#define REG_ETDM_USE_AFIFO_SFT 8 +#define REG_ETDM_USE_AFIFO_MASK 0x1 +#define REG_ETDM_USE_AFIFO_MASK_SFT (0x1 << 8) +#define REG_AFIFO_CLOCK_DOMAIN_SEL_SFT 5 +#define REG_AFIFO_CLOCK_DOMAIN_SEL_MASK 0x7 +#define REG_AFIFO_CLOCK_DOMAIN_SEL_MASK_SFT (0x7 << 5) +#define REG_AFIFO_MODE_SFT 0 +#define REG_AFIFO_MODE_MASK 0x1f +#define REG_AFIFO_MODE_MASK_SFT (0x1f << 0) + +/* ETDM_IN0_CON9 */ +/* ETDM_IN1_CON9 */ +/* ETDM_IN2_CON9 */ +/* ETDM_IN3_CON9 */ +/* ETDM_IN4_CON9 */ +/* ETDM_IN5_CON9 */ +/* ETDM_IN6_CON9 */ +#define REG_OUT2LATCH_TIME_SFT 10 +#define REG_OUT2LATCH_TIME_MASK 0x1f +#define REG_OUT2LATCH_TIME_MASK_SFT (0x1f << 10) +#define REG_ALMOST_END_BIT_COUNT_SFT 5 +#define REG_ALMOST_END_BIT_COUNT_MASK 0x1f +#define REG_ALMOST_END_BIT_COUNT_MASK_SFT (0x1f << 5) +#define REG_ALMOST_END_CH_COUNT_SFT 0 +#define REG_ALMOST_END_CH_COUNT_MASK 0x1f +#define REG_ALMOST_END_CH_COUNT_MASK_SFT (0x1f << 0) + +/* ETDM_IN0_MON */ +/* ETDM_IN1_MON */ +/* ETDM_IN2_MON */ +/* ETDM_IN3_MON */ +/* ETDM_IN4_MON */ +/* ETDM_IN5_MON */ +/* ETDM_IN6_MON */ +#define LRCK_INV_SFT 30 +#define LRCK_INV_MASK 0x1 +#define LRCK_INV_MASK_SFT (0x1 << 30) +#define EN_SYNC_OUT_SFT 29 +#define EN_SYNC_OUT_MASK 0x1 +#define EN_SYNC_OUT_MASK_SFT (0x1 << 29) +#define HOPPING_EN_SYNC_OUT_PRE_SFT 28 +#define HOPPING_EN_SYNC_OUT_PRE_MASK 0x1 +#define HOPPING_EN_SYNC_OUT_PRE_MASK_SFT (0x1 << 28) +#define WFULL_SFT 27 +#define WFULL_MASK 0x1 +#define WFULL_MASK_SFT (0x1 << 27) +#define REMPTY_SFT 26 +#define REMPTY_MASK 0x1 +#define REMPTY_MASK_SFT (0x1 << 26) +#define ETDM_2X_CK_EN_SFT 25 +#define ETDM_2X_CK_EN_MASK 0x1 +#define ETDM_2X_CK_EN_MASK_SFT (0x1 << 25) +#define ETDM_1X_CK_EN_SFT 24 +#define ETDM_1X_CK_EN_MASK 0x1 +#define ETDM_1X_CK_EN_MASK_SFT (0x1 << 24) +#define 
SDATA0_SFT 23 +#define SDATA0_MASK 0x1 +#define SDATA0_MASK_SFT (0x1 << 23) +#define CURRENT_STATUS_SFT 21 +#define CURRENT_STATUS_MASK 0x3 +#define CURRENT_STATUS_MASK_SFT (0x3 << 21) +#define BIT_POINT_SFT 16 +#define BIT_POINT_MASK 0x1f +#define BIT_POINT_MASK_SFT (0x1f << 16) +#define BIT_CH_COUNT_SFT 10 +#define BIT_CH_COUNT_MASK 0x3f +#define BIT_CH_COUNT_MASK_SFT (0x3f << 10) +#define BIT_COUNT_SFT 5 +#define BIT_COUNT_MASK 0x1f +#define BIT_COUNT_MASK_SFT (0x1f << 5) +#define CH_COUNT_SFT 0 +#define CH_COUNT_MASK 0x1f +#define CH_COUNT_MASK_SFT (0x1f << 0) + +/* ETDM_OUT0_CON0 */ +/* ETDM_OUT1_CON0 */ +/* ETDM_OUT2_CON0 */ +/* ETDM_OUT3_CON0 */ +/* ETDM_OUT4_CON0 */ +/* ETDM_OUT5_CON0 */ +/* ETDM_OUT6_CON0 */ +#define OUT_REG_ETDM_OUT_EN_SFT 0 +#define OUT_REG_ETDM_OUT_EN_MASK 0x1 +#define OUT_REG_ETDM_OUT_EN_MASK_SFT (0x1 << 0) +#define OUT_REG_SYNC_MODE_SFT 1 +#define OUT_REG_SYNC_MODE_MASK 0x1 +#define OUT_REG_SYNC_MODE_MASK_SFT (0x1 << 1) +#define OUT_REG_LSB_FIRST_SFT 3 +#define OUT_REG_LSB_FIRST_MASK 0x1 +#define OUT_REG_LSB_FIRST_MASK_SFT (0x1 << 3) +#define OUT_REG_SOFT_RST_SFT 4 +#define OUT_REG_SOFT_RST_MASK 0x1 +#define OUT_REG_SOFT_RST_MASK_SFT (0x1 << 4) +#define OUT_REG_SLAVE_MODE_SFT 5 +#define OUT_REG_SLAVE_MODE_MASK 0x1 +#define OUT_REG_SLAVE_MODE_MASK_SFT (0x1 << 5) +#define OUT_REG_FMT_SFT 6 +#define OUT_REG_FMT_MASK 0x7 +#define OUT_REG_FMT_MASK_SFT (0x7 << 6) +#define OUT_REG_LRCK_EDGE_SEL_SFT 10 +#define OUT_REG_LRCK_EDGE_SEL_MASK 0x1 +#define OUT_REG_LRCK_EDGE_SEL_MASK_SFT (0x1 << 10) +#define OUT_REG_BIT_LENGTH_SFT 11 +#define OUT_REG_BIT_LENGTH_MASK 0x1f +#define OUT_REG_BIT_LENGTH_MASK_SFT (0x1f << 11) +#define OUT_REG_WORD_LENGTH_SFT 16 +#define OUT_REG_WORD_LENGTH_MASK 0x1f +#define OUT_REG_WORD_LENGTH_MASK_SFT (0x1f << 16) +#define OUT_REG_CH_NUM_SFT 23 +#define OUT_REG_CH_NUM_MASK 0x1f +#define OUT_REG_CH_NUM_MASK_SFT (0x1f << 23) +#define OUT_REG_RELATCH_DOMAIN_SEL_SFT 28 +#define OUT_REG_RELATCH_DOMAIN_SEL_MASK 0x7 +#define 
OUT_REG_RELATCH_DOMAIN_SEL_MASK_SFT (0x7 << 28) +#define OUT_REG_VALID_TOGETHER_SFT 31 +#define OUT_REG_VALID_TOGETHER_MASK 0x1 +#define OUT_REG_VALID_TOGETHER_MASK_SFT (0x1 << 31) + +/* ETDM_OUT0_CON1 */ +/* ETDM_OUT1_CON1 */ +/* ETDM_OUT2_CON1 */ +/* ETDM_OUT3_CON1 */ +/* ETDM_OUT4_CON1 */ +/* ETDM_OUT5_CON1 */ +/* ETDM_OUT6_CON1 */ +#define OUT_REG_INITIAL_COUNT_SFT 0 +#define OUT_REG_INITIAL_COUNT_MASK 0x1f +#define OUT_REG_INITIAL_COUNT_MASK_SFT (0x1f << 0) +#define OUT_REG_INITIAL_POINT_SFT 5 +#define OUT_REG_INITIAL_POINT_MASK 0x1f +#define OUT_REG_INITIAL_POINT_MASK_SFT (0x1f << 5) +#define OUT_REG_LRCK_AUTO_OFF_SFT 10 +#define OUT_REG_LRCK_AUTO_OFF_MASK 0x1 +#define OUT_REG_LRCK_AUTO_OFF_MASK_SFT (0x1 << 10) +#define OUT_REG_BCK_AUTO_OFF_SFT 11 +#define OUT_REG_BCK_AUTO_OFF_MASK 0x1 +#define OUT_REG_BCK_AUTO_OFF_MASK_SFT (0x1 << 11) +#define OUT_REG_INITIAL_LRCK_SFT 13 +#define OUT_REG_INITIAL_LRCK_MASK 0x1 +#define OUT_REG_INITIAL_LRCK_MASK_SFT (0x1 << 13) +#define OUT_REG_NO_ALIGN_1X_EN_SFT 14 +#define OUT_REG_NO_ALIGN_1X_EN_MASK 0x1 +#define OUT_REG_NO_ALIGN_1X_EN_MASK_SFT (0x1 << 14) +#define OUT_REG_LRCK_RESET_SFT 15 +#define OUT_REG_LRCK_RESET_MASK 0x1 +#define OUT_REG_LRCK_RESET_MASK_SFT (0x1 << 15) +#define OUT_PINMUX_MCLK_CTRL_OE_SFT 16 +#define OUT_PINMUX_MCLK_CTRL_OE_MASK 0x1 +#define OUT_PINMUX_MCLK_CTRL_OE_MASK_SFT (0x1 << 16) +#define OUT_REG_OUTPUT_CR_EN_SFT 18 +#define OUT_REG_OUTPUT_CR_EN_MASK 0x1 +#define OUT_REG_OUTPUT_CR_EN_MASK_SFT (0x1 << 18) +#define OUT_REG_LRCK_WIDTH_SFT 19 +#define OUT_REG_LRCK_WIDTH_MASK 0x3ff +#define OUT_REG_LRCK_WIDTH_MASK_SFT (0x3ff << 19) +#define OUT_REG_LRCK_AUTO_MODE_SFT 29 +#define OUT_REG_LRCK_AUTO_MODE_MASK 0x1 +#define OUT_REG_LRCK_AUTO_MODE_MASK_SFT (0x1 << 29) +#define OUT_REG_DIRECT_INPUT_MASTER_BCK_SFT 30 +#define OUT_REG_DIRECT_INPUT_MASTER_BCK_MASK 0x1 +#define OUT_REG_DIRECT_INPUT_MASTER_BCK_MASK_SFT (0x1 << 30) +#define OUT_REG_16B_COMPACT_MODE_SFT 31 +#define OUT_REG_16B_COMPACT_MODE_MASK 0x1 
+#define OUT_REG_16B_COMPACT_MODE_MASK_SFT (0x1 << 31) + +/* ETDM_OUT0_CON2 */ +/* ETDM_OUT1_CON2 */ +/* ETDM_OUT2_CON2 */ +/* ETDM_OUT3_CON2 */ +/* ETDM_OUT4_CON2 */ +/* ETDM_OUT5_CON2 */ +/* ETDM_OUT6_CON2 */ +#define OUT_REG_IN2LATCH_TIME_SFT 0 +#define OUT_REG_IN2LATCH_TIME_MASK 0x1f +#define OUT_REG_IN2LATCH_TIME_MASK_SFT (0x1f << 0) +#define OUT_REG_MASK_NUM_SFT 5 +#define OUT_REG_MASK_NUM_MASK 0x1f +#define OUT_REG_MASK_NUM_MASK_SFT (0x1f << 5) +#define OUT_REG_MASK_AUTO_SFT 10 +#define OUT_REG_MASK_AUTO_MASK 0x1 +#define OUT_REG_MASK_AUTO_MASK_SFT (0x1 << 10) +#define OUT_REG_SDATA_SHIFT_SFT 11 +#define OUT_REG_SDATA_SHIFT_MASK 0x3 +#define OUT_REG_SDATA_SHIFT_MASK_SFT (0x3 << 11) +#define OUT_REG_ALMOST_END_BIT_COUNT_SFT 13 +#define OUT_REG_ALMOST_END_BIT_COUNT_MASK 0x1f +#define OUT_REG_ALMOST_END_BIT_COUNT_MASK_SFT (0x1f << 13) +#define OUT_REG_SDATA_CON_SFT 18 +#define OUT_REG_SDATA_CON_MASK 0x3 +#define OUT_REG_SDATA_CON_MASK_SFT (0x3 << 18) +#define OUT_REG_REDUNDANT_0_SFT 20 +#define OUT_REG_REDUNDANT_0_MASK 0x1 +#define OUT_REG_REDUNDANT_0_MASK_SFT (0x1 << 20) +#define OUT_REG_SDATA_AUTO_OFF_SFT 21 +#define OUT_REG_SDATA_AUTO_OFF_MASK 0x1 +#define OUT_REG_SDATA_AUTO_OFF_MASK_SFT (0x1 << 21) +#define OUT_REG_BCK_OFF_TIME_SFT 22 +#define OUT_REG_BCK_OFF_TIME_MASK 0x3 +#define OUT_REG_BCK_OFF_TIME_MASK_SFT (0x3 << 22) +#define OUT_REG_MONITOR_SEL_SFT 24 +#define OUT_REG_MONITOR_SEL_MASK 0x3 +#define OUT_REG_MONITOR_SEL_MASK_SFT (0x3 << 24) +#define OUT_REG_SHIFT_AUTO_SFT 26 +#define OUT_REG_SHIFT_AUTO_MASK 0x1 +#define OUT_REG_SHIFT_AUTO_MASK_SFT (0x1 << 26) +#define OUT_REG_SDATA_DELAY_0P5T_EN_SFT 27 +#define OUT_REG_SDATA_DELAY_0P5T_EN_MASK 0x1 +#define OUT_REG_SDATA_DELAY_0P5T_EN_MASK_SFT (0x1 << 27) +#define OUT_REG_SDATA_DELAY_BCK_INV_SFT 28 +#define OUT_REG_SDATA_DELAY_BCK_INV_MASK 0x1 +#define OUT_REG_SDATA_DELAY_BCK_INV_MASK_SFT (0x1 << 28) +#define OUT_REG_LRCK_DELAY_0P5T_EN_SFT 29 +#define OUT_REG_LRCK_DELAY_0P5T_EN_MASK 0x1 +#define 
OUT_REG_LRCK_DELAY_0P5T_EN_MASK_SFT (0x1 << 29) +#define OUT_REG_LRCK_DELAY_BCK_INV_SFT 30 +#define OUT_REG_LRCK_DELAY_BCK_INV_MASK 0x1 +#define OUT_REG_LRCK_DELAY_BCK_INV_MASK_SFT (0x1 << 30) +#define OUT_REG_OFF_CR_EN_SFT 31 +#define OUT_REG_OFF_CR_EN_MASK 0x1 +#define OUT_REG_OFF_CR_EN_MASK_SFT (0x1 << 31) + +/* ETDM_OUT0_CON3 */ +/* ETDM_OUT1_CON3 */ +/* ETDM_OUT2_CON3 */ +/* ETDM_OUT3_CON3 */ +/* ETDM_OUT4_CON3 */ +/* ETDM_OUT5_CON3 */ +/* ETDM_OUT6_CON3 */ +#define OUT_REG_START_CH_PAIR0_SFT 0 +#define OUT_REG_START_CH_PAIR0_MASK 0xf +#define OUT_REG_START_CH_PAIR0_MASK_SFT (0xf << 0) +#define OUT_REG_START_CH_PAIR1_SFT 4 +#define OUT_REG_START_CH_PAIR1_MASK 0xf +#define OUT_REG_START_CH_PAIR1_MASK_SFT (0xf << 4) +#define OUT_REG_START_CH_PAIR2_SFT 8 +#define OUT_REG_START_CH_PAIR2_MASK 0xf +#define OUT_REG_START_CH_PAIR2_MASK_SFT (0xf << 8) +#define OUT_REG_START_CH_PAIR3_SFT 12 +#define OUT_REG_START_CH_PAIR3_MASK 0xf +#define OUT_REG_START_CH_PAIR3_MASK_SFT (0xf << 12) +#define OUT_REG_START_CH_PAIR4_SFT 16 +#define OUT_REG_START_CH_PAIR4_MASK 0xf +#define OUT_REG_START_CH_PAIR4_MASK_SFT (0xf << 16) +#define OUT_REG_START_CH_PAIR5_SFT 20 +#define OUT_REG_START_CH_PAIR5_MASK 0xf +#define OUT_REG_START_CH_PAIR5_MASK_SFT (0xf << 20) +#define OUT_REG_START_CH_PAIR6_SFT 24 +#define OUT_REG_START_CH_PAIR6_MASK 0xf +#define OUT_REG_START_CH_PAIR6_MASK_SFT (0xf << 24) +#define OUT_REG_START_CH_PAIR7_SFT 28 +#define OUT_REG_START_CH_PAIR7_MASK 0xf +#define OUT_REG_START_CH_PAIR7_MASK_SFT (0xf << 28) + +/* ETDM_OUT0_CON4 */ +/* ETDM_OUT1_CON4 */ +/* ETDM_OUT2_CON4 */ +/* ETDM_OUT3_CON4 */ +/* ETDM_OUT4_CON4 */ +/* ETDM_OUT5_CON4 */ +/* ETDM_OUT6_CON4 */ +#define OUT_REG_FS_TIMING_SEL_SFT 0 +#define OUT_REG_FS_TIMING_SEL_MASK 0x1f +#define OUT_REG_FS_TIMING_SEL_MASK_SFT (0x1f << 0) +#define OUT_REG_CLOCK_SOURCE_SEL_SFT 6 +#define OUT_REG_CLOCK_SOURCE_SEL_MASK 0x7 +#define OUT_REG_CLOCK_SOURCE_SEL_MASK_SFT (0x7 << 6) +#define OUT_REG_CK_EN_SEL_AUTO_SFT 10 +#define 
OUT_REG_CK_EN_SEL_AUTO_MASK 0x1 +#define OUT_REG_CK_EN_SEL_AUTO_MASK_SFT (0x1 << 10) +#define OUT_REG_ASYNC_RESET_SFT 11 +#define OUT_REG_ASYNC_RESET_MASK 0x1 +#define OUT_REG_ASYNC_RESET_MASK_SFT (0x1 << 11) +#define OUT_REG_CK_EN_SEL_MANUAL_SFT 14 +#define OUT_REG_CK_EN_SEL_MANUAL_MASK 0x3ff +#define OUT_REG_CK_EN_SEL_MANUAL_MASK_SFT (0x3ff << 14) +#define OUT_REG_RELATCH_EN_SEL_SFT 24 +#define OUT_REG_RELATCH_EN_SEL_MASK 0x1f +#define OUT_REG_RELATCH_EN_SEL_MASK_SFT (0x1f << 24) +#define OUT_REG_WAIT_LAST_SAMPLE_SFT 30 +#define OUT_REG_WAIT_LAST_SAMPLE_MASK 0x1 +#define OUT_REG_WAIT_LAST_SAMPLE_MASK_SFT (0x1 << 30) +#define OUT_REG_ALWAYS_OPEN_1X_EN_SFT 31 +#define OUT_REG_ALWAYS_OPEN_1X_EN_MASK 0x1 +#define OUT_REG_ALWAYS_OPEN_1X_EN_MASK_SFT (0x1 << 31) + +/* ETDM_OUT0_CON5 */ +/* ETDM_OUT1_CON5 */ +/* ETDM_OUT2_CON5 */ +/* ETDM_OUT3_CON5 */ +/* ETDM_OUT4_CON5 */ +/* ETDM_OUT5_CON5 */ +/* ETDM_OUT6_CON5 */ +#define OUT_REG_REPACK_BITNUM_SFT 0 +#define OUT_REG_REPACK_BITNUM_MASK 0x3 +#define OUT_REG_REPACK_BITNUM_MASK_SFT (0x3 << 0) +#define OUT_REG_REPACK_CHNUM_SFT 2 +#define OUT_REG_REPACK_CHNUM_MASK 0xf +#define OUT_REG_REPACK_CHNUM_MASK_SFT (0xf << 2) +#define OUT_REG_SLAVE_BCK_INV_SFT 7 +#define OUT_REG_SLAVE_BCK_INV_MASK 0x1 +#define OUT_REG_SLAVE_BCK_INV_MASK_SFT (0x1 << 7) +#define OUT_REG_SLAVE_LRCK_INV_SFT 8 +#define OUT_REG_SLAVE_LRCK_INV_MASK 0x1 +#define OUT_REG_SLAVE_LRCK_INV_MASK_SFT (0x1 << 8) +#define OUT_REG_MASTER_BCK_INV_SFT 9 +#define OUT_REG_MASTER_BCK_INV_MASK 0x1 +#define OUT_REG_MASTER_BCK_INV_MASK_SFT (0x1 << 9) +#define OUT_REG_MASTER_WS_INV_SFT 10 +#define OUT_REG_MASTER_WS_INV_MASK 0x1 +#define OUT_REG_MASTER_WS_INV_MASK_SFT (0x1 << 10) +#define OUT_REG_REPACK_24B_MSB_ALIGN_SFT 11 +#define OUT_REG_REPACK_24B_MSB_ALIGN_MASK 0x1 +#define OUT_REG_REPACK_24B_MSB_ALIGN_MASK_SFT (0x1 << 11) +#define OUT_REG_LR_SWAP_SFT 16 +#define OUT_REG_LR_SWAP_MASK 0xffff +#define OUT_REG_LR_SWAP_MASK_SFT (0xffff << 16) + +/* ETDM_OUT0_CON6 */ +/* 
ETDM_OUT1_CON6 */ +/* ETDM_OUT2_CON6 */ +/* ETDM_OUT3_CON6 */ +/* ETDM_OUT4_CON6 */ +/* ETDM_OUT5_CON6 */ +/* ETDM_OUT6_CON6 */ +#define OUT_LCH_DATA_REG_SFT 0 +#define OUT_LCH_DATA_REG_MASK 0xffffffff +#define OUT_LCH_DATA_REG_MASK_SFT (0xffffffff << 0) + +/* ETDM_OUT0_CON7 */ +/* ETDM_OUT1_CON7 */ +/* ETDM_OUT2_CON7 */ +/* ETDM_OUT3_CON7 */ +/* ETDM_OUT4_CON7 */ +/* ETDM_OUT5_CON7 */ +/* ETDM_OUT6_CON7 */ +#define OUT_RCH_DATA_REG_SFT 0 +#define OUT_RCH_DATA_REG_MASK 0xffffffff +#define OUT_RCH_DATA_REG_MASK_SFT (0xffffffff << 0) + +/* ETDM_OUT0_CON8 */ +/* ETDM_OUT1_CON8 */ +/* ETDM_OUT2_CON8 */ +/* ETDM_OUT3_CON8 */ +/* ETDM_OUT4_CON8 */ +/* ETDM_OUT5_CON8 */ +/* ETDM_OUT6_CON8 */ +#define OUT_REG_START_CH_PAIR8_SFT 0 +#define OUT_REG_START_CH_PAIR8_MASK 0xf +#define OUT_REG_START_CH_PAIR8_MASK_SFT (0xf << 0) +#define OUT_REG_START_CH_PAIR9_SFT 4 +#define OUT_REG_START_CH_PAIR9_MASK 0xf +#define OUT_REG_START_CH_PAIR9_MASK_SFT (0xf << 4) +#define OUT_REG_START_CH_PAIR10_SFT 8 +#define OUT_REG_START_CH_PAIR10_MASK 0xf +#define OUT_REG_START_CH_PAIR10_MASK_SFT (0xf << 8) +#define OUT_REG_START_CH_PAIR11_SFT 12 +#define OUT_REG_START_CH_PAIR11_MASK 0xf +#define OUT_REG_START_CH_PAIR11_MASK_SFT (0xf << 12) +#define OUT_REG_START_CH_PAIR12_SFT 16 +#define OUT_REG_START_CH_PAIR12_MASK 0xf +#define OUT_REG_START_CH_PAIR12_MASK_SFT (0xf << 16) +#define OUT_REG_START_CH_PAIR13_SFT 20 +#define OUT_REG_START_CH_PAIR13_MASK 0xf +#define OUT_REG_START_CH_PAIR13_MASK_SFT (0xf << 20) +#define OUT_REG_START_CH_PAIR14_SFT 24 +#define OUT_REG_START_CH_PAIR14_MASK 0xf +#define OUT_REG_START_CH_PAIR14_MASK_SFT (0xf << 24) +#define OUT_REG_START_CH_PAIR15_SFT 28 +#define OUT_REG_START_CH_PAIR15_MASK 0xf +#define OUT_REG_START_CH_PAIR15_MASK_SFT (0xf << 28) + +/* ETDM_OUT0_CON9 */ +/* ETDM_OUT1_CON9 */ +/* ETDM_OUT2_CON9 */ +/* ETDM_OUT3_CON9 */ +/* ETDM_OUT4_CON9 */ +/* ETDM_OUT5_CON9 */ +/* ETDM_OUT6_CON9 */ +#define OUT_REG_AFIFO_THRESHOLD_SFT 29 +#define 
OUT_REG_AFIFO_THRESHOLD_MASK 0x3 +#define OUT_REG_AFIFO_THRESHOLD_MASK_SFT (0x3 << 29) +#define OUT_REG_AFIFO_SW_RESET_SFT 15 +#define OUT_REG_AFIFO_SW_RESET_MASK 0x1 +#define OUT_REG_AFIFO_SW_RESET_MASK_SFT (0x1 << 15) +#define OUT_REG_AFIFO_RESET_SEL_SFT 14 +#define OUT_REG_AFIFO_RESET_SEL_MASK 0x1 +#define OUT_REG_AFIFO_RESET_SEL_MASK_SFT (0x1 << 14) +#define OUT_REG_AFIFO_AUTO_RESET_DIS_SFT 9 +#define OUT_REG_AFIFO_AUTO_RESET_DIS_MASK 0x1 +#define OUT_REG_AFIFO_AUTO_RESET_DIS_MASK_SFT (0x1 << 9) +#define OUT_REG_ETDM_USE_AFIFO_SFT 8 +#define OUT_REG_ETDM_USE_AFIFO_MASK 0x1 +#define OUT_REG_ETDM_USE_AFIFO_MASK_SFT (0x1 << 8) +#define OUT_REG_AFIFO_CLOCK_DOMAIN_SEL_SFT 5 +#define OUT_REG_AFIFO_CLOCK_DOMAIN_SEL_MASK 0x7 +#define OUT_REG_AFIFO_CLOCK_DOMAIN_SEL_MASK_SFT (0x7 << 5) +#define OUT_REG_AFIFO_MODE_SFT 0 +#define OUT_REG_AFIFO_MODE_MASK 0x1f +#define OUT_REG_AFIFO_MODE_MASK_SFT (0x1f << 0) + +/* ETDM_OUT0_MON */ +/* ETDM_OUT1_MON */ +/* ETDM_OUT2_MON */ +/* ETDM_OUT3_MON */ +/* ETDM_OUT4_MON */ +/* ETDM_OUT5_MON */ +/* ETDM_OUT6_MON */ +#define LRCK_INV_SFT 30 +#define LRCK_INV_MASK 0x1 +#define LRCK_INV_MASK_SFT (0x1 << 30) +#define EN_SYNC_OUT_SFT 29 +#define EN_SYNC_OUT_MASK 0x1 +#define EN_SYNC_OUT_MASK_SFT (0x1 << 29) +#define HOPPING_EN_SYNC_OUT_PRE_SFT 28 +#define HOPPING_EN_SYNC_OUT_PRE_MASK 0x1 +#define HOPPING_EN_SYNC_OUT_PRE_MASK_SFT (0x1 << 28) +#define ETDM_2X_CK_EN_SFT 25 +#define ETDM_2X_CK_EN_MASK 0x1 +#define ETDM_2X_CK_EN_MASK_SFT (0x1 << 25) +#define ETDM_1X_CK_EN_SFT 24 +#define ETDM_1X_CK_EN_MASK 0x1 +#define ETDM_1X_CK_EN_MASK_SFT (0x1 << 24) +#define SDATA0_SFT 23 +#define SDATA0_MASK 0x1 +#define SDATA0_MASK_SFT (0x1 << 23) +#define CURRENT_STATUS_SFT 21 +#define CURRENT_STATUS_MASK 0x3 +#define CURRENT_STATUS_MASK_SFT (0x3 << 21) +#define BIT_POINT_SFT 16 +#define BIT_POINT_MASK 0x1f +#define BIT_POINT_MASK_SFT (0x1f << 16) +#define BIT_CH_COUNT_SFT 10 +#define BIT_CH_COUNT_MASK 0x3f +#define BIT_CH_COUNT_MASK_SFT (0x3f << 10) 
+#define BIT_COUNT_SFT 5 +#define BIT_COUNT_MASK 0x1f +#define BIT_COUNT_MASK_SFT (0x1f << 5) +#define CH_COUNT_SFT 0 +#define CH_COUNT_MASK 0x1f +#define CH_COUNT_MASK_SFT (0x1f << 0) + +/* ETDM_0_3_COWORK_CON0 */ +#define ETDM_OUT0_DATA_SEL_SFT 0 +#define ETDM_OUT0_DATA_SEL_MASK 0xf +#define ETDM_OUT0_DATA_SEL_MASK_SFT (0xf << 0) +#define ETDM_OUT0_SYNC_SEL_SFT 4 +#define ETDM_OUT0_SYNC_SEL_MASK 0xf +#define ETDM_OUT0_SYNC_SEL_MASK_SFT (0xf << 4) +#define ETDM_OUT0_SLAVE_SEL_SFT 8 +#define ETDM_OUT0_SLAVE_SEL_MASK 0xf +#define ETDM_OUT0_SLAVE_SEL_MASK_SFT (0xf << 8) +#define ETDM_OUT1_DATA_SEL_SFT 12 +#define ETDM_OUT1_DATA_SEL_MASK 0xf +#define ETDM_OUT1_DATA_SEL_MASK_SFT (0xf << 12) +#define ETDM_OUT1_SYNC_SEL_SFT 16 +#define ETDM_OUT1_SYNC_SEL_MASK 0xf +#define ETDM_OUT1_SYNC_SEL_MASK_SFT (0xf << 16) +#define ETDM_OUT1_SLAVE_SEL_SFT 20 +#define ETDM_OUT1_SLAVE_SEL_MASK 0xf +#define ETDM_OUT1_SLAVE_SEL_MASK_SFT (0xf << 20) +#define ETDM_IN0_SLAVE_SEL_SFT 24 +#define ETDM_IN0_SLAVE_SEL_MASK 0xf +#define ETDM_IN0_SLAVE_SEL_MASK_SFT (0xf << 24) +#define ETDM_IN0_SYNC_SEL_SFT 28 +#define ETDM_IN0_SYNC_SEL_MASK 0xf +#define ETDM_IN0_SYNC_SEL_MASK_SFT (0xf << 28) + +/* ETDM_0_3_COWORK_CON1 */ +#define ETDM_IN0_SDATA0_SEL_SFT 0 +#define ETDM_IN0_SDATA0_SEL_MASK 0xf +#define ETDM_IN0_SDATA0_SEL_MASK_SFT (0xf << 0) +#define ETDM_IN0_SDATA1_15_SEL_SFT 4 +#define ETDM_IN0_SDATA1_15_SEL_MASK 0xf +#define ETDM_IN0_SDATA1_15_SEL_MASK_SFT (0xf << 4) +#define ETDM_IN1_SLAVE_SEL_SFT 8 +#define ETDM_IN1_SLAVE_SEL_MASK 0xf +#define ETDM_IN1_SLAVE_SEL_MASK_SFT (0xf << 8) +#define ETDM_IN1_SYNC_SEL_SFT 12 +#define ETDM_IN1_SYNC_SEL_MASK 0xf +#define ETDM_IN1_SYNC_SEL_MASK_SFT (0xf << 12) +#define ETDM_IN1_SDATA0_SEL_SFT 16 +#define ETDM_IN1_SDATA0_SEL_MASK 0xf +#define ETDM_IN1_SDATA0_SEL_MASK_SFT (0xf << 16) +#define ETDM_IN1_SDATA1_15_SEL_SFT 20 +#define ETDM_IN1_SDATA1_15_SEL_MASK 0xf +#define ETDM_IN1_SDATA1_15_SEL_MASK_SFT (0xf << 20) + +/* ETDM_0_3_COWORK_CON2 */ +#define 
ETDM_OUT2_DATA_SEL_SFT 0 +#define ETDM_OUT2_DATA_SEL_MASK 0xf +#define ETDM_OUT2_DATA_SEL_MASK_SFT (0xf << 0) +#define ETDM_OUT2_SYNC_SEL_SFT 4 +#define ETDM_OUT2_SYNC_SEL_MASK 0xf +#define ETDM_OUT2_SYNC_SEL_MASK_SFT (0xf << 4) +#define ETDM_OUT2_SLAVE_SEL_SFT 8 +#define ETDM_OUT2_SLAVE_SEL_MASK 0xf +#define ETDM_OUT2_SLAVE_SEL_MASK_SFT (0xf << 8) +#define ETDM_OUT3_DATA_SEL_SFT 12 +#define ETDM_OUT3_DATA_SEL_MASK 0xf +#define ETDM_OUT3_DATA_SEL_MASK_SFT (0xf << 12) +#define ETDM_OUT3_SYNC_SEL_SFT 16 +#define ETDM_OUT3_SYNC_SEL_MASK 0xf +#define ETDM_OUT3_SYNC_SEL_MASK_SFT (0xf << 16) +#define ETDM_OUT3_SLAVE_SEL_SFT 20 +#define ETDM_OUT3_SLAVE_SEL_MASK 0xf +#define ETDM_OUT3_SLAVE_SEL_MASK_SFT (0xf << 20) +#define ETDM_IN2_SLAVE_SEL_SFT 24 +#define ETDM_IN2_SLAVE_SEL_MASK 0xf +#define ETDM_IN2_SLAVE_SEL_MASK_SFT (0xf << 24) +#define ETDM_IN2_SYNC_SEL_SFT 28 +#define ETDM_IN2_SYNC_SEL_MASK 0xf +#define ETDM_IN2_SYNC_SEL_MASK_SFT (0xf << 28) + +/* ETDM_0_3_COWORK_CON3 */ +#define ETDM_IN2_SDATA0_SEL_SFT 0 +#define ETDM_IN2_SDATA0_SEL_MASK 0xf +#define ETDM_IN2_SDATA0_SEL_MASK_SFT (0xf << 0) +#define ETDM_IN2_SDATA1_15_SEL_SFT 4 +#define ETDM_IN2_SDATA1_15_SEL_MASK 0xf +#define ETDM_IN2_SDATA1_15_SEL_MASK_SFT (0xf << 4) +#define ETDM_IN3_SLAVE_SEL_SFT 8 +#define ETDM_IN3_SLAVE_SEL_MASK 0xf +#define ETDM_IN3_SLAVE_SEL_MASK_SFT (0xf << 8) +#define ETDM_IN3_SYNC_SEL_SFT 12 +#define ETDM_IN3_SYNC_SEL_MASK 0xf +#define ETDM_IN3_SYNC_SEL_MASK_SFT (0xf << 12) +#define ETDM_IN3_SDATA0_SEL_SFT 16 +#define ETDM_IN3_SDATA0_SEL_MASK 0xf +#define ETDM_IN3_SDATA0_SEL_MASK_SFT (0xf << 16) +#define ETDM_IN3_SDATA1_15_SEL_SFT 20 +#define ETDM_IN3_SDATA1_15_SEL_MASK 0xf +#define ETDM_IN3_SDATA1_15_SEL_MASK_SFT (0xf << 20) + +/* ETDM_4_7_COWORK_CON0 */ +#define ETDM_OUT4_DATA_SEL_SFT 0 +#define ETDM_OUT4_DATA_SEL_MASK 0xf +#define ETDM_OUT4_DATA_SEL_MASK_SFT (0xf << 0) +#define ETDM_OUT4_SYNC_SEL_SFT 4 +#define ETDM_OUT4_SYNC_SEL_MASK 0xf +#define ETDM_OUT4_SYNC_SEL_MASK_SFT (0xf << 
4) +#define ETDM_OUT4_SLAVE_SEL_SFT 8 +#define ETDM_OUT4_SLAVE_SEL_MASK 0xf +#define ETDM_OUT4_SLAVE_SEL_MASK_SFT (0xf << 8) +#define ETDM_OUT5_DATA_SEL_SFT 12 +#define ETDM_OUT5_DATA_SEL_MASK 0xf +#define ETDM_OUT5_DATA_SEL_MASK_SFT (0xf << 12) +#define ETDM_OUT5_SYNC_SEL_SFT 16 +#define ETDM_OUT5_SYNC_SEL_MASK 0xf +#define ETDM_OUT5_SYNC_SEL_MASK_SFT (0xf << 16) +#define ETDM_OUT5_SLAVE_SEL_SFT 20 +#define ETDM_OUT5_SLAVE_SEL_MASK 0xf +#define ETDM_OUT5_SLAVE_SEL_MASK_SFT (0xf << 20) +#define ETDM_IN4_SLAVE_SEL_SFT 24 +#define ETDM_IN4_SLAVE_SEL_MASK 0xf +#define ETDM_IN4_SLAVE_SEL_MASK_SFT (0xf << 24) +#define ETDM_IN4_SYNC_SEL_SFT 28 +#define ETDM_IN4_SYNC_SEL_MASK 0xf +#define ETDM_IN4_SYNC_SEL_MASK_SFT (0xf << 28) + +/* ETDM_4_7_COWORK_CON1 */ +#define ETDM_IN4_SDATA0_SEL_SFT 0 +#define ETDM_IN4_SDATA0_SEL_MASK 0xf +#define ETDM_IN4_SDATA0_SEL_MASK_SFT (0xf << 0) +#define ETDM_IN4_SDATA1_15_SEL_SFT 4 +#define ETDM_IN4_SDATA1_15_SEL_MASK 0xf +#define ETDM_IN4_SDATA1_15_SEL_MASK_SFT (0xf << 4) +#define ETDM_IN5_SLAVE_SEL_SFT 8 +#define ETDM_IN5_SLAVE_SEL_MASK 0xf +#define ETDM_IN5_SLAVE_SEL_MASK_SFT (0xf << 8) +#define ETDM_IN5_SYNC_SEL_SFT 12 +#define ETDM_IN5_SYNC_SEL_MASK 0xf +#define ETDM_IN5_SYNC_SEL_MASK_SFT (0xf << 12) +#define ETDM_IN5_SDATA0_SEL_SFT 16 +#define ETDM_IN5_SDATA0_SEL_MASK 0xf +#define ETDM_IN5_SDATA0_SEL_MASK_SFT (0xf << 16) +#define ETDM_IN5_SDATA1_15_SEL_SFT 20 +#define ETDM_IN5_SDATA1_15_SEL_MASK 0xf +#define ETDM_IN5_SDATA1_15_SEL_MASK_SFT (0xf << 20) + +/* ETDM_4_7_COWORK_CON2 */ +#define ETDM_OUT6_DATA_SEL_SFT 0 +#define ETDM_OUT6_DATA_SEL_MASK 0xf +#define ETDM_OUT6_DATA_SEL_MASK_SFT (0xf << 0) +#define ETDM_OUT6_SYNC_SEL_SFT 4 +#define ETDM_OUT6_SYNC_SEL_MASK 0xf +#define ETDM_OUT6_SYNC_SEL_MASK_SFT (0xf << 4) +#define ETDM_OUT6_SLAVE_SEL_SFT 8 +#define ETDM_OUT6_SLAVE_SEL_MASK 0xf +#define ETDM_OUT6_SLAVE_SEL_MASK_SFT (0xf << 8) +#define ETDM_OUT7_DATA_SEL_SFT 12 +#define ETDM_OUT7_DATA_SEL_MASK 0xf +#define 
ETDM_OUT7_DATA_SEL_MASK_SFT (0xf << 12) +#define ETDM_OUT7_SYNC_SEL_SFT 16 +#define ETDM_OUT7_SYNC_SEL_MASK 0xf +#define ETDM_OUT7_SYNC_SEL_MASK_SFT (0xf << 16) +#define ETDM_OUT7_SLAVE_SEL_SFT 20 +#define ETDM_OUT7_SLAVE_SEL_MASK 0xf +#define ETDM_OUT7_SLAVE_SEL_MASK_SFT (0xf << 20) +#define ETDM_IN6_SLAVE_SEL_SFT 24 +#define ETDM_IN6_SLAVE_SEL_MASK 0xf +#define ETDM_IN6_SLAVE_SEL_MASK_SFT (0xf << 24) +#define ETDM_IN6_SYNC_SEL_SFT 28 +#define ETDM_IN6_SYNC_SEL_MASK 0xf +#define ETDM_IN6_SYNC_SEL_MASK_SFT (0xf << 28) + +/* ETDM_4_7_COWORK_CON3 */ +#define ETDM_IN6_SDATA0_SEL_SFT 0 +#define ETDM_IN6_SDATA0_SEL_MASK 0xf +#define ETDM_IN6_SDATA0_SEL_MASK_SFT (0xf << 0) +#define ETDM_IN6_SDATA1_15_SEL_SFT 4 +#define ETDM_IN6_SDATA1_15_SEL_MASK 0xf +#define ETDM_IN6_SDATA1_15_SEL_MASK_SFT (0xf << 4) +#define ETDM_IN7_SLAVE_SEL_SFT 8 +#define ETDM_IN7_SLAVE_SEL_MASK 0xf +#define ETDM_IN7_SLAVE_SEL_MASK_SFT (0xf << 8) +#define ETDM_IN7_SYNC_SEL_SFT 12 +#define ETDM_IN7_SYNC_SEL_MASK 0xf +#define ETDM_IN7_SYNC_SEL_MASK_SFT (0xf << 12) +#define ETDM_IN7_SDATA0_SEL_SFT 16 +#define ETDM_IN7_SDATA0_SEL_MASK 0xf +#define ETDM_IN7_SDATA0_SEL_MASK_SFT (0xf << 16) +#define ETDM_IN7_SDATA1_15_SEL_SFT 20 +#define ETDM_IN7_SDATA1_15_SEL_MASK 0xf +#define ETDM_IN7_SDATA1_15_SEL_MASK_SFT (0xf << 20) + +/* AFE_DPTX_CON */ +#define DPTX_CHANNEL_ENABLE_SFT 8 +#define DPTX_CHANNEL_ENABLE_MASK 0xff +#define DPTX_CHANNEL_ENABLE_MASK_SFT (0xff << 8) +#define DPTX_REGISTER_MONITOR_SELECT_SFT 3 +#define DPTX_REGISTER_MONITOR_SELECT_MASK 0xf +#define DPTX_REGISTER_MONITOR_SELECT_MASK_SFT (0xf << 3) +#define DPTX_16BIT_SFT 2 +#define DPTX_16BIT_MASK 0x1 +#define DPTX_16BIT_MASK_SFT (0x1 << 2) +#define DPTX_CHANNEL_NUMBER_SFT 1 +#define DPTX_CHANNEL_NUMBER_MASK 0x1 +#define DPTX_CHANNEL_NUMBER_MASK_SFT (0x1 << 1) +#define DPTX_ON_SFT 0 +#define DPTX_ON_MASK 0x1 +#define DPTX_ON_MASK_SFT (0x1 << 0) + +/* AFE_DPTX_MON */ +#define AFE_DPTX_MON0_SFT 0 +#define AFE_DPTX_MON0_MASK 0xffffffff +#define 
AFE_DPTX_MON0_MASK_SFT (0xffffffff << 0) + +/* AFE_TDM_CON1 */ +#define TDM_EN_SFT 0 +#define TDM_EN_MASK 0x1 +#define TDM_EN_MASK_SFT (0x1 << 0) +#define BCK_INVERSE_SFT 1 +#define BCK_INVERSE_MASK 0x1 +#define BCK_INVERSE_MASK_SFT (0x1 << 1) +#define LRCK_INVERSE_SFT 2 +#define LRCK_INVERSE_MASK 0x1 +#define LRCK_INVERSE_MASK_SFT (0x1 << 2) +#define DELAY_DATA_SFT 3 +#define DELAY_DATA_MASK 0x1 +#define DELAY_DATA_MASK_SFT (0x1 << 3) +#define LEFT_ALIGN_SFT 4 +#define LEFT_ALIGN_MASK 0x1 +#define LEFT_ALIGN_MASK_SFT (0x1 << 4) +#define TDM_LRCK_D0P5T_SFT 5 +#define TDM_LRCK_D0P5T_MASK 0x1 +#define TDM_LRCK_D0P5T_MASK_SFT (0x1 << 5) +#define TDM_SDATA_D0P5T_SFT 6 +#define TDM_SDATA_D0P5T_MASK 0x1 +#define TDM_SDATA_D0P5T_MASK_SFT (0x1 << 6) +#define WLEN_SFT 8 +#define WLEN_MASK 0x3 +#define WLEN_MASK_SFT (0x3 << 8) +#define CHANNEL_NUM_SFT 10 +#define CHANNEL_NUM_MASK 0x3 +#define CHANNEL_NUM_MASK_SFT (0x3 << 10) +#define CHANNEL_BCK_CYCLES_SFT 12 +#define CHANNEL_BCK_CYCLES_MASK 0x3 +#define CHANNEL_BCK_CYCLES_MASK_SFT (0x3 << 12) +#define HDMI_CLK_INV_SEL_SFT 15 +#define HDMI_CLK_INV_SEL_MASK 0x1 +#define HDMI_CLK_INV_SEL_MASK_SFT (0x1 << 15) +#define DAC_BIT_NUM_SFT 16 +#define DAC_BIT_NUM_MASK 0x1f +#define DAC_BIT_NUM_MASK_SFT (0x1f << 16) +#define LRCK_TDM_WIDTH_SFT 24 +#define LRCK_TDM_WIDTH_MASK 0xff +#define LRCK_TDM_WIDTH_MASK_SFT (0xff << 24) + +/* AFE_TDM_CON2 */ +#define ST_CH_PAIR_SOUT0_SFT 0 +#define ST_CH_PAIR_SOUT0_MASK 0x7 +#define ST_CH_PAIR_SOUT0_MASK_SFT (0x7 << 0) +#define ST_CH_PAIR_SOUT1_SFT 4 +#define ST_CH_PAIR_SOUT1_MASK 0x7 +#define ST_CH_PAIR_SOUT1_MASK_SFT (0x7 << 4) +#define ST_CH_PAIR_SOUT2_SFT 8 +#define ST_CH_PAIR_SOUT2_MASK 0x7 +#define ST_CH_PAIR_SOUT2_MASK_SFT (0x7 << 8) +#define ST_CH_PAIR_SOUT3_SFT 12 +#define ST_CH_PAIR_SOUT3_MASK 0x7 +#define ST_CH_PAIR_SOUT3_MASK_SFT (0x7 << 12) +#define TDM_FIX_VALUE_SEL_SFT 16 +#define TDM_FIX_VALUE_SEL_MASK 0x1 +#define TDM_FIX_VALUE_SEL_MASK_SFT (0x1 << 16) +#define 
TDM_I2S_LOOPBACK_SFT 20 +#define TDM_I2S_LOOPBACK_MASK 0x1 +#define TDM_I2S_LOOPBACK_MASK_SFT (0x1 << 20) +#define TDM_I2S_LOOPBACK_CH_SFT 21 +#define TDM_I2S_LOOPBACK_CH_MASK 0x3 +#define TDM_I2S_LOOPBACK_CH_MASK_SFT (0x3 << 21) +#define TDM_USE_SINEGEN_INPUT_SFT 23 +#define TDM_USE_SINEGEN_INPUT_MASK 0x1 +#define TDM_USE_SINEGEN_INPUT_MASK_SFT (0x1 << 23) +#define TDM_FIX_VALUE_SFT 24 +#define TDM_FIX_VALUE_MASK 0xff +#define TDM_FIX_VALUE_MASK_SFT (0xff << 24) + +/* AFE_TDM_CON3 */ +#define TDM_OUT_SEL_DOMAIN_SFT 29 +#define TDM_OUT_SEL_DOMAIN_MASK 0x7 +#define TDM_OUT_SEL_DOMAIN_MASK_SFT (0x7 << 29) +#define TDM_OUT_SEL_FS_SFT 24 +#define TDM_OUT_SEL_FS_MASK 0x1f +#define TDM_OUT_SEL_FS_MASK_SFT (0x1f << 24) +#define TDM_OUT_MON_SEL_SFT 3 +#define TDM_OUT_MON_SEL_MASK 0x1 +#define TDM_OUT_MON_SEL_MASK_SFT (0x1 << 3) +#define RG_TDM_OUT_ASYNC_FIFO_SOFT_RST_EN_SFT 2 +#define RG_TDM_OUT_ASYNC_FIFO_SOFT_RST_EN_MASK 0x1 +#define RG_TDM_OUT_ASYNC_FIFO_SOFT_RST_EN_MASK_SFT (0x1 << 2) +#define RG_TDM_OUT_ASYNC_FIFO_SOFT_RST_SFT 1 +#define RG_TDM_OUT_ASYNC_FIFO_SOFT_RST_MASK 0x1 +#define RG_TDM_OUT_ASYNC_FIFO_SOFT_RST_MASK_SFT (0x1 << 1) +#define TDM_UPDATE_EN_SEL_SFT 0 +#define TDM_UPDATE_EN_SEL_MASK 0x1 +#define TDM_UPDATE_EN_SEL_MASK_SFT (0x1 << 0) + +/* AFE_TDM_OUT_MON */ +#define AFE_TDM_OUT_MON_SFT 0 +#define AFE_TDM_OUT_MON_MASK 0xffffffff +#define AFE_TDM_OUT_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_HDMI_CONN0 */ +#define HDMI_O_7_SFT 21 +#define HDMI_O_7_MASK 0x7 +#define HDMI_O_7_MASK_SFT (0x7 << 21) +#define HDMI_O_6_SFT 18 +#define HDMI_O_6_MASK 0x7 +#define HDMI_O_6_MASK_SFT (0x7 << 18) +#define HDMI_O_5_SFT 15 +#define HDMI_O_5_MASK 0x7 +#define HDMI_O_5_MASK_SFT (0x7 << 15) +#define HDMI_O_4_SFT 12 +#define HDMI_O_4_MASK 0x7 +#define HDMI_O_4_MASK_SFT (0x7 << 12) +#define HDMI_O_3_SFT 9 +#define HDMI_O_3_MASK 0x7 +#define HDMI_O_3_MASK_SFT (0x7 << 9) +#define HDMI_O_2_SFT 6 +#define HDMI_O_2_MASK 0x7 +#define HDMI_O_2_MASK_SFT (0x7 << 6) +#define 
HDMI_O_1_SFT 3 +#define HDMI_O_1_MASK 0x7 +#define HDMI_O_1_MASK_SFT (0x7 << 3) +#define HDMI_O_0_SFT 0 +#define HDMI_O_0_MASK 0x7 +#define HDMI_O_0_MASK_SFT (0x7 << 0) + +/* AFE_TDM_TOP_IP_VERSION */ +#define AFE_TDM_TOP_IP_VERSION_SFT 0 +#define AFE_TDM_TOP_IP_VERSION_MASK 0xffffffff +#define AFE_TDM_TOP_IP_VERSION_MASK_SFT (0xffffffff << 0) + +/* AFE_CBIP_CFG0 */ +#define CBIP_TOP_SLV_MUX_WAY_EN_SFT 16 +#define CBIP_TOP_SLV_MUX_WAY_EN_MASK 0xffff +#define CBIP_TOP_SLV_MUX_WAY_EN_MASK_SFT (0xffff << 16) +#define RESERVED_04_SFT 15 +#define RESERVED_04_MASK 0x1 +#define RESERVED_04_MASK_SFT (0x1 << 15) +#define CBIP_ASYNC_MST_RG_FIFO_THRE_SFT 13 +#define CBIP_ASYNC_MST_RG_FIFO_THRE_MASK 0x3 +#define CBIP_ASYNC_MST_RG_FIFO_THRE_MASK_SFT (0x3 << 13) +#define CBIP_ASYNC_MST_POSTWRITE_DIS_SFT 12 +#define CBIP_ASYNC_MST_POSTWRITE_DIS_MASK 0x1 +#define CBIP_ASYNC_MST_POSTWRITE_DIS_MASK_SFT (0x1 << 12) +#define RESERVED_03_SFT 11 +#define RESERVED_03_MASK 0x1 +#define RESERVED_03_MASK_SFT (0x1 << 11) +#define CBIP_ASYNC_SLV_RG_FIFO_THRE_SFT 9 +#define CBIP_ASYNC_SLV_RG_FIFO_THRE_MASK 0x3 +#define CBIP_ASYNC_SLV_RG_FIFO_THRE_MASK_SFT (0x3 << 9) +#define CBIP_ASYNC_SLV_POSTWRITE_DIS_SFT 8 +#define CBIP_ASYNC_SLV_POSTWRITE_DIS_MASK 0x1 +#define CBIP_ASYNC_SLV_POSTWRITE_DIS_MASK_SFT (0x1 << 8) +#define AUDIOSYS_BUSY_SFT 7 +#define AUDIOSYS_BUSY_MASK 0x1 +#define AUDIOSYS_BUSY_MASK_SFT (0x1 << 7) +#define CBIP_SLV_DECODER_ERR_FLAG_EN_SFT 6 +#define CBIP_SLV_DECODER_ERR_FLAG_EN_MASK 0x1 +#define CBIP_SLV_DECODER_ERR_FLAG_EN_MASK_SFT (0x1 << 6) +#define CBIP_SLV_DECODER_SLAVE_WAY_EN_SFT 5 +#define CBIP_SLV_DECODER_SLAVE_WAY_EN_MASK 0x1 +#define CBIP_SLV_DECODER_SLAVE_WAY_EN_MASK_SFT (0x1 << 5) +#define APB_R2T_SFT 3 +#define APB_R2T_MASK 0x1 +#define APB_R2T_MASK_SFT (0x1 << 3) +#define APB_W2T_SFT 2 +#define APB_W2T_MASK 0x1 +#define APB_W2T_MASK_SFT (0x1 << 2) +#define AHB_IDLE_EN_INT_SFT 1 +#define AHB_IDLE_EN_INT_MASK 0x1 +#define AHB_IDLE_EN_INT_MASK_SFT (0x1 << 1) 
+#define AHB_IDLE_EN_EXT_SFT 0 +#define AHB_IDLE_EN_EXT_MASK 0x1 +#define AHB_IDLE_EN_EXT_MASK_SFT (0x1 << 0) + +/* AFE_CBIP_SLV_DECODER_MON0 */ +#define CBIP_SLV_DECODER_ERR_DOMAIN_SFT 4 +#define CBIP_SLV_DECODER_ERR_DOMAIN_MASK 0x1 +#define CBIP_SLV_DECODER_ERR_DOMAIN_MASK_SFT (0x1 << 4) +#define CBIP_SLV_DECODER_ERR_ID_SFT 3 +#define CBIP_SLV_DECODER_ERR_ID_MASK 0x1 +#define CBIP_SLV_DECODER_ERR_ID_MASK_SFT (0x1 << 3) +#define CBIP_SLV_DECODER_ERR_RW_SFT 2 +#define CBIP_SLV_DECODER_ERR_RW_MASK 0x1 +#define CBIP_SLV_DECODER_ERR_RW_MASK_SFT (0x1 << 2) +#define CBIP_SLV_DECODER_ERR_DECERR_SFT 1 +#define CBIP_SLV_DECODER_ERR_DECERR_MASK 0x1 +#define CBIP_SLV_DECODER_ERR_DECERR_MASK_SFT (0x1 << 1) +#define CBIP_SLV_DECODER_CTRL_UPDATE_STATUS_SFT 0 +#define CBIP_SLV_DECODER_CTRL_UPDATE_STATUS_MASK 0x1 +#define CBIP_SLV_DECODER_CTRL_UPDATE_STATUS_MASK_SFT (0x1 << 0) + +/* AFE_CBIP_SLV_DECODER_MON1 */ +#define CBIP_SLV_DECODER_ERR_ADDR_SFT 0 +#define CBIP_SLV_DECODER_ERR_ADDR_MASK 0xffffffff +#define CBIP_SLV_DECODER_ERR_ADDR_MASK_SFT (0xffffffff << 0) + +/* AFE_CBIP_SLV_MUX_MON_CFG */ +#define CBIP_SLV_MUX_ERR_FLAG_EN_SFT 3 +#define CBIP_SLV_MUX_ERR_FLAG_EN_MASK 0x1 +#define CBIP_SLV_MUX_ERR_FLAG_EN_MASK_SFT (0x1 << 3) +#define CBIP_SLV_MUX_REG_SLAVE_WAY_EN_SFT 2 +#define CBIP_SLV_MUX_REG_SLAVE_WAY_EN_MASK 0x1 +#define CBIP_SLV_MUX_REG_SLAVE_WAY_EN_MASK_SFT (0x1 << 2) +#define CBIP_SLV_MUX_REG_LAYER_WAY_EN_SFT 0 +#define CBIP_SLV_MUX_REG_LAYER_WAY_EN_MASK 0x3 +#define CBIP_SLV_MUX_REG_LAYER_WAY_EN_MASK_SFT (0x3 << 0) + +/* AFE_CBIP_SLV_MUX_MON0 */ +#define CBIP_SLV_MUX_ERR_DOMAIN_SFT 8 +#define CBIP_SLV_MUX_ERR_DOMAIN_MASK 0x1 +#define CBIP_SLV_MUX_ERR_DOMAIN_MASK_SFT (0x1 << 8) +#define CBIP_SLV_MUX_ERR_ID_SFT 7 +#define CBIP_SLV_MUX_ERR_ID_MASK 0x1 +#define CBIP_SLV_MUX_ERR_ID_MASK_SFT (0x1 << 7) +#define CBIP_SLV_MUX_ERR_RD_SFT 6 +#define CBIP_SLV_MUX_ERR_RD_MASK 0x1 +#define CBIP_SLV_MUX_ERR_RD_MASK_SFT (0x1 << 6) +#define CBIP_SLV_MUX_ERR_WR_SFT 5 +#define 
CBIP_SLV_MUX_ERR_WR_MASK 0x1 +#define CBIP_SLV_MUX_ERR_WR_MASK_SFT (0x1 << 5) +#define CBIP_SLV_MUX_ERR_EN_SLV_SFT 4 +#define CBIP_SLV_MUX_ERR_EN_SLV_MASK 0x1 +#define CBIP_SLV_MUX_ERR_EN_SLV_MASK_SFT (0x1 << 4) +#define CBIP_SLV_MUX_ERR_EN_MST_SFT 2 +#define CBIP_SLV_MUX_ERR_EN_MST_MASK 0x3 +#define CBIP_SLV_MUX_ERR_EN_MST_MASK_SFT (0x3 << 2) +#define CBIP_SLV_MUX_CTRL_UPDATE_STATUS_SFT 0 +#define CBIP_SLV_MUX_CTRL_UPDATE_STATUS_MASK 0x3 +#define CBIP_SLV_MUX_CTRL_UPDATE_STATUS_MASK_SFT (0x3 << 0) + +/* AFE_CBIP_SLV_MUX_MON1 */ +#define CBIP_SLV_MUX_ERR_ADDR_SFT 0 +#define CBIP_SLV_MUX_ERR_ADDR_MASK 0xffffffff +#define CBIP_SLV_MUX_ERR_ADDR_MASK_SFT (0xffffffff << 0) + +/* AFE_MEMIF_CON0 */ +#define CPU_COMPACT_MODE_SFT 2 +#define CPU_COMPACT_MODE_MASK 0x1 +#define CPU_COMPACT_MODE_MASK_SFT (0x1 << 2) +#define CPU_HD_ALIGN_SFT 1 +#define CPU_HD_ALIGN_MASK 0x1 +#define CPU_HD_ALIGN_MASK_SFT (0x1 << 1) +#define SYSRAM_SIGN_SFT 0 +#define SYSRAM_SIGN_MASK 0x1 +#define SYSRAM_SIGN_MASK_SFT (0x1 << 0) + +/* AFE_MEMIF_ONE_HEART */ +#define DL_ONE_HEART_ON_2_SFT 2 +#define DL_ONE_HEART_ON_2_MASK 0x1 +#define DL_ONE_HEART_ON_2_MASK_SFT (0x1 << 2) +#define DL_ONE_HEART_ON_1_SFT 1 +#define DL_ONE_HEART_ON_1_MASK 0x1 +#define DL_ONE_HEART_ON_1_MASK_SFT (0x1 << 1) +#define DL_ONE_HEART_ON_0_SFT 0 +#define DL_ONE_HEART_ON_0_MASK 0x1 +#define DL_ONE_HEART_ON_0_MASK_SFT (0x1 << 0) + +/* AFE_DL0_BASE_MSB */ +#define DL0_BASE_ADDR_MSB_SFT 0 +#define DL0_BASE_ADDR_MSB_MASK 0x1ff +#define DL0_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL0_BASE */ +#define DL0_BASE_ADDR_SFT 4 +#define DL0_BASE_ADDR_MASK 0xfffffff +#define DL0_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL0_CUR_MSB */ +#define DL0_CUR_PTR_MSB_SFT 0 +#define DL0_CUR_PTR_MSB_MASK 0x1ff +#define DL0_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL0_CUR */ +#define DL0_CUR_PTR_SFT 0 +#define DL0_CUR_PTR_MASK 0xffffffff +#define DL0_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL0_END_MSB */ +#define 
DL0_END_ADDR_MSB_SFT 0 +#define DL0_END_ADDR_MSB_MASK 0x1ff +#define DL0_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL0_END */ +#define DL0_END_ADDR_SFT 4 +#define DL0_END_ADDR_MASK 0xfffffff +#define DL0_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL0_RCH_MON */ +#define DL0_RCH_DATA_SFT 0 +#define DL0_RCH_DATA_MASK 0xffffffff +#define DL0_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL0_LCH_MON */ +#define DL0_LCH_DATA_SFT 0 +#define DL0_LCH_DATA_MASK 0xffffffff +#define DL0_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL0_CON0 */ +#define DL0_ON_SFT 28 +#define DL0_ON_MASK 0x1 +#define DL0_ON_MASK_SFT (0x1 << 28) +#define DL0_ONE_HEART_SEL_SFT 22 +#define DL0_ONE_HEART_SEL_MASK 0x3 +#define DL0_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL0_MINLEN_SFT 20 +#define DL0_MINLEN_MASK 0x3 +#define DL0_MINLEN_MASK_SFT (0x3 << 20) +#define DL0_MAXLEN_SFT 16 +#define DL0_MAXLEN_MASK 0x3 +#define DL0_MAXLEN_MASK_SFT (0x3 << 16) +#define DL0_SEL_DOMAIN_SFT 13 +#define DL0_SEL_DOMAIN_MASK 0x7 +#define DL0_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL0_SEL_FS_SFT 8 +#define DL0_SEL_FS_MASK 0x1f +#define DL0_SEL_FS_MASK_SFT (0x1f << 8) +#define DL0_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL0_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL0_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL0_PBUF_SIZE_SFT 5 +#define DL0_PBUF_SIZE_MASK 0x3 +#define DL0_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL0_MONO_SFT 4 +#define DL0_MONO_MASK 0x1 +#define DL0_MONO_MASK_SFT (0x1 << 4) +#define DL0_NORMAL_MODE_SFT 3 +#define DL0_NORMAL_MODE_MASK 0x1 +#define DL0_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL0_HALIGN_SFT 2 +#define DL0_HALIGN_MASK 0x1 +#define DL0_HALIGN_MASK_SFT (0x1 << 2) +#define DL0_HD_MODE_SFT 0 +#define DL0_HD_MODE_MASK 0x3 +#define DL0_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL1_BASE_MSB */ +#define DL1_BASE_ADDR_MSB_SFT 0 +#define DL1_BASE_ADDR_MSB_MASK 0x1ff +#define DL1_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL1_BASE */ +#define DL1_BASE_ADDR_SFT 4 +#define DL1_BASE_ADDR_MASK 
0xfffffff +#define DL1_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL1_CUR_MSB */ +#define DL1_CUR_PTR_MSB_SFT 0 +#define DL1_CUR_PTR_MSB_MASK 0x1ff +#define DL1_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL1_CUR */ +#define DL1_CUR_PTR_SFT 0 +#define DL1_CUR_PTR_MASK 0xffffffff +#define DL1_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL1_END_MSB */ +#define DL1_END_ADDR_MSB_SFT 0 +#define DL1_END_ADDR_MSB_MASK 0x1ff +#define DL1_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL1_END */ +#define DL1_END_ADDR_SFT 4 +#define DL1_END_ADDR_MASK 0xfffffff +#define DL1_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL1_RCH_MON */ +#define DL1_RCH_DATA_SFT 0 +#define DL1_RCH_DATA_MASK 0xffffffff +#define DL1_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL1_LCH_MON */ +#define DL1_LCH_DATA_SFT 0 +#define DL1_LCH_DATA_MASK 0xffffffff +#define DL1_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL1_CON0 */ +#define DL1_ON_SFT 28 +#define DL1_ON_MASK 0x1 +#define DL1_ON_MASK_SFT (0x1 << 28) +#define DL1_ONE_HEART_SEL_SFT 22 +#define DL1_ONE_HEART_SEL_MASK 0x3 +#define DL1_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL1_MINLEN_SFT 20 +#define DL1_MINLEN_MASK 0x3 +#define DL1_MINLEN_MASK_SFT (0x3 << 20) +#define DL1_MAXLEN_SFT 16 +#define DL1_MAXLEN_MASK 0x3 +#define DL1_MAXLEN_MASK_SFT (0x3 << 16) +#define DL1_SEL_DOMAIN_SFT 13 +#define DL1_SEL_DOMAIN_MASK 0x7 +#define DL1_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL1_SEL_FS_SFT 8 +#define DL1_SEL_FS_MASK 0x1f +#define DL1_SEL_FS_MASK_SFT (0x1f << 8) +#define DL1_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL1_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL1_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL1_PBUF_SIZE_SFT 5 +#define DL1_PBUF_SIZE_MASK 0x3 +#define DL1_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL1_MONO_SFT 4 +#define DL1_MONO_MASK 0x1 +#define DL1_MONO_MASK_SFT (0x1 << 4) +#define DL1_NORMAL_MODE_SFT 3 +#define DL1_NORMAL_MODE_MASK 0x1 +#define DL1_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL1_HALIGN_SFT 2 +#define DL1_HALIGN_MASK 
0x1 +#define DL1_HALIGN_MASK_SFT (0x1 << 2) +#define DL1_HD_MODE_SFT 0 +#define DL1_HD_MODE_MASK 0x3 +#define DL1_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL2_BASE_MSB */ +#define DL2_BASE__ADDR_MSB_SFT 0 +#define DL2_BASE__ADDR_MSB_MASK 0x1ff +#define DL2_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL2_BASE */ +#define DL2_BASE_ADDR_SFT 4 +#define DL2_BASE_ADDR_MASK 0xfffffff +#define DL2_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL2_CUR_MSB */ +#define DL2_CUR_PTR_MSB_SFT 0 +#define DL2_CUR_PTR_MSB_MASK 0x1ff +#define DL2_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL2_CUR */ +#define DL2_CUR_PTR_SFT 0 +#define DL2_CUR_PTR_MASK 0xffffffff +#define DL2_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL2_END_MSB */ +#define DL2_END_ADDR_MSB_SFT 0 +#define DL2_END_ADDR_MSB_MASK 0x1ff +#define DL2_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL2_END */ +#define DL2_END_ADDR_SFT 4 +#define DL2_END_ADDR_MASK 0xfffffff +#define DL2_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL2_RCH_MON */ +#define DL2_RCH_DATA_SFT 0 +#define DL2_RCH_DATA_MASK 0xffffffff +#define DL2_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL2_LCH_MON */ +#define DL2_LCH_DATA_SFT 0 +#define DL2_LCH_DATA_MASK 0xffffffff +#define DL2_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL2_CON0 */ +#define DL2_ON_SFT 28 +#define DL2_ON_MASK 0x1 +#define DL2_ON_MASK_SFT (0x1 << 28) +#define DL2_ONE_HEART_SEL_SFT 22 +#define DL2_ONE_HEART_SEL_MASK 0x3 +#define DL2_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL2_MINLEN_SFT 20 +#define DL2_MINLEN_MASK 0x3 +#define DL2_MINLEN_MASK_SFT (0x3 << 20) +#define DL2_MAXLEN_SFT 16 +#define DL2_MAXLEN_MASK 0x3 +#define DL2_MAXLEN_MASK_SFT (0x3 << 16) +#define DL2_SEL_DOMAIN_SFT 13 +#define DL2_SEL_DOMAIN_MASK 0x7 +#define DL2_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL2_SEL_FS_SFT 8 +#define DL2_SEL_FS_MASK 0x1f +#define DL2_SEL_FS_MASK_SFT (0x1f << 8) +#define DL2_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL2_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define 
DL2_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL2_PBUF_SIZE_SFT 5 +#define DL2_PBUF_SIZE_MASK 0x3 +#define DL2_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL2_MONO_SFT 4 +#define DL2_MONO_MASK 0x1 +#define DL2_MONO_MASK_SFT (0x1 << 4) +#define DL2_NORMAL_MODE_SFT 3 +#define DL2_NORMAL_MODE_MASK 0x1 +#define DL2_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL2_HALIGN_SFT 2 +#define DL2_HALIGN_MASK 0x1 +#define DL2_HALIGN_MASK_SFT (0x1 << 2) +#define DL2_HD_MODE_SFT 0 +#define DL2_HD_MODE_MASK 0x3 +#define DL2_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL3_BASE_MSB */ +#define DL3_BASE__ADDR_MSB_SFT 0 +#define DL3_BASE__ADDR_MSB_MASK 0x1ff +#define DL3_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL3_BASE */ +#define DL3_BASE_ADDR_SFT 4 +#define DL3_BASE_ADDR_MASK 0xfffffff +#define DL3_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL3_CUR_MSB */ +#define DL3_CUR_PTR_MSB_SFT 0 +#define DL3_CUR_PTR_MSB_MASK 0x1ff +#define DL3_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL3_CUR */ +#define DL3_CUR_PTR_SFT 0 +#define DL3_CUR_PTR_MASK 0xffffffff +#define DL3_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL3_END_MSB */ +#define DL3_END_ADDR_MSB_SFT 0 +#define DL3_END_ADDR_MSB_MASK 0x1ff +#define DL3_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL3_END */ +#define DL3_END_ADDR_SFT 4 +#define DL3_END_ADDR_MASK 0xfffffff +#define DL3_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL3_RCH_MON */ +#define DL3_RCH_DATA_SFT 0 +#define DL3_RCH_DATA_MASK 0xffffffff +#define DL3_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL3_LCH_MON */ +#define DL3_LCH_DATA_SFT 0 +#define DL3_LCH_DATA_MASK 0xffffffff +#define DL3_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL3_CON0 */ +#define DL3_ON_SFT 28 +#define DL3_ON_MASK 0x1 +#define DL3_ON_MASK_SFT (0x1 << 28) +#define DL3_ONE_HEART_SEL_SFT 22 +#define DL3_ONE_HEART_SEL_MASK 0x3 +#define DL3_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL3_MINLEN_SFT 20 +#define DL3_MINLEN_MASK 0x3 +#define DL3_MINLEN_MASK_SFT (0x3 << 20) +#define DL3_MAXLEN_SFT 
16 +#define DL3_MAXLEN_MASK 0x3 +#define DL3_MAXLEN_MASK_SFT (0x3 << 16) +#define DL3_SEL_DOMAIN_SFT 13 +#define DL3_SEL_DOMAIN_MASK 0x7 +#define DL3_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL3_SEL_FS_SFT 8 +#define DL3_SEL_FS_MASK 0x1f +#define DL3_SEL_FS_MASK_SFT (0x1f << 8) +#define DL3_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL3_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL3_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL3_PBUF_SIZE_SFT 5 +#define DL3_PBUF_SIZE_MASK 0x3 +#define DL3_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL3_MONO_SFT 4 +#define DL3_MONO_MASK 0x1 +#define DL3_MONO_MASK_SFT (0x1 << 4) +#define DL3_NORMAL_MODE_SFT 3 +#define DL3_NORMAL_MODE_MASK 0x1 +#define DL3_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL3_HALIGN_SFT 2 +#define DL3_HALIGN_MASK 0x1 +#define DL3_HALIGN_MASK_SFT (0x1 << 2) +#define DL3_HD_MODE_SFT 0 +#define DL3_HD_MODE_MASK 0x3 +#define DL3_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL4_BASE_MSB */ +#define DL4_BASE__ADDR_MSB_SFT 0 +#define DL4_BASE__ADDR_MSB_MASK 0x1ff +#define DL4_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL4_BASE */ +#define DL4_BASE_ADDR_SFT 4 +#define DL4_BASE_ADDR_MASK 0xfffffff +#define DL4_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL4_CUR_MSB */ +#define DL4_CUR_PTR_MSB_SFT 0 +#define DL4_CUR_PTR_MSB_MASK 0x1ff +#define DL4_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL4_CUR */ +#define DL4_CUR_PTR_SFT 0 +#define DL4_CUR_PTR_MASK 0xffffffff +#define DL4_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL4_END_MSB */ +#define DL4_END_ADDR_MSB_SFT 0 +#define DL4_END_ADDR_MSB_MASK 0x1ff +#define DL4_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL4_END */ +#define DL4_END_ADDR_SFT 4 +#define DL4_END_ADDR_MASK 0xfffffff +#define DL4_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL4_RCH_MON */ +#define DL4_RCH_DATA_SFT 0 +#define DL4_RCH_DATA_MASK 0xffffffff +#define DL4_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL4_LCH_MON */ +#define DL4_LCH_DATA_SFT 0 +#define DL4_LCH_DATA_MASK 0xffffffff +#define 
DL4_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL4_CON0 */ +#define DL4_ON_SFT 28 +#define DL4_ON_MASK 0x1 +#define DL4_ON_MASK_SFT (0x1 << 28) +#define DL4_ONE_HEART_SEL_SFT 22 +#define DL4_ONE_HEART_SEL_MASK 0x3 +#define DL4_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL4_MINLEN_SFT 20 +#define DL4_MINLEN_MASK 0x3 +#define DL4_MINLEN_MASK_SFT (0x3 << 20) +#define DL4_MAXLEN_SFT 16 +#define DL4_MAXLEN_MASK 0x3 +#define DL4_MAXLEN_MASK_SFT (0x3 << 16) +#define DL4_SEL_DOMAIN_SFT 13 +#define DL4_SEL_DOMAIN_MASK 0x7 +#define DL4_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL4_SEL_FS_SFT 8 +#define DL4_SEL_FS_MASK 0x1f +#define DL4_SEL_FS_MASK_SFT (0x1f << 8) +#define DL4_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL4_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL4_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL4_PBUF_SIZE_SFT 5 +#define DL4_PBUF_SIZE_MASK 0x3 +#define DL4_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL4_MONO_SFT 4 +#define DL4_MONO_MASK 0x1 +#define DL4_MONO_MASK_SFT (0x1 << 4) +#define DL4_NORMAL_MODE_SFT 3 +#define DL4_NORMAL_MODE_MASK 0x1 +#define DL4_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL4_HALIGN_SFT 2 +#define DL4_HALIGN_MASK 0x1 +#define DL4_HALIGN_MASK_SFT (0x1 << 2) +#define DL4_HD_MODE_SFT 0 +#define DL4_HD_MODE_MASK 0x3 +#define DL4_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL5_BASE_MSB */ +#define DL5_BASE__ADDR_MSB_SFT 0 +#define DL5_BASE__ADDR_MSB_MASK 0x1ff +#define DL5_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL5_BASE */ +#define DL5_BASE_ADDR_SFT 4 +#define DL5_BASE_ADDR_MASK 0xfffffff +#define DL5_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL5_CUR_MSB */ +#define DL5_CUR_PTR_MSB_SFT 0 +#define DL5_CUR_PTR_MSB_MASK 0x1ff +#define DL5_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL5_CUR */ +#define DL5_CUR_PTR_SFT 0 +#define DL5_CUR_PTR_MASK 0xffffffff +#define DL5_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL5_END_MSB */ +#define DL5_END_ADDR_MSB_SFT 0 +#define DL5_END_ADDR_MSB_MASK 0x1ff +#define DL5_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* 
AFE_DL5_END */ +#define DL5_END_ADDR_SFT 4 +#define DL5_END_ADDR_MASK 0xfffffff +#define DL5_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL5_RCH_MON */ +#define DL5_RCH_DATA_SFT 0 +#define DL5_RCH_DATA_MASK 0xffffffff +#define DL5_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL5_LCH_MON */ +#define DL5_LCH_DATA_SFT 0 +#define DL5_LCH_DATA_MASK 0xffffffff +#define DL5_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL5_CON0 */ +#define DL5_ON_SFT 28 +#define DL5_ON_MASK 0x1 +#define DL5_ON_MASK_SFT (0x1 << 28) +#define DL5_ONE_HEART_SEL_SFT 22 +#define DL5_ONE_HEART_SEL_MASK 0x3 +#define DL5_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL5_MINLEN_SFT 20 +#define DL5_MINLEN_MASK 0x3 +#define DL5_MINLEN_MASK_SFT (0x3 << 20) +#define DL5_MAXLEN_SFT 16 +#define DL5_MAXLEN_MASK 0x3 +#define DL5_MAXLEN_MASK_SFT (0x3 << 16) +#define DL5_SEL_DOMAIN_SFT 13 +#define DL5_SEL_DOMAIN_MASK 0x7 +#define DL5_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL5_SEL_FS_SFT 8 +#define DL5_SEL_FS_MASK 0x1f +#define DL5_SEL_FS_MASK_SFT (0x1f << 8) +#define DL5_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL5_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL5_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL5_PBUF_SIZE_SFT 5 +#define DL5_PBUF_SIZE_MASK 0x3 +#define DL5_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL5_MONO_SFT 4 +#define DL5_MONO_MASK 0x1 +#define DL5_MONO_MASK_SFT (0x1 << 4) +#define DL5_NORMAL_MODE_SFT 3 +#define DL5_NORMAL_MODE_MASK 0x1 +#define DL5_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL5_HALIGN_SFT 2 +#define DL5_HALIGN_MASK 0x1 +#define DL5_HALIGN_MASK_SFT (0x1 << 2) +#define DL5_HD_MODE_SFT 0 +#define DL5_HD_MODE_MASK 0x3 +#define DL5_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL6_BASE_MSB */ +#define DL6_BASE__ADDR_MSB_SFT 0 +#define DL6_BASE__ADDR_MSB_MASK 0x1ff +#define DL6_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL6_BASE */ +#define DL6_BASE_ADDR_SFT 4 +#define DL6_BASE_ADDR_MASK 0xfffffff +#define DL6_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL6_CUR_MSB */ +#define 
DL6_CUR_PTR_MSB_SFT 0 +#define DL6_CUR_PTR_MSB_MASK 0x1ff +#define DL6_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL6_CUR */ +#define DL6_CUR_PTR_SFT 0 +#define DL6_CUR_PTR_MASK 0xffffffff +#define DL6_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL6_END_MSB */ +#define DL6_END_ADDR_MSB_SFT 0 +#define DL6_END_ADDR_MSB_MASK 0x1ff +#define DL6_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL6_END */ +#define DL6_END_ADDR_SFT 4 +#define DL6_END_ADDR_MASK 0xfffffff +#define DL6_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL6_RCH_MON */ +#define DL6_RCH_DATA_SFT 0 +#define DL6_RCH_DATA_MASK 0xffffffff +#define DL6_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL6_LCH_MON */ +#define DL6_LCH_DATA_SFT 0 +#define DL6_LCH_DATA_MASK 0xffffffff +#define DL6_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL6_CON0 */ +#define DL6_ON_SFT 28 +#define DL6_ON_MASK 0x1 +#define DL6_ON_MASK_SFT (0x1 << 28) +#define DL6_ONE_HEART_SEL_SFT 22 +#define DL6_ONE_HEART_SEL_MASK 0x3 +#define DL6_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL6_MINLEN_SFT 20 +#define DL6_MINLEN_MASK 0x3 +#define DL6_MINLEN_MASK_SFT (0x3 << 20) +#define DL6_MAXLEN_SFT 16 +#define DL6_MAXLEN_MASK 0x3 +#define DL6_MAXLEN_MASK_SFT (0x3 << 16) +#define DL6_SEL_DOMAIN_SFT 13 +#define DL6_SEL_DOMAIN_MASK 0x7 +#define DL6_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL6_SEL_FS_SFT 8 +#define DL6_SEL_FS_MASK 0x1f +#define DL6_SEL_FS_MASK_SFT (0x1f << 8) +#define DL6_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL6_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL6_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL6_PBUF_SIZE_SFT 5 +#define DL6_PBUF_SIZE_MASK 0x3 +#define DL6_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL6_MONO_SFT 4 +#define DL6_MONO_MASK 0x1 +#define DL6_MONO_MASK_SFT (0x1 << 4) +#define DL6_NORMAL_MODE_SFT 3 +#define DL6_NORMAL_MODE_MASK 0x1 +#define DL6_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL6_HALIGN_SFT 2 +#define DL6_HALIGN_MASK 0x1 +#define DL6_HALIGN_MASK_SFT (0x1 << 2) +#define DL6_HD_MODE_SFT 0 +#define 
DL6_HD_MODE_MASK 0x3 +#define DL6_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL7_BASE_MSB */ +#define DL7_BASE__ADDR_MSB_SFT 0 +#define DL7_BASE__ADDR_MSB_MASK 0x1ff +#define DL7_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL7_BASE */ +#define DL7_BASE_ADDR_SFT 4 +#define DL7_BASE_ADDR_MASK 0xfffffff +#define DL7_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL7_CUR_MSB */ +#define DL7_CUR_PTR_MSB_SFT 0 +#define DL7_CUR_PTR_MSB_MASK 0x1ff +#define DL7_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL7_CUR */ +#define DL7_CUR_PTR_SFT 0 +#define DL7_CUR_PTR_MASK 0xffffffff +#define DL7_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL7_END_MSB */ +#define DL7_END_ADDR_MSB_SFT 0 +#define DL7_END_ADDR_MSB_MASK 0x1ff +#define DL7_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL7_END */ +#define DL7_END_ADDR_SFT 4 +#define DL7_END_ADDR_MASK 0xfffffff +#define DL7_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL7_RCH_MON */ +#define DL7_RCH_DATA_SFT 0 +#define DL7_RCH_DATA_MASK 0xffffffff +#define DL7_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL7_LCH_MON */ +#define DL7_LCH_DATA_SFT 0 +#define DL7_LCH_DATA_MASK 0xffffffff +#define DL7_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL7_CON0 */ +#define DL7_ON_SFT 28 +#define DL7_ON_MASK 0x1 +#define DL7_ON_MASK_SFT (0x1 << 28) +#define DL7_ONE_HEART_SEL_SFT 22 +#define DL7_ONE_HEART_SEL_MASK 0x3 +#define DL7_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL7_MINLEN_SFT 20 +#define DL7_MINLEN_MASK 0x3 +#define DL7_MINLEN_MASK_SFT (0x3 << 20) +#define DL7_MAXLEN_SFT 16 +#define DL7_MAXLEN_MASK 0x3 +#define DL7_MAXLEN_MASK_SFT (0x3 << 16) +#define DL7_SEL_DOMAIN_SFT 13 +#define DL7_SEL_DOMAIN_MASK 0x7 +#define DL7_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL7_SEL_FS_SFT 8 +#define DL7_SEL_FS_MASK 0x1f +#define DL7_SEL_FS_MASK_SFT (0x1f << 8) +#define DL7_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL7_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL7_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL7_PBUF_SIZE_SFT 5 +#define DL7_PBUF_SIZE_MASK 0x3 
+#define DL7_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL7_MONO_SFT 4 +#define DL7_MONO_MASK 0x1 +#define DL7_MONO_MASK_SFT (0x1 << 4) +#define DL7_NORMAL_MODE_SFT 3 +#define DL7_NORMAL_MODE_MASK 0x1 +#define DL7_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL7_HALIGN_SFT 2 +#define DL7_HALIGN_MASK 0x1 +#define DL7_HALIGN_MASK_SFT (0x1 << 2) +#define DL7_HD_MODE_SFT 0 +#define DL7_HD_MODE_MASK 0x3 +#define DL7_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL8_BASE_MSB */ +#define DL8_BASE__ADDR_MSB_SFT 0 +#define DL8_BASE__ADDR_MSB_MASK 0x1ff +#define DL8_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL8_BASE */ +#define DL8_BASE_ADDR_SFT 4 +#define DL8_BASE_ADDR_MASK 0xfffffff +#define DL8_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL8_CUR_MSB */ +#define DL8_CUR_PTR_MSB_SFT 0 +#define DL8_CUR_PTR_MSB_MASK 0x1ff +#define DL8_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL8_CUR */ +#define DL8_CUR_PTR_SFT 0 +#define DL8_CUR_PTR_MASK 0xffffffff +#define DL8_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL8_END_MSB */ +#define DL8_END_ADDR_MSB_SFT 0 +#define DL8_END_ADDR_MSB_MASK 0x1ff +#define DL8_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL8_END */ +#define DL8_END_ADDR_SFT 4 +#define DL8_END_ADDR_MASK 0xfffffff +#define DL8_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL8_RCH_MON */ +#define DL8_RCH_DATA_SFT 0 +#define DL8_RCH_DATA_MASK 0xffffffff +#define DL8_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL8_LCH_MON */ +#define DL8_LCH_DATA_SFT 0 +#define DL8_LCH_DATA_MASK 0xffffffff +#define DL8_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL8_CON0 */ +#define DL8_ON_SFT 28 +#define DL8_ON_MASK 0x1 +#define DL8_ON_MASK_SFT (0x1 << 28) +#define DL8_ONE_HEART_SEL_SFT 22 +#define DL8_ONE_HEART_SEL_MASK 0x3 +#define DL8_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL8_MINLEN_SFT 20 +#define DL8_MINLEN_MASK 0x3 +#define DL8_MINLEN_MASK_SFT (0x3 << 20) +#define DL8_MAXLEN_SFT 16 +#define DL8_MAXLEN_MASK 0x3 +#define DL8_MAXLEN_MASK_SFT (0x3 << 16) +#define DL8_SEL_DOMAIN_SFT 13 
+#define DL8_SEL_DOMAIN_MASK 0x7 +#define DL8_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL8_SEL_FS_SFT 8 +#define DL8_SEL_FS_MASK 0x1f +#define DL8_SEL_FS_MASK_SFT (0x1f << 8) +#define DL8_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL8_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL8_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL8_PBUF_SIZE_SFT 5 +#define DL8_PBUF_SIZE_MASK 0x3 +#define DL8_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL8_MONO_SFT 4 +#define DL8_MONO_MASK 0x1 +#define DL8_MONO_MASK_SFT (0x1 << 4) +#define DL8_NORMAL_MODE_SFT 3 +#define DL8_NORMAL_MODE_MASK 0x1 +#define DL8_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL8_HALIGN_SFT 2 +#define DL8_HALIGN_MASK 0x1 +#define DL8_HALIGN_MASK_SFT (0x1 << 2) +#define DL8_HD_MODE_SFT 0 +#define DL8_HD_MODE_MASK 0x3 +#define DL8_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL_4CH_BASE_MSB */ +#define DL_4CH_BASE__ADDR_MSB_SFT 0 +#define DL_4CH_BASE__ADDR_MSB_MASK 0x1ff +#define DL_4CH_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL_4CH_BASE */ +#define DL_4CH_BASE_ADDR_SFT 4 +#define DL_4CH_BASE_ADDR_MASK 0xfffffff +#define DL_4CH_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL_4CH_CUR_MSB */ +#define DL_4CH_CUR_PTR_MSB_SFT 0 +#define DL_4CH_CUR_PTR_MSB_MASK 0x1ff +#define DL_4CH_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL_4CH_CUR */ +#define DL_4CH_CUR_PTR_SFT 0 +#define DL_4CH_CUR_PTR_MASK 0xffffffff +#define DL_4CH_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_4CH_END_MSB */ +#define DL_4CH_END_ADDR_MSB_SFT 0 +#define DL_4CH_END_ADDR_MSB_MASK 0x1ff +#define DL_4CH_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL_4CH_END */ +#define DL_4CH_END_ADDR_SFT 4 +#define DL_4CH_END_ADDR_MASK 0xfffffff +#define DL_4CH_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL_4CH_CON0 */ +#define DL_4CH_ON_SFT 31 +#define DL_4CH_ON_MASK 0x1 +#define DL_4CH_ON_MASK_SFT (0x1 << 31) +#define DL_4CH_NUM_SFT 24 +#define DL_4CH_NUM_MASK 0x1f +#define DL_4CH_NUM_MASK_SFT (0x1f << 24) +#define DL_4CH_ONE_HEART_SEL_SFT 22 +#define DL_4CH_ONE_HEART_SEL_MASK 
0x3 +#define DL_4CH_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL_4CH_MINLEN_SFT 20 +#define DL_4CH_MINLEN_MASK 0x3 +#define DL_4CH_MINLEN_MASK_SFT (0x3 << 20) +#define DL_4CH_MAXLEN_SFT 16 +#define DL_4CH_MAXLEN_MASK 0x3 +#define DL_4CH_MAXLEN_MASK_SFT (0x3 << 16) +#define DL_4CH_SEL_DOMAIN_SFT 13 +#define DL_4CH_SEL_DOMAIN_MASK 0x7 +#define DL_4CH_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL_4CH_SEL_FS_SFT 8 +#define DL_4CH_SEL_FS_MASK 0x1f +#define DL_4CH_SEL_FS_MASK_SFT (0x1f << 8) +#define DL_4CH_BUF_EMPTY_CLR_SFT 7 +#define DL_4CH_BUF_EMPTY_CLR_MASK 0x1 +#define DL_4CH_BUF_EMPTY_CLR_MASK_SFT (0x1 << 7) +#define DL_4CH_PBUF_SIZE_SFT 5 +#define DL_4CH_PBUF_SIZE_MASK 0x3 +#define DL_4CH_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL_4CH_HANG_CLR_SFT 4 +#define DL_4CH_HANG_CLR_MASK 0x1 +#define DL_4CH_HANG_CLR_MASK_SFT (0x1 << 4) +#define DL_4CH_NORMAL_MODE_SFT 3 +#define DL_4CH_NORMAL_MODE_MASK 0x1 +#define DL_4CH_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL_4CH_HALIGN_SFT 2 +#define DL_4CH_HALIGN_MASK 0x1 +#define DL_4CH_HALIGN_MASK_SFT (0x1 << 2) +#define DL_4CH_HD_MODE_SFT 0 +#define DL_4CH_HD_MODE_MASK 0x3 +#define DL_4CH_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL_24CH_BASE_MSB */ +#define DL_24CH_BASE__ADDR_MSB_SFT 0 +#define DL_24CH_BASE__ADDR_MSB_MASK 0x1ff +#define DL_24CH_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL_24CH_BASE */ +#define DL_24CH_BASE_ADDR_SFT 4 +#define DL_24CH_BASE_ADDR_MASK 0xfffffff +#define DL_24CH_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL_24CH_CUR_MSB */ +#define DL_24CH_CUR_PTR_MSB_SFT 0 +#define DL_24CH_CUR_PTR_MSB_MASK 0x1ff +#define DL_24CH_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL_24CH_CUR */ +#define DL_24CH_CUR_PTR_SFT 0 +#define DL_24CH_CUR_PTR_MASK 0xffffffff +#define DL_24CH_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_END_MSB */ +#define DL_24CH_END_ADDR_MSB_SFT 0 +#define DL_24CH_END_ADDR_MSB_MASK 0x1ff +#define DL_24CH_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL_24CH_END */ +#define 
DL_24CH_END_ADDR_SFT 4 +#define DL_24CH_END_ADDR_MASK 0xfffffff +#define DL_24CH_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL_24CH_CON0 */ +#define DL_24CH_ON_SFT 31 +#define DL_24CH_ON_MASK 0x1 +#define DL_24CH_ON_MASK_SFT (0x1 << 31) +#define DL_24CH_NUM_SFT 24 +#define DL_24CH_NUM_MASK 0x3f +#define DL_24CH_NUM_MASK_SFT (0x3f << 24) +#define DL_24CH_ONE_HEART_SEL_SFT 22 +#define DL_24CH_ONE_HEART_SEL_MASK 0x3 +#define DL_24CH_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL_24CH_MINLEN_SFT 20 +#define DL_24CH_MINLEN_MASK 0x3 +#define DL_24CH_MINLEN_MASK_SFT (0x3 << 20) +#define DL_24CH_MAXLEN_SFT 16 +#define DL_24CH_MAXLEN_MASK 0x3 +#define DL_24CH_MAXLEN_MASK_SFT (0x3 << 16) +#define DL_24CH_SEL_DOMAIN_SFT 13 +#define DL_24CH_SEL_DOMAIN_MASK 0x7 +#define DL_24CH_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL_24CH_SEL_FS_SFT 8 +#define DL_24CH_SEL_FS_MASK 0x1f +#define DL_24CH_SEL_FS_MASK_SFT (0x1f << 8) +#define DL_24CH_BUF_EMPTY_CLR_SFT 7 +#define DL_24CH_BUF_EMPTY_CLR_MASK 0x1 +#define DL_24CH_BUF_EMPTY_CLR_MASK_SFT (0x1 << 7) +#define DL_24CH_PBUF_SIZE_SFT 5 +#define DL_24CH_PBUF_SIZE_MASK 0x3 +#define DL_24CH_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL_24CH_HANG_CLR_SFT 4 +#define DL_24CH_HANG_CLR_MASK 0x1 +#define DL_24CH_HANG_CLR_MASK_SFT (0x1 << 4) +#define DL_24CH_NORMAL_MODE_SFT 3 +#define DL_24CH_NORMAL_MODE_MASK 0x1 +#define DL_24CH_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL_24CH_HALIGN_SFT 2 +#define DL_24CH_HALIGN_MASK 0x1 +#define DL_24CH_HALIGN_MASK_SFT (0x1 << 2) +#define DL_24CH_HD_MODE_SFT 0 +#define DL_24CH_HD_MODE_MASK 0x3 +#define DL_24CH_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL23_BASE_MSB */ +#define DL23_BASE__ADDR_MSB_SFT 0 +#define DL23_BASE__ADDR_MSB_MASK 0x1ff +#define DL23_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL23_BASE */ +#define DL23_BASE_ADDR_SFT 4 +#define DL23_BASE_ADDR_MASK 0xfffffff +#define DL23_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL23_CUR_MSB */ +#define DL23_CUR_PTR_MSB_SFT 0 +#define DL23_CUR_PTR_MSB_MASK 
0x1ff +#define DL23_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL23_CUR */ +#define DL23_CUR_PTR_SFT 0 +#define DL23_CUR_PTR_MASK 0xffffffff +#define DL23_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL23_END_MSB */ +#define DL23_END_ADDR_MSB_SFT 0 +#define DL23_END_ADDR_MSB_MASK 0x1ff +#define DL23_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL23_END */ +#define DL23_END_ADDR_SFT 4 +#define DL23_END_ADDR_MASK 0xfffffff +#define DL23_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL23_RCH_MON */ +#define DL23_RCH_DATA_SFT 0 +#define DL23_RCH_DATA_MASK 0xffffffff +#define DL23_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL23_LCH_MON */ +#define DL23_LCH_DATA_SFT 0 +#define DL23_LCH_DATA_MASK 0xffffffff +#define DL23_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL23_CON0 */ +#define DL23_ON_SFT 28 +#define DL23_ON_MASK 0x1 +#define DL23_ON_MASK_SFT (0x1 << 28) +#define DL23_ONE_HEART_SEL_SFT 22 +#define DL23_ONE_HEART_SEL_MASK 0x3 +#define DL23_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL23_MINLEN_SFT 20 +#define DL23_MINLEN_MASK 0x3 +#define DL23_MINLEN_MASK_SFT (0x3 << 20) +#define DL23_MAXLEN_SFT 16 +#define DL23_MAXLEN_MASK 0x3 +#define DL23_MAXLEN_MASK_SFT (0x3 << 16) +#define DL23_SEL_DOMAIN_SFT 13 +#define DL23_SEL_DOMAIN_MASK 0x7 +#define DL23_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL23_SEL_FS_SFT 8 +#define DL23_SEL_FS_MASK 0x1f +#define DL23_SEL_FS_MASK_SFT (0x1f << 8) +#define DL23_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL23_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL23_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL23_PBUF_SIZE_SFT 5 +#define DL23_PBUF_SIZE_MASK 0x3 +#define DL23_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL23_MONO_SFT 4 +#define DL23_MONO_MASK 0x1 +#define DL23_MONO_MASK_SFT (0x1 << 4) +#define DL23_NORMAL_MODE_SFT 3 +#define DL23_NORMAL_MODE_MASK 0x1 +#define DL23_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL23_HALIGN_SFT 2 +#define DL23_HALIGN_MASK 0x1 +#define DL23_HALIGN_MASK_SFT (0x1 << 2) +#define DL23_HD_MODE_SFT 0 +#define 
DL23_HD_MODE_MASK 0x3 +#define DL23_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL24_BASE_MSB */ +#define DL24_BASE__ADDR_MSB_SFT 0 +#define DL24_BASE__ADDR_MSB_MASK 0x1ff +#define DL24_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL24_BASE */ +#define DL24_BASE_ADDR_SFT 4 +#define DL24_BASE_ADDR_MASK 0xfffffff +#define DL24_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL24_CUR_MSB */ +#define DL24_CUR_PTR_MSB_SFT 0 +#define DL24_CUR_PTR_MSB_MASK 0x1ff +#define DL24_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL24_CUR */ +#define DL24_CUR_PTR_SFT 0 +#define DL24_CUR_PTR_MASK 0xffffffff +#define DL24_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL24_END_MSB */ +#define DL24_END_ADDR_MSB_SFT 0 +#define DL24_END_ADDR_MSB_MASK 0x1ff +#define DL24_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL24_END */ +#define DL24_END_ADDR_SFT 4 +#define DL24_END_ADDR_MASK 0xfffffff +#define DL24_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL24_RCH_MON */ +#define DL24_RCH_DATA_SFT 0 +#define DL24_RCH_DATA_MASK 0xffffffff +#define DL24_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL24_LCH_MON */ +#define DL24_LCH_DATA_SFT 0 +#define DL24_LCH_DATA_MASK 0xffffffff +#define DL24_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL24_CON0 */ +#define DL24_ON_SFT 28 +#define DL24_ON_MASK 0x1 +#define DL24_ON_MASK_SFT (0x1 << 28) +#define DL24_ONE_HEART_SEL_SFT 22 +#define DL24_ONE_HEART_SEL_MASK 0x3 +#define DL24_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL24_MINLEN_SFT 20 +#define DL24_MINLEN_MASK 0x3 +#define DL24_MINLEN_MASK_SFT (0x3 << 20) +#define DL24_MAXLEN_SFT 16 +#define DL24_MAXLEN_MASK 0x3 +#define DL24_MAXLEN_MASK_SFT (0x3 << 16) +#define DL24_SEL_DOMAIN_SFT 13 +#define DL24_SEL_DOMAIN_MASK 0x7 +#define DL24_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL24_SEL_FS_SFT 8 +#define DL24_SEL_FS_MASK 0x1f +#define DL24_SEL_FS_MASK_SFT (0x1f << 8) +#define DL24_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL24_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL24_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define 
DL24_PBUF_SIZE_SFT 5 +#define DL24_PBUF_SIZE_MASK 0x3 +#define DL24_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL24_MONO_SFT 4 +#define DL24_MONO_MASK 0x1 +#define DL24_MONO_MASK_SFT (0x1 << 4) +#define DL24_NORMAL_MODE_SFT 3 +#define DL24_NORMAL_MODE_MASK 0x1 +#define DL24_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL24_HALIGN_SFT 2 +#define DL24_HALIGN_MASK 0x1 +#define DL24_HALIGN_MASK_SFT (0x1 << 2) +#define DL24_HD_MODE_SFT 0 +#define DL24_HD_MODE_MASK 0x3 +#define DL24_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL25_BASE_MSB */ +#define DL25_BASE__ADDR_MSB_SFT 0 +#define DL25_BASE__ADDR_MSB_MASK 0x1ff +#define DL25_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL25_BASE */ +#define DL25_BASE_ADDR_SFT 4 +#define DL25_BASE_ADDR_MASK 0xfffffff +#define DL25_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL25_CUR_MSB */ +#define DL25_CUR_PTR_MSB_SFT 0 +#define DL25_CUR_PTR_MSB_MASK 0x1ff +#define DL25_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL25_CUR */ +#define DL25_CUR_PTR_SFT 0 +#define DL25_CUR_PTR_MASK 0xffffffff +#define DL25_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL25_END_MSB */ +#define DL25_END_ADDR_MSB_SFT 0 +#define DL25_END_ADDR_MSB_MASK 0x1ff +#define DL25_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL25_END */ +#define DL25_END_ADDR_SFT 4 +#define DL25_END_ADDR_MASK 0xfffffff +#define DL25_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL25_RCH_MON */ +#define DL25_RCH_DATA_SFT 0 +#define DL25_RCH_DATA_MASK 0xffffffff +#define DL25_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL25_LCH_MON */ +#define DL25_LCH_DATA_SFT 0 +#define DL25_LCH_DATA_MASK 0xffffffff +#define DL25_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL25_CON0 */ +#define DL25_ON_SFT 28 +#define DL25_ON_MASK 0x1 +#define DL25_ON_MASK_SFT (0x1 << 28) +#define DL25_ONE_HEART_SEL_SFT 22 +#define DL25_ONE_HEART_SEL_MASK 0x3 +#define DL25_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL25_MINLEN_SFT 20 +#define DL25_MINLEN_MASK 0x3 +#define DL25_MINLEN_MASK_SFT (0x3 << 20) +#define 
DL25_MAXLEN_SFT 16 +#define DL25_MAXLEN_MASK 0x3 +#define DL25_MAXLEN_MASK_SFT (0x3 << 16) +#define DL25_SEL_DOMAIN_SFT 13 +#define DL25_SEL_DOMAIN_MASK 0x7 +#define DL25_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL25_SEL_FS_SFT 8 +#define DL25_SEL_FS_MASK 0x1f +#define DL25_SEL_FS_MASK_SFT (0x1f << 8) +#define DL25_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL25_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL25_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL25_PBUF_SIZE_SFT 5 +#define DL25_PBUF_SIZE_MASK 0x3 +#define DL25_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL25_MONO_SFT 4 +#define DL25_MONO_MASK 0x1 +#define DL25_MONO_MASK_SFT (0x1 << 4) +#define DL25_NORMAL_MODE_SFT 3 +#define DL25_NORMAL_MODE_MASK 0x1 +#define DL25_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL25_HALIGN_SFT 2 +#define DL25_HALIGN_MASK 0x1 +#define DL25_HALIGN_MASK_SFT (0x1 << 2) +#define DL25_HD_MODE_SFT 0 +#define DL25_HD_MODE_MASK 0x3 +#define DL25_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_DL26_BASE_MSB */ +#define DL26_BASE__ADDR_MSB_SFT 0 +#define DL26_BASE__ADDR_MSB_MASK 0x1ff +#define DL26_BASE__ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL26_BASE */ +#define DL26_BASE_ADDR_SFT 4 +#define DL26_BASE_ADDR_MASK 0xfffffff +#define DL26_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL26_CUR_MSB */ +#define DL26_CUR_PTR_MSB_SFT 0 +#define DL26_CUR_PTR_MSB_MASK 0x1ff +#define DL26_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL26_CUR */ +#define DL26_CUR_PTR_SFT 0 +#define DL26_CUR_PTR_MASK 0xffffffff +#define DL26_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_DL26_END_MSB */ +#define DL26_END_ADDR_MSB_SFT 0 +#define DL26_END_ADDR_MSB_MASK 0x1ff +#define DL26_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_DL26_END */ +#define DL26_END_ADDR_SFT 4 +#define DL26_END_ADDR_MASK 0xfffffff +#define DL26_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_DL26_RCH_MON */ +#define DL26_RCH_DATA_SFT 0 +#define DL26_RCH_DATA_MASK 0xffffffff +#define DL26_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL26_LCH_MON */ +#define 
DL26_LCH_DATA_SFT 0 +#define DL26_LCH_DATA_MASK 0xffffffff +#define DL26_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL26_CON0 */ +#define DL26_ON_SFT 28 +#define DL26_ON_MASK 0x1 +#define DL26_ON_MASK_SFT (0x1 << 28) +#define DL26_ONE_HEART_SEL_SFT 22 +#define DL26_ONE_HEART_SEL_MASK 0x3 +#define DL26_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define DL26_MINLEN_SFT 20 +#define DL26_MINLEN_MASK 0x3 +#define DL26_MINLEN_MASK_SFT (0x3 << 20) +#define DL26_MAXLEN_SFT 16 +#define DL26_MAXLEN_MASK 0x3 +#define DL26_MAXLEN_MASK_SFT (0x3 << 16) +#define DL26_SEL_DOMAIN_SFT 13 +#define DL26_SEL_DOMAIN_MASK 0x7 +#define DL26_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define DL26_SEL_FS_SFT 8 +#define DL26_SEL_FS_MASK 0x1f +#define DL26_SEL_FS_MASK_SFT (0x1f << 8) +#define DL26_SW_CLEAR_BUF_EMPTY_SFT 7 +#define DL26_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define DL26_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define DL26_PBUF_SIZE_SFT 5 +#define DL26_PBUF_SIZE_MASK 0x3 +#define DL26_PBUF_SIZE_MASK_SFT (0x3 << 5) +#define DL26_MONO_SFT 4 +#define DL26_MONO_MASK 0x1 +#define DL26_MONO_MASK_SFT (0x1 << 4) +#define DL26_NORMAL_MODE_SFT 3 +#define DL26_NORMAL_MODE_MASK 0x1 +#define DL26_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define DL26_HALIGN_SFT 2 +#define DL26_HALIGN_MASK 0x1 +#define DL26_HALIGN_MASK_SFT (0x1 << 2) +#define DL26_HD_MODE_SFT 0 +#define DL26_HD_MODE_MASK 0x3 +#define DL26_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL0_BASE_MSB */ +#define VUL0_BASE_ADDR_MSB_SFT 0 +#define VUL0_BASE_ADDR_MSB_MASK 0x1ff +#define VUL0_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL0_BASE */ +#define VUL0_BASE_ADDR_SFT 4 +#define VUL0_BASE_ADDR_MASK 0xfffffff +#define VUL0_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL0_CUR_MSB */ +#define VUL0_CUR_PTR_MSB_SFT 0 +#define VUL0_CUR_PTR_MSB_MASK 0x1ff +#define VUL0_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL0_CUR */ +#define VUL0_CUR_PTR_SFT 0 +#define VUL0_CUR_PTR_MASK 0xffffffff +#define VUL0_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL0_END_MSB */ 
+#define VUL0_END_ADDR_MSB_SFT 0 +#define VUL0_END_ADDR_MSB_MASK 0x1ff +#define VUL0_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL0_END */ +#define VUL0_END_ADDR_SFT 4 +#define VUL0_END_ADDR_MASK 0xfffffff +#define VUL0_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL0_RCH_MON */ +#define VUL0_RCH_DATA_SFT 0 +#define VUL0_RCH_DATA_MASK 0xffffffff +#define VUL0_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL0_LCH_MON */ +#define VUL0_LCH_DATA_SFT 0 +#define VUL0_LCH_DATA_MASK 0xffffffff +#define VUL0_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL0_CON0 */ +#define VUL0_ON_SFT 28 +#define VUL0_ON_MASK 0x1 +#define VUL0_ON_MASK_SFT (0x1 << 28) +#define VUL0_MINLEN_SFT 20 +#define VUL0_MINLEN_MASK 0x3 +#define VUL0_MINLEN_MASK_SFT (0x3 << 20) +#define VUL0_MAXLEN_SFT 16 +#define VUL0_MAXLEN_MASK 0x3 +#define VUL0_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL0_SEL_DOMAIN_SFT 13 +#define VUL0_SEL_DOMAIN_MASK 0x7 +#define VUL0_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL0_SEL_FS_SFT 8 +#define VUL0_SEL_FS_MASK 0x1f +#define VUL0_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL0_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL0_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL0_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL0_WR_SIGN_SFT 6 +#define VUL0_WR_SIGN_MASK 0x1 +#define VUL0_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL0_R_MONO_SFT 5 +#define VUL0_R_MONO_MASK 0x1 +#define VUL0_R_MONO_MASK_SFT (0x1 << 5) +#define VUL0_MONO_SFT 4 +#define VUL0_MONO_MASK 0x1 +#define VUL0_MONO_MASK_SFT (0x1 << 4) +#define VUL0_NORMAL_MODE_SFT 3 +#define VUL0_NORMAL_MODE_MASK 0x1 +#define VUL0_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL0_HALIGN_SFT 2 +#define VUL0_HALIGN_MASK 0x1 +#define VUL0_HALIGN_MASK_SFT (0x1 << 2) +#define VUL0_HD_MODE_SFT 0 +#define VUL0_HD_MODE_MASK 0x3 +#define VUL0_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL1_BASE_MSB */ +#define VUL1_BASE_ADDR_MSB_SFT 0 +#define VUL1_BASE_ADDR_MSB_MASK 0x1ff +#define VUL1_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL1_BASE */ +#define 
VUL1_BASE_ADDR_SFT 4 +#define VUL1_BASE_ADDR_MASK 0xfffffff +#define VUL1_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL1_CUR_MSB */ +#define VUL1_CUR_PTR_MSB_SFT 0 +#define VUL1_CUR_PTR_MSB_MASK 0x1ff +#define VUL1_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL1_CUR */ +#define VUL1_CUR_PTR_SFT 0 +#define VUL1_CUR_PTR_MASK 0xffffffff +#define VUL1_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL1_END_MSB */ +#define VUL1_END_ADDR_MSB_SFT 0 +#define VUL1_END_ADDR_MSB_MASK 0x1ff +#define VUL1_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL1_END */ +#define VUL1_END_ADDR_SFT 4 +#define VUL1_END_ADDR_MASK 0xfffffff +#define VUL1_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL1_RCH_MON */ +#define VUL1_RCH_DATA_SFT 0 +#define VUL1_RCH_DATA_MASK 0xffffffff +#define VUL1_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL1_LCH_MON */ +#define VUL1_LCH_DATA_SFT 0 +#define VUL1_LCH_DATA_MASK 0xffffffff +#define VUL1_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL1_CON0 */ +#define VUL1_ON_SFT 28 +#define VUL1_ON_MASK 0x1 +#define VUL1_ON_MASK_SFT (0x1 << 28) +#define VUL1_MINLEN_SFT 20 +#define VUL1_MINLEN_MASK 0x3 +#define VUL1_MINLEN_MASK_SFT (0x3 << 20) +#define VUL1_MAXLEN_SFT 16 +#define VUL1_MAXLEN_MASK 0x3 +#define VUL1_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL1_SEL_DOMAIN_SFT 13 +#define VUL1_SEL_DOMAIN_MASK 0x7 +#define VUL1_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL1_SEL_FS_SFT 8 +#define VUL1_SEL_FS_MASK 0x1f +#define VUL1_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL1_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL1_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL1_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL1_WR_SIGN_SFT 6 +#define VUL1_WR_SIGN_MASK 0x1 +#define VUL1_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL1_R_MONO_SFT 5 +#define VUL1_R_MONO_MASK 0x1 +#define VUL1_R_MONO_MASK_SFT (0x1 << 5) +#define VUL1_MONO_SFT 4 +#define VUL1_MONO_MASK 0x1 +#define VUL1_MONO_MASK_SFT (0x1 << 4) +#define VUL1_NORMAL_MODE_SFT 3 +#define VUL1_NORMAL_MODE_MASK 0x1 +#define 
VUL1_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL1_HALIGN_SFT 2 +#define VUL1_HALIGN_MASK 0x1 +#define VUL1_HALIGN_MASK_SFT (0x1 << 2) +#define VUL1_HD_MODE_SFT 0 +#define VUL1_HD_MODE_MASK 0x3 +#define VUL1_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL2_BASE_MSB */ +#define VUL2_BASE_ADDR_MSB_SFT 0 +#define VUL2_BASE_ADDR_MSB_MASK 0x1ff +#define VUL2_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL2_BASE */ +#define VUL2_BASE_ADDR_SFT 4 +#define VUL2_BASE_ADDR_MASK 0xfffffff +#define VUL2_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL2_CUR_MSB */ +#define VUL2_CUR_PTR_MSB_SFT 0 +#define VUL2_CUR_PTR_MSB_MASK 0x1ff +#define VUL2_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL2_CUR */ +#define VUL2_CUR_PTR_SFT 0 +#define VUL2_CUR_PTR_MASK 0xffffffff +#define VUL2_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL2_END_MSB */ +#define VUL2_END_ADDR_MSB_SFT 0 +#define VUL2_END_ADDR_MSB_MASK 0x1ff +#define VUL2_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL2_END */ +#define VUL2_END_ADDR_SFT 4 +#define VUL2_END_ADDR_MASK 0xfffffff +#define VUL2_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL2_RCH_MON */ +#define VUL2_RCH_DATA_SFT 0 +#define VUL2_RCH_DATA_MASK 0xffffffff +#define VUL2_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL2_LCH_MON */ +#define VUL2_LCH_DATA_SFT 0 +#define VUL2_LCH_DATA_MASK 0xffffffff +#define VUL2_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL2_CON0 */ +#define VUL2_ON_SFT 28 +#define VUL2_ON_MASK 0x1 +#define VUL2_ON_MASK_SFT (0x1 << 28) +#define VUL2_MINLEN_SFT 20 +#define VUL2_MINLEN_MASK 0x3 +#define VUL2_MINLEN_MASK_SFT (0x3 << 20) +#define VUL2_MAXLEN_SFT 16 +#define VUL2_MAXLEN_MASK 0x3 +#define VUL2_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL2_SEL_DOMAIN_SFT 13 +#define VUL2_SEL_DOMAIN_MASK 0x7 +#define VUL2_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL2_SEL_FS_SFT 8 +#define VUL2_SEL_FS_MASK 0x1f +#define VUL2_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL2_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL2_SW_CLEAR_BUF_FULL_MASK 0x1 +#define 
VUL2_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL2_WR_SIGN_SFT 6 +#define VUL2_WR_SIGN_MASK 0x1 +#define VUL2_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL2_R_MONO_SFT 5 +#define VUL2_R_MONO_MASK 0x1 +#define VUL2_R_MONO_MASK_SFT (0x1 << 5) +#define VUL2_MONO_SFT 4 +#define VUL2_MONO_MASK 0x1 +#define VUL2_MONO_MASK_SFT (0x1 << 4) +#define VUL2_NORMAL_MODE_SFT 3 +#define VUL2_NORMAL_MODE_MASK 0x1 +#define VUL2_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL2_HALIGN_SFT 2 +#define VUL2_HALIGN_MASK 0x1 +#define VUL2_HALIGN_MASK_SFT (0x1 << 2) +#define VUL2_HD_MODE_SFT 0 +#define VUL2_HD_MODE_MASK 0x3 +#define VUL2_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL3_BASE_MSB */ +#define VUL3_BASE_ADDR_MSB_SFT 0 +#define VUL3_BASE_ADDR_MSB_MASK 0x1ff +#define VUL3_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL3_BASE */ +#define VUL3_BASE_ADDR_SFT 4 +#define VUL3_BASE_ADDR_MASK 0xfffffff +#define VUL3_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL3_CUR_MSB */ +#define VUL3_CUR_PTR_MSB_SFT 0 +#define VUL3_CUR_PTR_MSB_MASK 0x1ff +#define VUL3_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL3_CUR */ +#define VUL3_CUR_PTR_SFT 0 +#define VUL3_CUR_PTR_MASK 0xffffffff +#define VUL3_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL3_END_MSB */ +#define VUL3_END_ADDR_MSB_SFT 0 +#define VUL3_END_ADDR_MSB_MASK 0x1ff +#define VUL3_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL3_END */ +#define VUL3_END_ADDR_SFT 4 +#define VUL3_END_ADDR_MASK 0xfffffff +#define VUL3_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL3_RCH_MON */ +#define VUL3_RCH_DATA_SFT 0 +#define VUL3_RCH_DATA_MASK 0xffffffff +#define VUL3_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL3_LCH_MON */ +#define VUL3_LCH_DATA_SFT 0 +#define VUL3_LCH_DATA_MASK 0xffffffff +#define VUL3_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL3_CON0 */ +#define VUL3_ON_SFT 28 +#define VUL3_ON_MASK 0x1 +#define VUL3_ON_MASK_SFT (0x1 << 28) +#define VUL3_MINLEN_SFT 20 +#define VUL3_MINLEN_MASK 0x3 +#define VUL3_MINLEN_MASK_SFT (0x3 << 
20) +#define VUL3_MAXLEN_SFT 16 +#define VUL3_MAXLEN_MASK 0x3 +#define VUL3_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL3_SEL_DOMAIN_SFT 13 +#define VUL3_SEL_DOMAIN_MASK 0x7 +#define VUL3_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL3_SEL_FS_SFT 8 +#define VUL3_SEL_FS_MASK 0x1f +#define VUL3_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL3_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL3_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL3_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL3_WR_SIGN_SFT 6 +#define VUL3_WR_SIGN_MASK 0x1 +#define VUL3_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL3_R_MONO_SFT 5 +#define VUL3_R_MONO_MASK 0x1 +#define VUL3_R_MONO_MASK_SFT (0x1 << 5) +#define VUL3_MONO_SFT 4 +#define VUL3_MONO_MASK 0x1 +#define VUL3_MONO_MASK_SFT (0x1 << 4) +#define VUL3_NORMAL_MODE_SFT 3 +#define VUL3_NORMAL_MODE_MASK 0x1 +#define VUL3_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL3_HALIGN_SFT 2 +#define VUL3_HALIGN_MASK 0x1 +#define VUL3_HALIGN_MASK_SFT (0x1 << 2) +#define VUL3_HD_MODE_SFT 0 +#define VUL3_HD_MODE_MASK 0x3 +#define VUL3_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL4_BASE_MSB */ +#define VUL4_BASE_ADDR_MSB_SFT 0 +#define VUL4_BASE_ADDR_MSB_MASK 0x1ff +#define VUL4_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL4_BASE */ +#define VUL4_BASE_ADDR_SFT 4 +#define VUL4_BASE_ADDR_MASK 0xfffffff +#define VUL4_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL4_CUR_MSB */ +#define VUL4_CUR_PTR_MSB_SFT 0 +#define VUL4_CUR_PTR_MSB_MASK 0x1ff +#define VUL4_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL4_CUR */ +#define VUL4_CUR_PTR_SFT 0 +#define VUL4_CUR_PTR_MASK 0xffffffff +#define VUL4_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL4_END_MSB */ +#define VUL4_END_ADDR_MSB_SFT 0 +#define VUL4_END_ADDR_MSB_MASK 0x1ff +#define VUL4_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL4_END */ +#define VUL4_END_ADDR_SFT 4 +#define VUL4_END_ADDR_MASK 0xfffffff +#define VUL4_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL4_RCH_MON */ +#define VUL4_RCH_DATA_SFT 0 +#define VUL4_RCH_DATA_MASK 0xffffffff 
+#define VUL4_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL4_LCH_MON */ +#define VUL4_LCH_DATA_SFT 0 +#define VUL4_LCH_DATA_MASK 0xffffffff +#define VUL4_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL4_CON0 */ +#define VUL4_ON_SFT 28 +#define VUL4_ON_MASK 0x1 +#define VUL4_ON_MASK_SFT (0x1 << 28) +#define VUL4_MINLEN_SFT 20 +#define VUL4_MINLEN_MASK 0x3 +#define VUL4_MINLEN_MASK_SFT (0x3 << 20) +#define VUL4_MAXLEN_SFT 16 +#define VUL4_MAXLEN_MASK 0x3 +#define VUL4_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL4_SEL_DOMAIN_SFT 13 +#define VUL4_SEL_DOMAIN_MASK 0x7 +#define VUL4_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL4_SEL_FS_SFT 8 +#define VUL4_SEL_FS_MASK 0x1f +#define VUL4_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL4_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL4_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL4_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL4_WR_SIGN_SFT 6 +#define VUL4_WR_SIGN_MASK 0x1 +#define VUL4_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL4_R_MONO_SFT 5 +#define VUL4_R_MONO_MASK 0x1 +#define VUL4_R_MONO_MASK_SFT (0x1 << 5) +#define VUL4_MONO_SFT 4 +#define VUL4_MONO_MASK 0x1 +#define VUL4_MONO_MASK_SFT (0x1 << 4) +#define VUL4_NORMAL_MODE_SFT 3 +#define VUL4_NORMAL_MODE_MASK 0x1 +#define VUL4_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL4_HALIGN_SFT 2 +#define VUL4_HALIGN_MASK 0x1 +#define VUL4_HALIGN_MASK_SFT (0x1 << 2) +#define VUL4_HD_MODE_SFT 0 +#define VUL4_HD_MODE_MASK 0x3 +#define VUL4_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL5_BASE_MSB */ +#define VUL5_BASE_ADDR_MSB_SFT 0 +#define VUL5_BASE_ADDR_MSB_MASK 0x1ff +#define VUL5_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL5_BASE */ +#define VUL5_BASE_ADDR_SFT 4 +#define VUL5_BASE_ADDR_MASK 0xfffffff +#define VUL5_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL5_CUR_MSB */ +#define VUL5_CUR_PTR_MSB_SFT 0 +#define VUL5_CUR_PTR_MSB_MASK 0x1ff +#define VUL5_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL5_CUR */ +#define VUL5_CUR_PTR_SFT 0 +#define VUL5_CUR_PTR_MASK 0xffffffff +#define 
VUL5_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL5_END_MSB */ +#define VUL5_END_ADDR_MSB_SFT 0 +#define VUL5_END_ADDR_MSB_MASK 0x1ff +#define VUL5_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL5_END */ +#define VUL5_END_ADDR_SFT 4 +#define VUL5_END_ADDR_MASK 0xfffffff +#define VUL5_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL5_RCH_MON */ +#define VUL5_RCH_DATA_SFT 0 +#define VUL5_RCH_DATA_MASK 0xffffffff +#define VUL5_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL5_LCH_MON */ +#define VUL5_LCH_DATA_SFT 0 +#define VUL5_LCH_DATA_MASK 0xffffffff +#define VUL5_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL5_CON0 */ +#define VUL5_ON_SFT 28 +#define VUL5_ON_MASK 0x1 +#define VUL5_ON_MASK_SFT (0x1 << 28) +#define VUL5_MINLEN_SFT 20 +#define VUL5_MINLEN_MASK 0x3 +#define VUL5_MINLEN_MASK_SFT (0x3 << 20) +#define VUL5_MAXLEN_SFT 16 +#define VUL5_MAXLEN_MASK 0x3 +#define VUL5_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL5_SEL_DOMAIN_SFT 13 +#define VUL5_SEL_DOMAIN_MASK 0x7 +#define VUL5_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL5_SEL_FS_SFT 8 +#define VUL5_SEL_FS_MASK 0x1f +#define VUL5_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL5_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL5_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL5_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL5_WR_SIGN_SFT 6 +#define VUL5_WR_SIGN_MASK 0x1 +#define VUL5_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL5_R_MONO_SFT 5 +#define VUL5_R_MONO_MASK 0x1 +#define VUL5_R_MONO_MASK_SFT (0x1 << 5) +#define VUL5_MONO_SFT 4 +#define VUL5_MONO_MASK 0x1 +#define VUL5_MONO_MASK_SFT (0x1 << 4) +#define VUL5_NORMAL_MODE_SFT 3 +#define VUL5_NORMAL_MODE_MASK 0x1 +#define VUL5_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL5_HALIGN_SFT 2 +#define VUL5_HALIGN_MASK 0x1 +#define VUL5_HALIGN_MASK_SFT (0x1 << 2) +#define VUL5_HD_MODE_SFT 0 +#define VUL5_HD_MODE_MASK 0x3 +#define VUL5_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL6_BASE_MSB */ +#define VUL6_BASE_ADDR_MSB_SFT 0 +#define VUL6_BASE_ADDR_MSB_MASK 0x1ff +#define 
VUL6_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL6_BASE */ +#define VUL6_BASE_ADDR_SFT 4 +#define VUL6_BASE_ADDR_MASK 0xfffffff +#define VUL6_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL6_CUR_MSB */ +#define VUL6_CUR_PTR_MSB_SFT 0 +#define VUL6_CUR_PTR_MSB_MASK 0x1ff +#define VUL6_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL6_CUR */ +#define VUL6_CUR_PTR_SFT 0 +#define VUL6_CUR_PTR_MASK 0xffffffff +#define VUL6_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL6_END_MSB */ +#define VUL6_END_ADDR_MSB_SFT 0 +#define VUL6_END_ADDR_MSB_MASK 0x1ff +#define VUL6_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL6_END */ +#define VUL6_END_ADDR_SFT 4 +#define VUL6_END_ADDR_MASK 0xfffffff +#define VUL6_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL6_RCH_MON */ +#define VUL6_RCH_DATA_SFT 0 +#define VUL6_RCH_DATA_MASK 0xffffffff +#define VUL6_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL6_LCH_MON */ +#define VUL6_LCH_DATA_SFT 0 +#define VUL6_LCH_DATA_MASK 0xffffffff +#define VUL6_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL6_CON0 */ +#define VUL6_ON_SFT 28 +#define VUL6_ON_MASK 0x1 +#define VUL6_ON_MASK_SFT (0x1 << 28) +#define VUL6_MINLEN_SFT 20 +#define VUL6_MINLEN_MASK 0x3 +#define VUL6_MINLEN_MASK_SFT (0x3 << 20) +#define VUL6_MAXLEN_SFT 16 +#define VUL6_MAXLEN_MASK 0x3 +#define VUL6_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL6_SEL_DOMAIN_SFT 13 +#define VUL6_SEL_DOMAIN_MASK 0x7 +#define VUL6_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL6_SEL_FS_SFT 8 +#define VUL6_SEL_FS_MASK 0x1f +#define VUL6_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL6_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL6_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL6_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL6_WR_SIGN_SFT 6 +#define VUL6_WR_SIGN_MASK 0x1 +#define VUL6_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL6_R_MONO_SFT 5 +#define VUL6_R_MONO_MASK 0x1 +#define VUL6_R_MONO_MASK_SFT (0x1 << 5) +#define VUL6_MONO_SFT 4 +#define VUL6_MONO_MASK 0x1 +#define VUL6_MONO_MASK_SFT (0x1 << 4) +#define 
VUL6_NORMAL_MODE_SFT 3 +#define VUL6_NORMAL_MODE_MASK 0x1 +#define VUL6_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL6_HALIGN_SFT 2 +#define VUL6_HALIGN_MASK 0x1 +#define VUL6_HALIGN_MASK_SFT (0x1 << 2) +#define VUL6_HD_MODE_SFT 0 +#define VUL6_HD_MODE_MASK 0x3 +#define VUL6_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL7_BASE_MSB */ +#define VUL7_BASE_ADDR_MSB_SFT 0 +#define VUL7_BASE_ADDR_MSB_MASK 0x1ff +#define VUL7_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL7_BASE */ +#define VUL7_BASE_ADDR_SFT 4 +#define VUL7_BASE_ADDR_MASK 0xfffffff +#define VUL7_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL7_CUR_MSB */ +#define VUL7_CUR_PTR_MSB_SFT 0 +#define VUL7_CUR_PTR_MSB_MASK 0x1ff +#define VUL7_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL7_CUR */ +#define VUL7_CUR_PTR_SFT 0 +#define VUL7_CUR_PTR_MASK 0xffffffff +#define VUL7_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL7_END_MSB */ +#define VUL7_END_ADDR_MSB_SFT 0 +#define VUL7_END_ADDR_MSB_MASK 0x1ff +#define VUL7_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL7_END */ +#define VUL7_END_ADDR_SFT 4 +#define VUL7_END_ADDR_MASK 0xfffffff +#define VUL7_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL7_RCH_MON */ +#define VUL7_RCH_DATA_SFT 0 +#define VUL7_RCH_DATA_MASK 0xffffffff +#define VUL7_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL7_LCH_MON */ +#define VUL7_LCH_DATA_SFT 0 +#define VUL7_LCH_DATA_MASK 0xffffffff +#define VUL7_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL7_CON0 */ +#define VUL7_ON_SFT 28 +#define VUL7_ON_MASK 0x1 +#define VUL7_ON_MASK_SFT (0x1 << 28) +#define VUL7_MINLEN_SFT 20 +#define VUL7_MINLEN_MASK 0x3 +#define VUL7_MINLEN_MASK_SFT (0x3 << 20) +#define VUL7_MAXLEN_SFT 16 +#define VUL7_MAXLEN_MASK 0x3 +#define VUL7_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL7_SEL_DOMAIN_SFT 13 +#define VUL7_SEL_DOMAIN_MASK 0x7 +#define VUL7_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL7_SEL_FS_SFT 8 +#define VUL7_SEL_FS_MASK 0x1f +#define VUL7_SEL_FS_MASK_SFT (0x1f << 8) +#define 
VUL7_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL7_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL7_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL7_WR_SIGN_SFT 6 +#define VUL7_WR_SIGN_MASK 0x1 +#define VUL7_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL7_R_MONO_SFT 5 +#define VUL7_R_MONO_MASK 0x1 +#define VUL7_R_MONO_MASK_SFT (0x1 << 5) +#define VUL7_MONO_SFT 4 +#define VUL7_MONO_MASK 0x1 +#define VUL7_MONO_MASK_SFT (0x1 << 4) +#define VUL7_NORMAL_MODE_SFT 3 +#define VUL7_NORMAL_MODE_MASK 0x1 +#define VUL7_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL7_HALIGN_SFT 2 +#define VUL7_HALIGN_MASK 0x1 +#define VUL7_HALIGN_MASK_SFT (0x1 << 2) +#define VUL7_HD_MODE_SFT 0 +#define VUL7_HD_MODE_MASK 0x3 +#define VUL7_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL8_BASE_MSB */ +#define VUL8_BASE_ADDR_MSB_SFT 0 +#define VUL8_BASE_ADDR_MSB_MASK 0x1ff +#define VUL8_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL8_BASE */ +#define VUL8_BASE_ADDR_SFT 4 +#define VUL8_BASE_ADDR_MASK 0xfffffff +#define VUL8_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL8_CUR_MSB */ +#define VUL8_CUR_PTR_MSB_SFT 0 +#define VUL8_CUR_PTR_MSB_MASK 0x1ff +#define VUL8_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL8_CUR */ +#define VUL8_CUR_PTR_SFT 0 +#define VUL8_CUR_PTR_MASK 0xffffffff +#define VUL8_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL8_END_MSB */ +#define VUL8_END_ADDR_MSB_SFT 0 +#define VUL8_END_ADDR_MSB_MASK 0x1ff +#define VUL8_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL8_END */ +#define VUL8_END_ADDR_SFT 4 +#define VUL8_END_ADDR_MASK 0xfffffff +#define VUL8_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL8_RCH_MON */ +#define VUL8_RCH_DATA_SFT 0 +#define VUL8_RCH_DATA_MASK 0xffffffff +#define VUL8_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL8_LCH_MON */ +#define VUL8_LCH_DATA_SFT 0 +#define VUL8_LCH_DATA_MASK 0xffffffff +#define VUL8_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL8_CON0 */ +#define VUL8_ON_SFT 28 +#define VUL8_ON_MASK 0x1 +#define VUL8_ON_MASK_SFT (0x1 << 28) +#define 
VUL8_MINLEN_SFT 20 +#define VUL8_MINLEN_MASK 0x3 +#define VUL8_MINLEN_MASK_SFT (0x3 << 20) +#define VUL8_MAXLEN_SFT 16 +#define VUL8_MAXLEN_MASK 0x3 +#define VUL8_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL8_SEL_DOMAIN_SFT 13 +#define VUL8_SEL_DOMAIN_MASK 0x7 +#define VUL8_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL8_SEL_FS_SFT 8 +#define VUL8_SEL_FS_MASK 0x1f +#define VUL8_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL8_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL8_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL8_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL8_WR_SIGN_SFT 6 +#define VUL8_WR_SIGN_MASK 0x1 +#define VUL8_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL8_R_MONO_SFT 5 +#define VUL8_R_MONO_MASK 0x1 +#define VUL8_R_MONO_MASK_SFT (0x1 << 5) +#define VUL8_MONO_SFT 4 +#define VUL8_MONO_MASK 0x1 +#define VUL8_MONO_MASK_SFT (0x1 << 4) +#define VUL8_NORMAL_MODE_SFT 3 +#define VUL8_NORMAL_MODE_MASK 0x1 +#define VUL8_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL8_HALIGN_SFT 2 +#define VUL8_HALIGN_MASK 0x1 +#define VUL8_HALIGN_MASK_SFT (0x1 << 2) +#define VUL8_HD_MODE_SFT 0 +#define VUL8_HD_MODE_MASK 0x3 +#define VUL8_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL9_BASE_MSB */ +#define VUL9_BASE_ADDR_MSB_SFT 0 +#define VUL9_BASE_ADDR_MSB_MASK 0x1ff +#define VUL9_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL9_BASE */ +#define VUL9_BASE_ADDR_SFT 4 +#define VUL9_BASE_ADDR_MASK 0xfffffff +#define VUL9_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL9_CUR_MSB */ +#define VUL9_CUR_PTR_MSB_SFT 0 +#define VUL9_CUR_PTR_MSB_MASK 0x1ff +#define VUL9_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL9_CUR */ +#define VUL9_CUR_PTR_SFT 0 +#define VUL9_CUR_PTR_MASK 0xffffffff +#define VUL9_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL9_END_MSB */ +#define VUL9_END_ADDR_MSB_SFT 0 +#define VUL9_END_ADDR_MSB_MASK 0x1ff +#define VUL9_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL9_END */ +#define VUL9_END_ADDR_SFT 4 +#define VUL9_END_ADDR_MASK 0xfffffff +#define VUL9_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* 
AFE_VUL9_RCH_MON */ +#define VUL9_RCH_DATA_SFT 0 +#define VUL9_RCH_DATA_MASK 0xffffffff +#define VUL9_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL9_LCH_MON */ +#define VUL9_LCH_DATA_SFT 0 +#define VUL9_LCH_DATA_MASK 0xffffffff +#define VUL9_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL9_CON0 */ +#define VUL9_ON_SFT 28 +#define VUL9_ON_MASK 0x1 +#define VUL9_ON_MASK_SFT (0x1 << 28) +#define VUL9_MINLEN_SFT 20 +#define VUL9_MINLEN_MASK 0x3 +#define VUL9_MINLEN_MASK_SFT (0x3 << 20) +#define VUL9_MAXLEN_SFT 16 +#define VUL9_MAXLEN_MASK 0x3 +#define VUL9_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL9_SEL_DOMAIN_SFT 13 +#define VUL9_SEL_DOMAIN_MASK 0x7 +#define VUL9_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL9_SEL_FS_SFT 8 +#define VUL9_SEL_FS_MASK 0x1f +#define VUL9_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL9_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL9_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL9_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL9_WR_SIGN_SFT 6 +#define VUL9_WR_SIGN_MASK 0x1 +#define VUL9_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL9_R_MONO_SFT 5 +#define VUL9_R_MONO_MASK 0x1 +#define VUL9_R_MONO_MASK_SFT (0x1 << 5) +#define VUL9_MONO_SFT 4 +#define VUL9_MONO_MASK 0x1 +#define VUL9_MONO_MASK_SFT (0x1 << 4) +#define VUL9_NORMAL_MODE_SFT 3 +#define VUL9_NORMAL_MODE_MASK 0x1 +#define VUL9_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL9_HALIGN_SFT 2 +#define VUL9_HALIGN_MASK 0x1 +#define VUL9_HALIGN_MASK_SFT (0x1 << 2) +#define VUL9_HD_MODE_SFT 0 +#define VUL9_HD_MODE_MASK 0x3 +#define VUL9_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL10_BASE_MSB */ +#define VUL10_BASE_ADDR_MSB_SFT 0 +#define VUL10_BASE_ADDR_MSB_MASK 0x1ff +#define VUL10_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL10_BASE */ +#define VUL10_BASE_ADDR_SFT 4 +#define VUL10_BASE_ADDR_MASK 0xfffffff +#define VUL10_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL10_CUR_MSB */ +#define VUL10_CUR_PTR_MSB_SFT 0 +#define VUL10_CUR_PTR_MSB_MASK 0x1ff +#define VUL10_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* 
AFE_VUL10_CUR */ +#define VUL10_CUR_PTR_SFT 0 +#define VUL10_CUR_PTR_MASK 0xffffffff +#define VUL10_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL10_END_MSB */ +#define VUL10_END_ADDR_MSB_SFT 0 +#define VUL10_END_ADDR_MSB_MASK 0x1ff +#define VUL10_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL10_END */ +#define VUL10_END_ADDR_SFT 4 +#define VUL10_END_ADDR_MASK 0xfffffff +#define VUL10_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL10_RCH_MON */ +#define VUL10_RCH_DATA_SFT 0 +#define VUL10_RCH_DATA_MASK 0xffffffff +#define VUL10_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL10_LCH_MON */ +#define VUL10_LCH_DATA_SFT 0 +#define VUL10_LCH_DATA_MASK 0xffffffff +#define VUL10_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL10_CON0 */ +#define VUL10_ON_SFT 28 +#define VUL10_ON_MASK 0x1 +#define VUL10_ON_MASK_SFT (0x1 << 28) +#define VUL10_MINLEN_SFT 20 +#define VUL10_MINLEN_MASK 0x3 +#define VUL10_MINLEN_MASK_SFT (0x3 << 20) +#define VUL10_MAXLEN_SFT 16 +#define VUL10_MAXLEN_MASK 0x3 +#define VUL10_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL10_SEL_DOMAIN_SFT 13 +#define VUL10_SEL_DOMAIN_MASK 0x7 +#define VUL10_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL10_SEL_FS_SFT 8 +#define VUL10_SEL_FS_MASK 0x1f +#define VUL10_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL10_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL10_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL10_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL10_WR_SIGN_SFT 6 +#define VUL10_WR_SIGN_MASK 0x1 +#define VUL10_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL10_R_MONO_SFT 5 +#define VUL10_R_MONO_MASK 0x1 +#define VUL10_R_MONO_MASK_SFT (0x1 << 5) +#define VUL10_MONO_SFT 4 +#define VUL10_MONO_MASK 0x1 +#define VUL10_MONO_MASK_SFT (0x1 << 4) +#define VUL10_NORMAL_MODE_SFT 3 +#define VUL10_NORMAL_MODE_MASK 0x1 +#define VUL10_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL10_HALIGN_SFT 2 +#define VUL10_HALIGN_MASK 0x1 +#define VUL10_HALIGN_MASK_SFT (0x1 << 2) +#define VUL10_HD_MODE_SFT 0 +#define VUL10_HD_MODE_MASK 0x3 +#define 
VUL10_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL24_BASE_MSB */ +#define VUL24_BASE_ADDR_MSB_SFT 0 +#define VUL24_BASE_ADDR_MSB_MASK 0x1ff +#define VUL24_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL24_BASE */ +#define VUL24_BASE_ADDR_SFT 4 +#define VUL24_BASE_ADDR_MASK 0xfffffff +#define VUL24_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL24_CUR_MSB */ +#define VUL24_CUR_PTR_MSB_SFT 0 +#define VUL24_CUR_PTR_MSB_MASK 0x1ff +#define VUL24_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL24_CUR */ +#define VUL24_CUR_PTR_SFT 0 +#define VUL24_CUR_PTR_MASK 0xffffffff +#define VUL24_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL24_END_MSB */ +#define VUL24_END_ADDR_MSB_SFT 0 +#define VUL24_END_ADDR_MSB_MASK 0x1ff +#define VUL24_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL24_END */ +#define VUL24_END_ADDR_SFT 4 +#define VUL24_END_ADDR_MASK 0xfffffff +#define VUL24_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL24_CON0 */ +#define OUT_ON_USE_VUL24_SFT 29 +#define OUT_ON_USE_VUL24_MASK 0x1 +#define OUT_ON_USE_VUL24_MASK_SFT (0x1 << 29) +#define VUL24_ON_SFT 28 +#define VUL24_ON_MASK 0x1 +#define VUL24_ON_MASK_SFT (0x1 << 28) +#define VUL24_MINLEN_SFT 20 +#define VUL24_MINLEN_MASK 0x3 +#define VUL24_MINLEN_MASK_SFT (0x3 << 20) +#define VUL24_MAXLEN_SFT 16 +#define VUL24_MAXLEN_MASK 0x3 +#define VUL24_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL24_SEL_DOMAIN_SFT 13 +#define VUL24_SEL_DOMAIN_MASK 0x7 +#define VUL24_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL24_SEL_FS_SFT 8 +#define VUL24_SEL_FS_MASK 0x1f +#define VUL24_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL24_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL24_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL24_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL24_WR_SIGN_SFT 6 +#define VUL24_WR_SIGN_MASK 0x1 +#define VUL24_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL24_R_MONO_SFT 5 +#define VUL24_R_MONO_MASK 0x1 +#define VUL24_R_MONO_MASK_SFT (0x1 << 5) +#define VUL24_MONO_SFT 4 +#define VUL24_MONO_MASK 0x1 +#define VUL24_MONO_MASK_SFT (0x1 << 
4) +#define VUL24_NORMAL_MODE_SFT 3 +#define VUL24_NORMAL_MODE_MASK 0x1 +#define VUL24_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL24_HALIGN_SFT 2 +#define VUL24_HALIGN_MASK 0x1 +#define VUL24_HALIGN_MASK_SFT (0x1 << 2) +#define VUL24_HD_MODE_SFT 0 +#define VUL24_HD_MODE_MASK 0x3 +#define VUL24_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL25_BASE_MSB */ +#define VUL25_BASE_ADDR_MSB_SFT 0 +#define VUL25_BASE_ADDR_MSB_MASK 0x1ff +#define VUL25_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL25_BASE */ +#define VUL25_BASE_ADDR_SFT 4 +#define VUL25_BASE_ADDR_MASK 0xfffffff +#define VUL25_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL25_CUR_MSB */ +#define VUL25_CUR_PTR_MSB_SFT 0 +#define VUL25_CUR_PTR_MSB_MASK 0x1ff +#define VUL25_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL25_CUR */ +#define VUL25_CUR_PTR_SFT 0 +#define VUL25_CUR_PTR_MASK 0xffffffff +#define VUL25_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL25_END_MSB */ +#define VUL25_END_ADDR_MSB_SFT 0 +#define VUL25_END_ADDR_MSB_MASK 0x1ff +#define VUL25_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL25_END */ +#define VUL25_END_ADDR_SFT 4 +#define VUL25_END_ADDR_MASK 0xfffffff +#define VUL25_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL25_CON0 */ +#define OUT_ON_USE_VUL25_SFT 29 +#define OUT_ON_USE_VUL25_MASK 0x1 +#define OUT_ON_USE_VUL25_MASK_SFT (0x1 << 29) +#define VUL25_ON_SFT 28 +#define VUL25_ON_MASK 0x1 +#define VUL25_ON_MASK_SFT (0x1 << 28) +#define VUL25_MINLEN_SFT 20 +#define VUL25_MINLEN_MASK 0x3 +#define VUL25_MINLEN_MASK_SFT (0x3 << 20) +#define VUL25_MAXLEN_SFT 16 +#define VUL25_MAXLEN_MASK 0x3 +#define VUL25_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL25_SEL_DOMAIN_SFT 13 +#define VUL25_SEL_DOMAIN_MASK 0x7 +#define VUL25_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL25_SEL_FS_SFT 8 +#define VUL25_SEL_FS_MASK 0x1f +#define VUL25_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL25_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL25_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL25_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) 
+#define VUL25_WR_SIGN_SFT 6 +#define VUL25_WR_SIGN_MASK 0x1 +#define VUL25_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL25_R_MONO_SFT 5 +#define VUL25_R_MONO_MASK 0x1 +#define VUL25_R_MONO_MASK_SFT (0x1 << 5) +#define VUL25_MONO_SFT 4 +#define VUL25_MONO_MASK 0x1 +#define VUL25_MONO_MASK_SFT (0x1 << 4) +#define VUL25_NORMAL_MODE_SFT 3 +#define VUL25_NORMAL_MODE_MASK 0x1 +#define VUL25_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL25_HALIGN_SFT 2 +#define VUL25_HALIGN_MASK 0x1 +#define VUL25_HALIGN_MASK_SFT (0x1 << 2) +#define VUL25_HD_MODE_SFT 0 +#define VUL25_HD_MODE_MASK 0x3 +#define VUL25_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL26_BASE_MSB */ +#define VUL26_BASE_ADDR_MSB_SFT 0 +#define VUL26_BASE_ADDR_MSB_MASK 0x1ff +#define VUL26_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL26_BASE */ +#define VUL26_BASE_ADDR_SFT 4 +#define VUL26_BASE_ADDR_MASK 0xfffffff +#define VUL26_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL26_CUR_MSB */ +#define VUL26_CUR_PTR_MSB_SFT 0 +#define VUL26_CUR_PTR_MSB_MASK 0x1ff +#define VUL26_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL26_CUR */ +#define VUL26_CUR_PTR_SFT 0 +#define VUL26_CUR_PTR_MASK 0xffffffff +#define VUL26_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL26_END_MSB */ +#define VUL26_END_ADDR_MSB_SFT 0 +#define VUL26_END_ADDR_MSB_MASK 0x1ff +#define VUL26_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL26_END */ +#define VUL26_END_ADDR_SFT 4 +#define VUL26_END_ADDR_MASK 0xfffffff +#define VUL26_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL26_CON0 */ +#define OUT_ON_USE_VUL26_SFT 29 +#define OUT_ON_USE_VUL26_MASK 0x1 +#define OUT_ON_USE_VUL26_MASK_SFT (0x1 << 29) +#define VUL26_ON_SFT 28 +#define VUL26_ON_MASK 0x1 +#define VUL26_ON_MASK_SFT (0x1 << 28) +#define VUL26_MINLEN_SFT 20 +#define VUL26_MINLEN_MASK 0x3 +#define VUL26_MINLEN_MASK_SFT (0x3 << 20) +#define VUL26_MAXLEN_SFT 16 +#define VUL26_MAXLEN_MASK 0x3 +#define VUL26_MAXLEN_MASK_SFT (0x3 << 16) +#define VUL26_SEL_DOMAIN_SFT 13 +#define 
VUL26_SEL_DOMAIN_MASK 0x7 +#define VUL26_SEL_DOMAIN_MASK_SFT (0x7 << 13) +#define VUL26_SEL_FS_SFT 8 +#define VUL26_SEL_FS_MASK 0x1f +#define VUL26_SEL_FS_MASK_SFT (0x1f << 8) +#define VUL26_SW_CLEAR_BUF_FULL_SFT 7 +#define VUL26_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL26_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 7) +#define VUL26_WR_SIGN_SFT 6 +#define VUL26_WR_SIGN_MASK 0x1 +#define VUL26_WR_SIGN_MASK_SFT (0x1 << 6) +#define VUL26_R_MONO_SFT 5 +#define VUL26_R_MONO_MASK 0x1 +#define VUL26_R_MONO_MASK_SFT (0x1 << 5) +#define VUL26_MONO_SFT 4 +#define VUL26_MONO_MASK 0x1 +#define VUL26_MONO_MASK_SFT (0x1 << 4) +#define VUL26_NORMAL_MODE_SFT 3 +#define VUL26_NORMAL_MODE_MASK 0x1 +#define VUL26_NORMAL_MODE_MASK_SFT (0x1 << 3) +#define VUL26_HALIGN_SFT 2 +#define VUL26_HALIGN_MASK 0x1 +#define VUL26_HALIGN_MASK_SFT (0x1 << 2) +#define VUL26_HD_MODE_SFT 0 +#define VUL26_HD_MODE_MASK 0x3 +#define VUL26_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL_CM0_BASE_MSB */ +#define VUL_CM0_BASE_ADDR_MSB_SFT 0 +#define VUL_CM0_BASE_ADDR_MSB_MASK 0x1ff +#define VUL_CM0_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM0_BASE */ +#define VUL_CM0_BASE_ADDR_SFT 4 +#define VUL_CM0_BASE_ADDR_MASK 0xfffffff +#define VUL_CM0_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL_CM0_CUR_MSB */ +#define VUL_CM0_CUR_PTR_MSB_SFT 0 +#define VUL_CM0_CUR_PTR_MSB_MASK 0x1ff +#define VUL_CM0_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM0_CUR */ +#define VUL_CM0_CUR_PTR_SFT 0 +#define VUL_CM0_CUR_PTR_MASK 0xffffffff +#define VUL_CM0_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM0_END_MSB */ +#define VUL_CM0_END_ADDR_MSB_SFT 0 +#define VUL_CM0_END_ADDR_MSB_MASK 0x1ff +#define VUL_CM0_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM0_END */ +#define VUL_CM0_END_ADDR_SFT 4 +#define VUL_CM0_END_ADDR_MASK 0xfffffff +#define VUL_CM0_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL_CM0_CON0 */ +#define VUL_CM0_ON_SFT 28 +#define VUL_CM0_ON_MASK 0x1 +#define VUL_CM0_ON_MASK_SFT (0x1 << 28) +#define 
VUL_CM0_REG_CH_SHIFT_MODE_SFT 26 +#define VUL_CM0_REG_CH_SHIFT_MODE_MASK 0x1 +#define VUL_CM0_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define VUL_CM0_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define VUL_CM0_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define VUL_CM0_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define VUL_CM0_SW_CLEAR_BUF_FULL_SFT 24 +#define VUL_CM0_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL_CM0_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define VUL_CM0_ULTRA_TH_SFT 20 +#define VUL_CM0_ULTRA_TH_MASK 0xf +#define VUL_CM0_ULTRA_TH_MASK_SFT (0xf << 20) +#define VUL_CM0_NORMAL_MODE_SFT 17 +#define VUL_CM0_NORMAL_MODE_MASK 0x1 +#define VUL_CM0_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define VUL_CM0_ODD_USE_EVEN_SFT 16 +#define VUL_CM0_ODD_USE_EVEN_MASK 0x1 +#define VUL_CM0_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define VUL_CM0_AXI_REQ_MAXLEN_SFT 12 +#define VUL_CM0_AXI_REQ_MAXLEN_MASK 0x3 +#define VUL_CM0_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define VUL_CM0_AXI_REQ_MINLEN_SFT 8 +#define VUL_CM0_AXI_REQ_MINLEN_MASK 0x3 +#define VUL_CM0_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define VUL_CM0_HALIGN_SFT 7 +#define VUL_CM0_HALIGN_MASK 0x1 +#define VUL_CM0_HALIGN_MASK_SFT (0x1 << 7) +#define VUL_CM0_SIGN_EXT_SFT 6 +#define VUL_CM0_SIGN_EXT_MASK 0x1 +#define VUL_CM0_SIGN_EXT_MASK_SFT (0x1 << 6) +#define VUL_CM0_HD_MODE_SFT 4 +#define VUL_CM0_HD_MODE_MASK 0x3 +#define VUL_CM0_HD_MODE_MASK_SFT (0x3 << 4) +#define VUL_CM0_MAKE_EXTRA_UPDATE_SFT 3 +#define VUL_CM0_MAKE_EXTRA_UPDATE_MASK 0x1 +#define VUL_CM0_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define VUL_CM0_AGENT_FREE_RUN_SFT 2 +#define VUL_CM0_AGENT_FREE_RUN_MASK 0x1 +#define VUL_CM0_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define VUL_CM0_USE_INT_ODD_SFT 1 +#define VUL_CM0_USE_INT_ODD_MASK 0x1 +#define VUL_CM0_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define VUL_CM0_INT_ODD_FLAG_SFT 0 +#define VUL_CM0_INT_ODD_FLAG_MASK 0x1 +#define VUL_CM0_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_VUL_CM1_BASE_MSB */ +#define VUL_CM1_BASE_ADDR_MSB_SFT 0 +#define 
VUL_CM1_BASE_ADDR_MSB_MASK 0x1ff +#define VUL_CM1_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM1_BASE */ +#define VUL_CM1_BASE_ADDR_SFT 4 +#define VUL_CM1_BASE_ADDR_MASK 0xfffffff +#define VUL_CM1_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL_CM1_CUR_MSB */ +#define VUL_CM1_CUR_PTR_MSB_SFT 0 +#define VUL_CM1_CUR_PTR_MSB_MASK 0x1ff +#define VUL_CM1_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM1_CUR */ +#define VUL_CM1_CUR_PTR_SFT 0 +#define VUL_CM1_CUR_PTR_MASK 0xffffffff +#define VUL_CM1_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM1_END_MSB */ +#define VUL_CM1_END_ADDR_MSB_SFT 0 +#define VUL_CM1_END_ADDR_MSB_MASK 0x1ff +#define VUL_CM1_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM1_END */ +#define VUL_CM1_END_ADDR_SFT 4 +#define VUL_CM1_END_ADDR_MASK 0xfffffff +#define VUL_CM1_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL_CM1_CON0 */ +#define VUL_CM1_ON_SFT 28 +#define VUL_CM1_ON_MASK 0x1 +#define VUL_CM1_ON_MASK_SFT (0x1 << 28) +#define VUL_CM1_REG_CH_SHIFT_MODE_SFT 26 +#define VUL_CM1_REG_CH_SHIFT_MODE_MASK 0x1 +#define VUL_CM1_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define VUL_CM1_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define VUL_CM1_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define VUL_CM1_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define VUL_CM1_SW_CLEAR_BUF_FULL_SFT 24 +#define VUL_CM1_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL_CM1_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define VUL_CM1_ULTRA_TH_SFT 20 +#define VUL_CM1_ULTRA_TH_MASK 0xf +#define VUL_CM1_ULTRA_TH_MASK_SFT (0xf << 20) +#define VUL_CM1_NORMAL_MODE_SFT 17 +#define VUL_CM1_NORMAL_MODE_MASK 0x1 +#define VUL_CM1_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define VUL_CM1_ODD_USE_EVEN_SFT 16 +#define VUL_CM1_ODD_USE_EVEN_MASK 0x1 +#define VUL_CM1_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define VUL_CM1_AXI_REQ_MAXLEN_SFT 12 +#define VUL_CM1_AXI_REQ_MAXLEN_MASK 0x3 +#define VUL_CM1_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define VUL_CM1_AXI_REQ_MINLEN_SFT 8 +#define VUL_CM1_AXI_REQ_MINLEN_MASK 0x3 
+#define VUL_CM1_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define VUL_CM1_HALIGN_SFT 7 +#define VUL_CM1_HALIGN_MASK 0x1 +#define VUL_CM1_HALIGN_MASK_SFT (0x1 << 7) +#define VUL_CM1_SIGN_EXT_SFT 6 +#define VUL_CM1_SIGN_EXT_MASK 0x1 +#define VUL_CM1_SIGN_EXT_MASK_SFT (0x1 << 6) +#define VUL_CM1_HD_MODE_SFT 4 +#define VUL_CM1_HD_MODE_MASK 0x3 +#define VUL_CM1_HD_MODE_MASK_SFT (0x3 << 4) +#define VUL_CM1_MAKE_EXTRA_UPDATE_SFT 3 +#define VUL_CM1_MAKE_EXTRA_UPDATE_MASK 0x1 +#define VUL_CM1_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define VUL_CM1_AGENT_FREE_RUN_SFT 2 +#define VUL_CM1_AGENT_FREE_RUN_MASK 0x1 +#define VUL_CM1_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define VUL_CM1_USE_INT_ODD_SFT 1 +#define VUL_CM1_USE_INT_ODD_MASK 0x1 +#define VUL_CM1_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define VUL_CM1_INT_ODD_FLAG_SFT 0 +#define VUL_CM1_INT_ODD_FLAG_MASK 0x1 +#define VUL_CM1_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_VUL_CM2_BASE_MSB */ +#define VUL_CM2_BASE_ADDR_MSB_SFT 0 +#define VUL_CM2_BASE_ADDR_MSB_MASK 0x1ff +#define VUL_CM2_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM2_BASE */ +#define VUL_CM2_BASE_ADDR_SFT 4 +#define VUL_CM2_BASE_ADDR_MASK 0xfffffff +#define VUL_CM2_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL_CM2_CUR_MSB */ +#define VUL_CM2_CUR_PTR_MSB_SFT 0 +#define VUL_CM2_CUR_PTR_MSB_MASK 0x1ff +#define VUL_CM2_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM2_CUR */ +#define VUL_CM2_CUR_PTR_SFT 0 +#define VUL_CM2_CUR_PTR_MASK 0xffffffff +#define VUL_CM2_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM2_END_MSB */ +#define VUL_CM2_END_ADDR_MSB_SFT 0 +#define VUL_CM2_END_ADDR_MSB_MASK 0x1ff +#define VUL_CM2_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_VUL_CM2_END */ +#define VUL_CM2_END_ADDR_SFT 4 +#define VUL_CM2_END_ADDR_MASK 0xfffffff +#define VUL_CM2_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_VUL_CM2_CON0 */ +#define VUL_CM2_ON_SFT 28 +#define VUL_CM2_ON_MASK 0x1 +#define VUL_CM2_ON_MASK_SFT (0x1 << 28) +#define VUL_CM2_REG_CH_SHIFT_MODE_SFT 26 
+#define VUL_CM2_REG_CH_SHIFT_MODE_MASK 0x1 +#define VUL_CM2_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define VUL_CM2_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define VUL_CM2_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define VUL_CM2_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define VUL_CM2_SW_CLEAR_BUF_FULL_SFT 24 +#define VUL_CM2_SW_CLEAR_BUF_FULL_MASK 0x1 +#define VUL_CM2_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define VUL_CM2_ULTRA_TH_SFT 20 +#define VUL_CM2_ULTRA_TH_MASK 0xf +#define VUL_CM2_ULTRA_TH_MASK_SFT (0xf << 20) +#define VUL_CM2_NORMAL_MODE_SFT 17 +#define VUL_CM2_NORMAL_MODE_MASK 0x1 +#define VUL_CM2_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define VUL_CM2_ODD_USE_EVEN_SFT 16 +#define VUL_CM2_ODD_USE_EVEN_MASK 0x1 +#define VUL_CM2_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define VUL_CM2_AXI_REQ_MAXLEN_SFT 12 +#define VUL_CM2_AXI_REQ_MAXLEN_MASK 0x3 +#define VUL_CM2_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define VUL_CM2_AXI_REQ_MINLEN_SFT 8 +#define VUL_CM2_AXI_REQ_MINLEN_MASK 0x3 +#define VUL_CM2_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define VUL_CM2_HALIGN_SFT 7 +#define VUL_CM2_HALIGN_MASK 0x1 +#define VUL_CM2_HALIGN_MASK_SFT (0x1 << 7) +#define VUL_CM2_SIGN_EXT_SFT 6 +#define VUL_CM2_SIGN_EXT_MASK 0x1 +#define VUL_CM2_SIGN_EXT_MASK_SFT (0x1 << 6) +#define VUL_CM2_HD_MODE_SFT 4 +#define VUL_CM2_HD_MODE_MASK 0x3 +#define VUL_CM2_HD_MODE_MASK_SFT (0x3 << 4) +#define VUL_CM2_MAKE_EXTRA_UPDATE_SFT 3 +#define VUL_CM2_MAKE_EXTRA_UPDATE_MASK 0x1 +#define VUL_CM2_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define VUL_CM2_AGENT_FREE_RUN_SFT 2 +#define VUL_CM2_AGENT_FREE_RUN_MASK 0x1 +#define VUL_CM2_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define VUL_CM2_USE_INT_ODD_SFT 1 +#define VUL_CM2_USE_INT_ODD_MASK 0x1 +#define VUL_CM2_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define VUL_CM2_INT_ODD_FLAG_SFT 0 +#define VUL_CM2_INT_ODD_FLAG_MASK 0x1 +#define VUL_CM2_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_ETDM_IN0_BASE_MSB */ +#define ETDM_IN0_BASE_ADDR_MSB_SFT 0 +#define ETDM_IN0_BASE_ADDR_MSB_MASK 0x1ff +#define 
ETDM_IN0_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN0_BASE */ +#define ETDM_IN0_BASE_ADDR_SFT 4 +#define ETDM_IN0_BASE_ADDR_MASK 0xfffffff +#define ETDM_IN0_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN0_CUR_MSB */ +#define ETDM_IN0_CUR_PTR_MSB_SFT 0 +#define ETDM_IN0_CUR_PTR_MSB_MASK 0x1ff +#define ETDM_IN0_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN0_CUR */ +#define ETDM_IN0_CUR_PTR_SFT 0 +#define ETDM_IN0_CUR_PTR_MASK 0xffffffff +#define ETDM_IN0_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_ETDM_IN0_END_MSB */ +#define ETDM_IN0_END_ADDR_MSB_SFT 0 +#define ETDM_IN0_END_ADDR_MSB_MASK 0x1ff +#define ETDM_IN0_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN0_END */ +#define ETDM_IN0_END_ADDR_SFT 4 +#define ETDM_IN0_END_ADDR_MASK 0xfffffff +#define ETDM_IN0_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN0_CON0 */ +#define ETDM_IN0_CH_NUM_SFT 28 +#define ETDM_IN0_CH_NUM_MASK 0xf +#define ETDM_IN0_CH_NUM_MASK_SFT (0xf << 28) +#define ETDM_IN0_ON_SFT 27 +#define ETDM_IN0_ON_MASK 0x1 +#define ETDM_IN0_ON_MASK_SFT (0x1 << 27) +#define ETDM_IN0_REG_CH_SHIFT_MODE_SFT 26 +#define ETDM_IN0_REG_CH_SHIFT_MODE_MASK 0x1 +#define ETDM_IN0_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define ETDM_IN0_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define ETDM_IN0_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define ETDM_IN0_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define ETDM_IN0_SW_CLEAR_BUF_FULL_SFT 24 +#define ETDM_IN0_SW_CLEAR_BUF_FULL_MASK 0x1 +#define ETDM_IN0_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define ETDM_IN0_ULTRA_TH_SFT 20 +#define ETDM_IN0_ULTRA_TH_MASK 0xf +#define ETDM_IN0_ULTRA_TH_MASK_SFT (0xf << 20) +#define ETDM_IN0_NORMAL_MODE_SFT 17 +#define ETDM_IN0_NORMAL_MODE_MASK 0x1 +#define ETDM_IN0_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define ETDM_IN0_ODD_USE_EVEN_SFT 16 +#define ETDM_IN0_ODD_USE_EVEN_MASK 0x1 +#define ETDM_IN0_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define ETDM_IN0_AXI_REQ_MAXLEN_SFT 12 +#define ETDM_IN0_AXI_REQ_MAXLEN_MASK 0x3 +#define 
ETDM_IN0_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define ETDM_IN0_AXI_REQ_MINLEN_SFT 8 +#define ETDM_IN0_AXI_REQ_MINLEN_MASK 0x3 +#define ETDM_IN0_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define ETDM_IN0_HALIGN_SFT 7 +#define ETDM_IN0_HALIGN_MASK 0x1 +#define ETDM_IN0_HALIGN_MASK_SFT (0x1 << 7) +#define ETDM_IN0_SIGN_EXT_SFT 6 +#define ETDM_IN0_SIGN_EXT_MASK 0x1 +#define ETDM_IN0_SIGN_EXT_MASK_SFT (0x1 << 6) +#define ETDM_IN0_HD_MODE_SFT 4 +#define ETDM_IN0_HD_MODE_MASK 0x3 +#define ETDM_IN0_HD_MODE_MASK_SFT (0x3 << 4) +#define ETDM_IN0_MAKE_EXTRA_UPDATE_SFT 3 +#define ETDM_IN0_MAKE_EXTRA_UPDATE_MASK 0x1 +#define ETDM_IN0_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define ETDM_IN0_AGENT_FREE_RUN_SFT 2 +#define ETDM_IN0_AGENT_FREE_RUN_MASK 0x1 +#define ETDM_IN0_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define ETDM_IN0_USE_INT_ODD_SFT 1 +#define ETDM_IN0_USE_INT_ODD_MASK 0x1 +#define ETDM_IN0_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define ETDM_IN0_INT_ODD_FLAG_SFT 0 +#define ETDM_IN0_INT_ODD_FLAG_MASK 0x1 +#define ETDM_IN0_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_ETDM_IN1_BASE_MSB */ +#define ETDM_IN1_BASE_ADDR_MSB_SFT 0 +#define ETDM_IN1_BASE_ADDR_MSB_MASK 0x1ff +#define ETDM_IN1_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN1_BASE */ +#define ETDM_IN1_BASE_ADDR_SFT 4 +#define ETDM_IN1_BASE_ADDR_MASK 0xfffffff +#define ETDM_IN1_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN1_CUR_MSB */ +#define ETDM_IN1_CUR_PTR_MSB_SFT 0 +#define ETDM_IN1_CUR_PTR_MSB_MASK 0x1ff +#define ETDM_IN1_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN1_CUR */ +#define ETDM_IN1_CUR_PTR_SFT 0 +#define ETDM_IN1_CUR_PTR_MASK 0xffffffff +#define ETDM_IN1_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_ETDM_IN1_END_MSB */ +#define ETDM_IN1_END_ADDR_MSB_SFT 0 +#define ETDM_IN1_END_ADDR_MSB_MASK 0x1ff +#define ETDM_IN1_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN1_END */ +#define ETDM_IN1_END_ADDR_SFT 4 +#define ETDM_IN1_END_ADDR_MASK 0xfffffff +#define ETDM_IN1_END_ADDR_MASK_SFT (0xfffffff << 4) 
+ +/* AFE_ETDM_IN1_CON0 */ +#define ETDM_IN1_CH_NUM_SFT 28 +#define ETDM_IN1_CH_NUM_MASK 0xf +#define ETDM_IN1_CH_NUM_MASK_SFT (0xf << 28) +#define ETDM_IN1_ON_SFT 27 +#define ETDM_IN1_ON_MASK 0x1 +#define ETDM_IN1_ON_MASK_SFT (0x1 << 27) +#define ETDM_IN1_REG_CH_SHIFT_MODE_SFT 26 +#define ETDM_IN1_REG_CH_SHIFT_MODE_MASK 0x1 +#define ETDM_IN1_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define ETDM_IN1_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define ETDM_IN1_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define ETDM_IN1_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define ETDM_IN1_SW_CLEAR_BUF_FULL_SFT 24 +#define ETDM_IN1_SW_CLEAR_BUF_FULL_MASK 0x1 +#define ETDM_IN1_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define ETDM_IN1_ULTRA_TH_SFT 20 +#define ETDM_IN1_ULTRA_TH_MASK 0xf +#define ETDM_IN1_ULTRA_TH_MASK_SFT (0xf << 20) +#define ETDM_IN1_NORMAL_MODE_SFT 17 +#define ETDM_IN1_NORMAL_MODE_MASK 0x1 +#define ETDM_IN1_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define ETDM_IN1_ODD_USE_EVEN_SFT 16 +#define ETDM_IN1_ODD_USE_EVEN_MASK 0x1 +#define ETDM_IN1_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define ETDM_IN1_AXI_REQ_MAXLEN_SFT 12 +#define ETDM_IN1_AXI_REQ_MAXLEN_MASK 0x3 +#define ETDM_IN1_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define ETDM_IN1_AXI_REQ_MINLEN_SFT 8 +#define ETDM_IN1_AXI_REQ_MINLEN_MASK 0x3 +#define ETDM_IN1_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define ETDM_IN1_HALIGN_SFT 7 +#define ETDM_IN1_HALIGN_MASK 0x1 +#define ETDM_IN1_HALIGN_MASK_SFT (0x1 << 7) +#define ETDM_IN1_SIGN_EXT_SFT 6 +#define ETDM_IN1_SIGN_EXT_MASK 0x1 +#define ETDM_IN1_SIGN_EXT_MASK_SFT (0x1 << 6) +#define ETDM_IN1_HD_MODE_SFT 4 +#define ETDM_IN1_HD_MODE_MASK 0x3 +#define ETDM_IN1_HD_MODE_MASK_SFT (0x3 << 4) +#define ETDM_IN1_MAKE_EXTRA_UPDATE_SFT 3 +#define ETDM_IN1_MAKE_EXTRA_UPDATE_MASK 0x1 +#define ETDM_IN1_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define ETDM_IN1_AGENT_FREE_RUN_SFT 2 +#define ETDM_IN1_AGENT_FREE_RUN_MASK 0x1 +#define ETDM_IN1_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define ETDM_IN1_USE_INT_ODD_SFT 1 +#define 
ETDM_IN1_USE_INT_ODD_MASK 0x1 +#define ETDM_IN1_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define ETDM_IN1_INT_ODD_FLAG_SFT 0 +#define ETDM_IN1_INT_ODD_FLAG_MASK 0x1 +#define ETDM_IN1_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_ETDM_IN2_BASE_MSB */ +#define ETDM_IN2_BASE_ADDR_MSB_SFT 0 +#define ETDM_IN2_BASE_ADDR_MSB_MASK 0x1ff +#define ETDM_IN2_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN2_BASE */ +#define ETDM_IN2_BASE_ADDR_SFT 4 +#define ETDM_IN2_BASE_ADDR_MASK 0xfffffff +#define ETDM_IN2_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN2_CUR_MSB */ +#define ETDM_IN2_CUR_PTR_MSB_SFT 0 +#define ETDM_IN2_CUR_PTR_MSB_MASK 0x1ff +#define ETDM_IN2_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN2_CUR */ +#define ETDM_IN2_CUR_PTR_SFT 0 +#define ETDM_IN2_CUR_PTR_MASK 0xffffffff +#define ETDM_IN2_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_ETDM_IN2_END_MSB */ +#define ETDM_IN2_END_ADDR_MSB_SFT 0 +#define ETDM_IN2_END_ADDR_MSB_MASK 0x1ff +#define ETDM_IN2_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN2_END */ +#define ETDM_IN2_END_ADDR_SFT 4 +#define ETDM_IN2_END_ADDR_MASK 0xfffffff +#define ETDM_IN2_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN2_CON0 */ +#define ETDM_IN2_CH_NUM_SFT 28 +#define ETDM_IN2_CH_NUM_MASK 0xf +#define ETDM_IN2_CH_NUM_MASK_SFT (0xf << 28) +#define ETDM_IN2_ON_SFT 27 +#define ETDM_IN2_ON_MASK 0x1 +#define ETDM_IN2_ON_MASK_SFT (0x1 << 27) +#define ETDM_IN2_REG_CH_SHIFT_MODE_SFT 26 +#define ETDM_IN2_REG_CH_SHIFT_MODE_MASK 0x1 +#define ETDM_IN2_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define ETDM_IN2_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define ETDM_IN2_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define ETDM_IN2_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define ETDM_IN2_SW_CLEAR_BUF_FULL_SFT 24 +#define ETDM_IN2_SW_CLEAR_BUF_FULL_MASK 0x1 +#define ETDM_IN2_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define ETDM_IN2_ULTRA_TH_SFT 20 +#define ETDM_IN2_ULTRA_TH_MASK 0xf +#define ETDM_IN2_ULTRA_TH_MASK_SFT (0xf << 20) +#define 
ETDM_IN2_NORMAL_MODE_SFT 17 +#define ETDM_IN2_NORMAL_MODE_MASK 0x1 +#define ETDM_IN2_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define ETDM_IN2_ODD_USE_EVEN_SFT 16 +#define ETDM_IN2_ODD_USE_EVEN_MASK 0x1 +#define ETDM_IN2_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define ETDM_IN2_AXI_REQ_MAXLEN_SFT 12 +#define ETDM_IN2_AXI_REQ_MAXLEN_MASK 0x3 +#define ETDM_IN2_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define ETDM_IN2_AXI_REQ_MINLEN_SFT 8 +#define ETDM_IN2_AXI_REQ_MINLEN_MASK 0x3 +#define ETDM_IN2_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define ETDM_IN2_HALIGN_SFT 7 +#define ETDM_IN2_HALIGN_MASK 0x1 +#define ETDM_IN2_HALIGN_MASK_SFT (0x1 << 7) +#define ETDM_IN2_SIGN_EXT_SFT 6 +#define ETDM_IN2_SIGN_EXT_MASK 0x1 +#define ETDM_IN2_SIGN_EXT_MASK_SFT (0x1 << 6) +#define ETDM_IN2_HD_MODE_SFT 4 +#define ETDM_IN2_HD_MODE_MASK 0x3 +#define ETDM_IN2_HD_MODE_MASK_SFT (0x3 << 4) +#define ETDM_IN2_MAKE_EXTRA_UPDATE_SFT 3 +#define ETDM_IN2_MAKE_EXTRA_UPDATE_MASK 0x1 +#define ETDM_IN2_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define ETDM_IN2_AGENT_FREE_RUN_SFT 2 +#define ETDM_IN2_AGENT_FREE_RUN_MASK 0x1 +#define ETDM_IN2_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define ETDM_IN2_USE_INT_ODD_SFT 1 +#define ETDM_IN2_USE_INT_ODD_MASK 0x1 +#define ETDM_IN2_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define ETDM_IN2_INT_ODD_FLAG_SFT 0 +#define ETDM_IN2_INT_ODD_FLAG_MASK 0x1 +#define ETDM_IN2_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_ETDM_IN3_BASE_MSB */ +#define ETDM_IN3_BASE_ADDR_MSB_SFT 0 +#define ETDM_IN3_BASE_ADDR_MSB_MASK 0x1ff +#define ETDM_IN3_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN3_BASE */ +#define ETDM_IN3_BASE_ADDR_SFT 4 +#define ETDM_IN3_BASE_ADDR_MASK 0xfffffff +#define ETDM_IN3_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN3_CUR_MSB */ +#define ETDM_IN3_CUR_PTR_MSB_SFT 0 +#define ETDM_IN3_CUR_PTR_MSB_MASK 0x1ff +#define ETDM_IN3_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN3_CUR */ +#define ETDM_IN3_CUR_PTR_SFT 0 +#define ETDM_IN3_CUR_PTR_MASK 0xffffffff +#define 
ETDM_IN3_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_ETDM_IN3_END_MSB */ +#define ETDM_IN3_END_ADDR_MSB_SFT 0 +#define ETDM_IN3_END_ADDR_MSB_MASK 0x1ff +#define ETDM_IN3_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN3_END */ +#define ETDM_IN3_END_ADDR_SFT 4 +#define ETDM_IN3_END_ADDR_MASK 0xfffffff +#define ETDM_IN3_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN3_CON0 */ +#define ETDM_IN3_CH_NUM_SFT 28 +#define ETDM_IN3_CH_NUM_MASK 0xf +#define ETDM_IN3_CH_NUM_MASK_SFT (0xf << 28) +#define ETDM_IN3_ON_SFT 27 +#define ETDM_IN3_ON_MASK 0x1 +#define ETDM_IN3_ON_MASK_SFT (0x1 << 27) +#define ETDM_IN3_REG_CH_SHIFT_MODE_SFT 26 +#define ETDM_IN3_REG_CH_SHIFT_MODE_MASK 0x1 +#define ETDM_IN3_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define ETDM_IN3_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define ETDM_IN3_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define ETDM_IN3_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define ETDM_IN3_SW_CLEAR_BUF_FULL_SFT 24 +#define ETDM_IN3_SW_CLEAR_BUF_FULL_MASK 0x1 +#define ETDM_IN3_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define ETDM_IN3_ULTRA_TH_SFT 20 +#define ETDM_IN3_ULTRA_TH_MASK 0xf +#define ETDM_IN3_ULTRA_TH_MASK_SFT (0xf << 20) +#define ETDM_IN3_NORMAL_MODE_SFT 17 +#define ETDM_IN3_NORMAL_MODE_MASK 0x1 +#define ETDM_IN3_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define ETDM_IN3_ODD_USE_EVEN_SFT 16 +#define ETDM_IN3_ODD_USE_EVEN_MASK 0x1 +#define ETDM_IN3_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define ETDM_IN3_AXI_REQ_MAXLEN_SFT 12 +#define ETDM_IN3_AXI_REQ_MAXLEN_MASK 0x3 +#define ETDM_IN3_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define ETDM_IN3_AXI_REQ_MINLEN_SFT 8 +#define ETDM_IN3_AXI_REQ_MINLEN_MASK 0x3 +#define ETDM_IN3_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define ETDM_IN3_HALIGN_SFT 7 +#define ETDM_IN3_HALIGN_MASK 0x1 +#define ETDM_IN3_HALIGN_MASK_SFT (0x1 << 7) +#define ETDM_IN3_SIGN_EXT_SFT 6 +#define ETDM_IN3_SIGN_EXT_MASK 0x1 +#define ETDM_IN3_SIGN_EXT_MASK_SFT (0x1 << 6) +#define ETDM_IN3_HD_MODE_SFT 4 +#define ETDM_IN3_HD_MODE_MASK 0x3 +#define 
ETDM_IN3_HD_MODE_MASK_SFT (0x3 << 4) +#define ETDM_IN3_MAKE_EXTRA_UPDATE_SFT 3 +#define ETDM_IN3_MAKE_EXTRA_UPDATE_MASK 0x1 +#define ETDM_IN3_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define ETDM_IN3_AGENT_FREE_RUN_SFT 2 +#define ETDM_IN3_AGENT_FREE_RUN_MASK 0x1 +#define ETDM_IN3_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define ETDM_IN3_USE_INT_ODD_SFT 1 +#define ETDM_IN3_USE_INT_ODD_MASK 0x1 +#define ETDM_IN3_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define ETDM_IN3_INT_ODD_FLAG_SFT 0 +#define ETDM_IN3_INT_ODD_FLAG_MASK 0x1 +#define ETDM_IN3_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_ETDM_IN4_BASE_MSB */ +#define ETDM_IN4_BASE_ADDR_MSB_SFT 0 +#define ETDM_IN4_BASE_ADDR_MSB_MASK 0x1ff +#define ETDM_IN4_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN4_BASE */ +#define ETDM_IN4_BASE_ADDR_SFT 4 +#define ETDM_IN4_BASE_ADDR_MASK 0xfffffff +#define ETDM_IN4_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN4_CUR_MSB */ +#define ETDM_IN4_CUR_PTR_MSB_SFT 0 +#define ETDM_IN4_CUR_PTR_MSB_MASK 0x1ff +#define ETDM_IN4_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN4_CUR */ +#define ETDM_IN4_CUR_PTR_SFT 0 +#define ETDM_IN4_CUR_PTR_MASK 0xffffffff +#define ETDM_IN4_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_ETDM_IN4_END_MSB */ +#define ETDM_IN4_END_ADDR_MSB_SFT 0 +#define ETDM_IN4_END_ADDR_MSB_MASK 0x1ff +#define ETDM_IN4_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN4_END */ +#define ETDM_IN4_END_ADDR_SFT 4 +#define ETDM_IN4_END_ADDR_MASK 0xfffffff +#define ETDM_IN4_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN4_CON0 */ +#define ETDM_IN4_CH_NUM_SFT 28 +#define ETDM_IN4_CH_NUM_MASK 0xf +#define ETDM_IN4_CH_NUM_MASK_SFT (0xf << 28) +#define ETDM_IN4_ON_SFT 27 +#define ETDM_IN4_ON_MASK 0x1 +#define ETDM_IN4_ON_MASK_SFT (0x1 << 27) +#define ETDM_IN4_REG_CH_SHIFT_MODE_SFT 26 +#define ETDM_IN4_REG_CH_SHIFT_MODE_MASK 0x1 +#define ETDM_IN4_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define ETDM_IN4_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define ETDM_IN4_RG_FORCE_NO_MASK_EXTRA_MASK 
0x1 +#define ETDM_IN4_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define ETDM_IN4_SW_CLEAR_BUF_FULL_SFT 24 +#define ETDM_IN4_SW_CLEAR_BUF_FULL_MASK 0x1 +#define ETDM_IN4_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define ETDM_IN4_ULTRA_TH_SFT 20 +#define ETDM_IN4_ULTRA_TH_MASK 0xf +#define ETDM_IN4_ULTRA_TH_MASK_SFT (0xf << 20) +#define ETDM_IN4_NORMAL_MODE_SFT 17 +#define ETDM_IN4_NORMAL_MODE_MASK 0x1 +#define ETDM_IN4_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define ETDM_IN4_ODD_USE_EVEN_SFT 16 +#define ETDM_IN4_ODD_USE_EVEN_MASK 0x1 +#define ETDM_IN4_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define ETDM_IN4_AXI_REQ_MAXLEN_SFT 12 +#define ETDM_IN4_AXI_REQ_MAXLEN_MASK 0x3 +#define ETDM_IN4_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define ETDM_IN4_AXI_REQ_MINLEN_SFT 8 +#define ETDM_IN4_AXI_REQ_MINLEN_MASK 0x3 +#define ETDM_IN4_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define ETDM_IN4_HALIGN_SFT 7 +#define ETDM_IN4_HALIGN_MASK 0x1 +#define ETDM_IN4_HALIGN_MASK_SFT (0x1 << 7) +#define ETDM_IN4_SIGN_EXT_SFT 6 +#define ETDM_IN4_SIGN_EXT_MASK 0x1 +#define ETDM_IN4_SIGN_EXT_MASK_SFT (0x1 << 6) +#define ETDM_IN4_HD_MODE_SFT 4 +#define ETDM_IN4_HD_MODE_MASK 0x3 +#define ETDM_IN4_HD_MODE_MASK_SFT (0x3 << 4) +#define ETDM_IN4_MAKE_EXTRA_UPDATE_SFT 3 +#define ETDM_IN4_MAKE_EXTRA_UPDATE_MASK 0x1 +#define ETDM_IN4_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define ETDM_IN4_AGENT_FREE_RUN_SFT 2 +#define ETDM_IN4_AGENT_FREE_RUN_MASK 0x1 +#define ETDM_IN4_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define ETDM_IN4_USE_INT_ODD_SFT 1 +#define ETDM_IN4_USE_INT_ODD_MASK 0x1 +#define ETDM_IN4_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define ETDM_IN4_INT_ODD_FLAG_SFT 0 +#define ETDM_IN4_INT_ODD_FLAG_MASK 0x1 +#define ETDM_IN4_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_ETDM_IN5_BASE_MSB */ +#define ETDM_IN5_BASE_ADDR_MSB_SFT 0 +#define ETDM_IN5_BASE_ADDR_MSB_MASK 0x1ff +#define ETDM_IN5_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN5_BASE */ +#define ETDM_IN5_BASE_ADDR_SFT 4 +#define ETDM_IN5_BASE_ADDR_MASK 0xfffffff 
+#define ETDM_IN5_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN5_CUR_MSB */ +#define ETDM_IN5_CUR_PTR_MSB_SFT 0 +#define ETDM_IN5_CUR_PTR_MSB_MASK 0x1ff +#define ETDM_IN5_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN5_CUR */ +#define ETDM_IN5_CUR_PTR_SFT 0 +#define ETDM_IN5_CUR_PTR_MASK 0xffffffff +#define ETDM_IN5_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_ETDM_IN5_END_MSB */ +#define ETDM_IN5_END_ADDR_MSB_SFT 0 +#define ETDM_IN5_END_ADDR_MSB_MASK 0x1ff +#define ETDM_IN5_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN5_END */ +#define ETDM_IN5_END_ADDR_SFT 4 +#define ETDM_IN5_END_ADDR_MASK 0xfffffff +#define ETDM_IN5_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN5_CON0 */ +#define ETDM_IN5_CH_NUM_SFT 28 +#define ETDM_IN5_CH_NUM_MASK 0xf +#define ETDM_IN5_CH_NUM_MASK_SFT (0xf << 28) +#define ETDM_IN5_ON_SFT 27 +#define ETDM_IN5_ON_MASK 0x1 +#define ETDM_IN5_ON_MASK_SFT (0x1 << 27) +#define ETDM_IN5_REG_CH_SHIFT_MODE_SFT 26 +#define ETDM_IN5_REG_CH_SHIFT_MODE_MASK 0x1 +#define ETDM_IN5_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define ETDM_IN5_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define ETDM_IN5_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define ETDM_IN5_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define ETDM_IN5_SW_CLEAR_BUF_FULL_SFT 24 +#define ETDM_IN5_SW_CLEAR_BUF_FULL_MASK 0x1 +#define ETDM_IN5_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define ETDM_IN5_ULTRA_TH_SFT 20 +#define ETDM_IN5_ULTRA_TH_MASK 0xf +#define ETDM_IN5_ULTRA_TH_MASK_SFT (0xf << 20) +#define ETDM_IN5_NORMAL_MODE_SFT 17 +#define ETDM_IN5_NORMAL_MODE_MASK 0x1 +#define ETDM_IN5_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define ETDM_IN5_ODD_USE_EVEN_SFT 16 +#define ETDM_IN5_ODD_USE_EVEN_MASK 0x1 +#define ETDM_IN5_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define ETDM_IN5_AXI_REQ_MAXLEN_SFT 12 +#define ETDM_IN5_AXI_REQ_MAXLEN_MASK 0x3 +#define ETDM_IN5_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define ETDM_IN5_AXI_REQ_MINLEN_SFT 8 +#define ETDM_IN5_AXI_REQ_MINLEN_MASK 0x3 +#define 
ETDM_IN5_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define ETDM_IN5_HALIGN_SFT 7 +#define ETDM_IN5_HALIGN_MASK 0x1 +#define ETDM_IN5_HALIGN_MASK_SFT (0x1 << 7) +#define ETDM_IN5_SIGN_EXT_SFT 6 +#define ETDM_IN5_SIGN_EXT_MASK 0x1 +#define ETDM_IN5_SIGN_EXT_MASK_SFT (0x1 << 6) +#define ETDM_IN5_HD_MODE_SFT 4 +#define ETDM_IN5_HD_MODE_MASK 0x3 +#define ETDM_IN5_HD_MODE_MASK_SFT (0x3 << 4) +#define ETDM_IN5_MAKE_EXTRA_UPDATE_SFT 3 +#define ETDM_IN5_MAKE_EXTRA_UPDATE_MASK 0x1 +#define ETDM_IN5_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define ETDM_IN5_AGENT_FREE_RUN_SFT 2 +#define ETDM_IN5_AGENT_FREE_RUN_MASK 0x1 +#define ETDM_IN5_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define ETDM_IN5_USE_INT_ODD_SFT 1 +#define ETDM_IN5_USE_INT_ODD_MASK 0x1 +#define ETDM_IN5_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define ETDM_IN5_INT_ODD_FLAG_SFT 0 +#define ETDM_IN5_INT_ODD_FLAG_MASK 0x1 +#define ETDM_IN5_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_ETDM_IN6_BASE_MSB */ +#define ETDM_IN6_BASE_ADDR_MSB_SFT 0 +#define ETDM_IN6_BASE_ADDR_MSB_MASK 0x1ff +#define ETDM_IN6_BASE_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN6_BASE */ +#define ETDM_IN6_BASE_ADDR_SFT 4 +#define ETDM_IN6_BASE_ADDR_MASK 0xfffffff +#define ETDM_IN6_BASE_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN6_CUR_MSB */ +#define ETDM_IN6_CUR_PTR_MSB_SFT 0 +#define ETDM_IN6_CUR_PTR_MSB_MASK 0x1ff +#define ETDM_IN6_CUR_PTR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN6_CUR */ +#define ETDM_IN6_CUR_PTR_SFT 0 +#define ETDM_IN6_CUR_PTR_MASK 0xffffffff +#define ETDM_IN6_CUR_PTR_MASK_SFT (0xffffffff << 0) + +/* AFE_ETDM_IN6_END_MSB */ +#define ETDM_IN6_END_ADDR_MSB_SFT 0 +#define ETDM_IN6_END_ADDR_MSB_MASK 0x1ff +#define ETDM_IN6_END_ADDR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_ETDM_IN6_END */ +#define ETDM_IN6_END_ADDR_SFT 4 +#define ETDM_IN6_END_ADDR_MASK 0xfffffff +#define ETDM_IN6_END_ADDR_MASK_SFT (0xfffffff << 4) + +/* AFE_ETDM_IN6_CON0 */ +#define ETDM_IN6_CH_NUM_SFT 28 +#define ETDM_IN6_CH_NUM_MASK 0xf +#define ETDM_IN6_CH_NUM_MASK_SFT (0xf << 
28) +#define ETDM_IN6_ON_SFT 27 +#define ETDM_IN6_ON_MASK 0x1 +#define ETDM_IN6_ON_MASK_SFT (0x1 << 27) +#define ETDM_IN6_REG_CH_SHIFT_MODE_SFT 26 +#define ETDM_IN6_REG_CH_SHIFT_MODE_MASK 0x1 +#define ETDM_IN6_REG_CH_SHIFT_MODE_MASK_SFT (0x1 << 26) +#define ETDM_IN6_RG_FORCE_NO_MASK_EXTRA_SFT 25 +#define ETDM_IN6_RG_FORCE_NO_MASK_EXTRA_MASK 0x1 +#define ETDM_IN6_RG_FORCE_NO_MASK_EXTRA_MASK_SFT (0x1 << 25) +#define ETDM_IN6_SW_CLEAR_BUF_FULL_SFT 24 +#define ETDM_IN6_SW_CLEAR_BUF_FULL_MASK 0x1 +#define ETDM_IN6_SW_CLEAR_BUF_FULL_MASK_SFT (0x1 << 24) +#define ETDM_IN6_ULTRA_TH_SFT 20 +#define ETDM_IN6_ULTRA_TH_MASK 0xf +#define ETDM_IN6_ULTRA_TH_MASK_SFT (0xf << 20) +#define ETDM_IN6_NORMAL_MODE_SFT 17 +#define ETDM_IN6_NORMAL_MODE_MASK 0x1 +#define ETDM_IN6_NORMAL_MODE_MASK_SFT (0x1 << 17) +#define ETDM_IN6_ODD_USE_EVEN_SFT 16 +#define ETDM_IN6_ODD_USE_EVEN_MASK 0x1 +#define ETDM_IN6_ODD_USE_EVEN_MASK_SFT (0x1 << 16) +#define ETDM_IN6_AXI_REQ_MAXLEN_SFT 12 +#define ETDM_IN6_AXI_REQ_MAXLEN_MASK 0x3 +#define ETDM_IN6_AXI_REQ_MAXLEN_MASK_SFT (0x3 << 12) +#define ETDM_IN6_AXI_REQ_MINLEN_SFT 8 +#define ETDM_IN6_AXI_REQ_MINLEN_MASK 0x3 +#define ETDM_IN6_AXI_REQ_MINLEN_MASK_SFT (0x3 << 8) +#define ETDM_IN6_HALIGN_SFT 7 +#define ETDM_IN6_HALIGN_MASK 0x1 +#define ETDM_IN6_HALIGN_MASK_SFT (0x1 << 7) +#define ETDM_IN6_SIGN_EXT_SFT 6 +#define ETDM_IN6_SIGN_EXT_MASK 0x1 +#define ETDM_IN6_SIGN_EXT_MASK_SFT (0x1 << 6) +#define ETDM_IN6_HD_MODE_SFT 4 +#define ETDM_IN6_HD_MODE_MASK 0x3 +#define ETDM_IN6_HD_MODE_MASK_SFT (0x3 << 4) +#define ETDM_IN6_MAKE_EXTRA_UPDATE_SFT 3 +#define ETDM_IN6_MAKE_EXTRA_UPDATE_MASK 0x1 +#define ETDM_IN6_MAKE_EXTRA_UPDATE_MASK_SFT (0x1 << 3) +#define ETDM_IN6_AGENT_FREE_RUN_SFT 2 +#define ETDM_IN6_AGENT_FREE_RUN_MASK 0x1 +#define ETDM_IN6_AGENT_FREE_RUN_MASK_SFT (0x1 << 2) +#define ETDM_IN6_USE_INT_ODD_SFT 1 +#define ETDM_IN6_USE_INT_ODD_MASK 0x1 +#define ETDM_IN6_USE_INT_ODD_MASK_SFT (0x1 << 1) +#define ETDM_IN6_INT_ODD_FLAG_SFT 0 +#define 
ETDM_IN6_INT_ODD_FLAG_MASK 0x1 +#define ETDM_IN6_INT_ODD_FLAG_MASK_SFT (0x1 << 0) + +/* AFE_HDMI_OUT_BASE_MSB */ +#define AFE_HDMI_OUT_BASE_MSB_SFT 0 +#define AFE_HDMI_OUT_BASE_MSB_MASK 0x1ff +#define AFE_HDMI_OUT_BASE_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_HDMI_OUT_BASE */ +#define AFE_HDMI_OUT_BASE_SFT 4 +#define AFE_HDMI_OUT_BASE_MASK 0xfffffff +#define AFE_HDMI_OUT_BASE_MASK_SFT (0xfffffff << 4) + +/* AFE_HDMI_OUT_CUR_MSB */ +#define AFE_HDMI_OUT_CUR_MSB_SFT 0 +#define AFE_HDMI_OUT_CUR_MSB_MASK 0x1ff +#define AFE_HDMI_OUT_CUR_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_HDMI_OUT_CUR */ +#define AFE_HDMI_OUT_CUR_SFT 0 +#define AFE_HDMI_OUT_CUR_MASK 0xffffffff +#define AFE_HDMI_OUT_CUR_MASK_SFT (0xffffffff << 0) + +/* AFE_HDMI_OUT_END_MSB */ +#define AFE_HDMI_OUT_END_MSB_SFT 0 +#define AFE_HDMI_OUT_END_MSB_MASK 0x1ff +#define AFE_HDMI_OUT_END_MSB_MASK_SFT (0x1ff << 0) + +/* AFE_HDMI_OUT_END */ +#define AFE_HDMI_OUT_END_SFT 4 +#define AFE_HDMI_OUT_END_MASK 0xfffffff +#define AFE_HDMI_OUT_END_MASK_SFT (0xfffffff << 4) +#define AFE_HDMI_OUT_END_LSB_SFT 0 +#define AFE_HDMI_OUT_END_LSB_MASK 0xf +#define AFE_HDMI_OUT_END_LSB_MASK_SFT (0xf << 0) + +/* AFE_HDMI_OUT_CON0 */ +#define HDMI_OUT_ON_SFT 28 +#define HDMI_OUT_ON_MASK 0x1 +#define HDMI_OUT_ON_MASK_SFT (0x1 << 28) +#define HDMI_CH_NUM_SFT 24 +#define HDMI_CH_NUM_MASK 0xf +#define HDMI_CH_NUM_MASK_SFT (0xf << 24) +#define HDMI_OUT_ONE_HEART_SEL_SFT 22 +#define HDMI_OUT_ONE_HEART_SEL_MASK 0x3 +#define HDMI_OUT_ONE_HEART_SEL_MASK_SFT (0x3 << 22) +#define HDMI_OUT_MINLEN_SFT 20 +#define HDMI_OUT_MINLEN_MASK 0x3 +#define HDMI_OUT_MINLEN_MASK_SFT (0x3 << 20) +#define HDMI_OUT_MAXLEN_SFT 16 +#define HDMI_OUT_MAXLEN_MASK 0x3 +#define HDMI_OUT_MAXLEN_MASK_SFT (0x3 << 16) +#define HDMI_OUT_SW_CLEAR_BUF_EMPTY_SFT 15 +#define HDMI_OUT_SW_CLEAR_BUF_EMPTY_MASK 0x1 +#define HDMI_OUT_SW_CLEAR_BUF_EMPTY_MASK_SFT (0x1 << 15) +#define HDMI_OUT_PBUF_SIZE_SFT 12 +#define HDMI_OUT_PBUF_SIZE_MASK 0x3 +#define HDMI_OUT_PBUF_SIZE_MASK_SFT (0x3 << 12) 
+#define HDMI_OUT_SW_CLEAR_HDMI_BUF_EMPTY_SFT 7 +#define HDMI_OUT_SW_CLEAR_HDMI_BUF_EMPTY_MASK 0x1 +#define HDMI_OUT_SW_CLEAR_HDMI_BUF_EMPTY_MASK_SFT (0x1 << 7) +#define HDMI_OUT_NORMAL_MODE_SFT 5 +#define HDMI_OUT_NORMAL_MODE_MASK 0x1 +#define HDMI_OUT_NORMAL_MODE_MASK_SFT (0x1 << 5) +#define HDMI_OUT_HALIGN_SFT 4 +#define HDMI_OUT_HALIGN_MASK 0x1 +#define HDMI_OUT_HALIGN_MASK_SFT (0x1 << 4) +#define HDMI_OUT_HD_MODE_SFT 0 +#define HDMI_OUT_HD_MODE_MASK 0x3 +#define HDMI_OUT_HD_MODE_MASK_SFT (0x3 << 0) + +/* AFE_VUL24_RCH_MON */ +#define VUL24_RCH_DATA_SFT 0 +#define VUL24_RCH_DATA_MASK 0xffffffff +#define VUL24_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL24_LCH_MON */ +#define VUL24_LCH_DATA_SFT 0 +#define VUL24_LCH_DATA_MASK 0xffffffff +#define VUL24_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL25_RCH_MON */ +#define VUL25_RCH_DATA_SFT 0 +#define VUL25_RCH_DATA_MASK 0xffffffff +#define VUL25_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL25_LCH_MON */ +#define VUL25_LCH_DATA_SFT 0 +#define VUL25_LCH_DATA_MASK 0xffffffff +#define VUL25_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL26_RCH_MON */ +#define VUL26_RCH_DATA_SFT 0 +#define VUL26_RCH_DATA_MASK 0xffffffff +#define VUL26_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL26_LCH_MON */ +#define VUL26_LCH_DATA_SFT 0 +#define VUL26_LCH_DATA_MASK 0xffffffff +#define VUL26_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM0_RCH_MON */ +#define VUL_CM0_RCH_DATA_SFT 0 +#define VUL_CM0_RCH_DATA_MASK 0xffffffff +#define VUL_CM0_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM0_LCH_MON */ +#define VUL_CM0_LCH_DATA_SFT 0 +#define VUL_CM0_LCH_DATA_MASK 0xffffffff +#define VUL_CM0_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM1_RCH_MON */ +#define VUL_CM1_RCH_DATA_SFT 0 +#define VUL_CM1_RCH_DATA_MASK 0xffffffff +#define VUL_CM1_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM1_LCH_MON */ +#define VUL_CM1_LCH_DATA_SFT 0 +#define VUL_CM1_LCH_DATA_MASK 0xffffffff +#define 
VUL_CM1_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM2_RCH_MON */ +#define VUL_CM2_RCH_DATA_SFT 0 +#define VUL_CM2_RCH_DATA_MASK 0xffffffff +#define VUL_CM2_RCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_VUL_CM2_LCH_MON */ +#define VUL_CM2_LCH_DATA_SFT 0 +#define VUL_CM2_LCH_DATA_MASK 0xffffffff +#define VUL_CM2_LCH_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_4CH_CH0_MON */ +#define DL_4CH_CH0_DATA_SFT 0 +#define DL_4CH_CH0_DATA_MASK 0xffffffff +#define DL_4CH_CH0_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_4CH_CH1_MON */ +#define DL_4CH_CH1_DATA_SFT 0 +#define DL_4CH_CH1_DATA_MASK 0xffffffff +#define DL_4CH_CH1_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_4CH_CH2_MON */ +#define DL_4CH_CH2_DATA_SFT 0 +#define DL_4CH_CH2_DATA_MASK 0xffffffff +#define DL_4CH_CH2_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_4CH_CH3_MON */ +#define DL_4CH_CH3_DATA_SFT 0 +#define DL_4CH_CH3_DATA_MASK 0xffffffff +#define DL_4CH_CH3_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH0_MON */ +#define DL_24CH_CH0_DATA_SFT 0 +#define DL_24CH_CH0_DATA_MASK 0xffffffff +#define DL_24CH_CH0_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH1_MON */ +#define DL_24CH_CH1_DATA_SFT 0 +#define DL_24CH_CH1_DATA_MASK 0xffffffff +#define DL_24CH_CH1_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH2_MON */ +#define DL_24CH_CH2_DATA_SFT 0 +#define DL_24CH_CH2_DATA_MASK 0xffffffff +#define DL_24CH_CH2_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH3_MON */ +#define DL_24CH_CH3_DATA_SFT 0 +#define DL_24CH_CH3_DATA_MASK 0xffffffff +#define DL_24CH_CH3_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH4_MON */ +#define DL_24CH_CH4_DATA_SFT 0 +#define DL_24CH_CH4_DATA_MASK 0xffffffff +#define DL_24CH_CH4_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH5_MON */ +#define DL_24CH_CH5_DATA_SFT 0 +#define DL_24CH_CH5_DATA_MASK 0xffffffff +#define DL_24CH_CH5_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH6_MON */ +#define DL_24CH_CH6_DATA_SFT 0 +#define DL_24CH_CH6_DATA_MASK 
0xffffffff +#define DL_24CH_CH6_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH7_MON */ +#define DL_24CH_CH7_DATA_SFT 0 +#define DL_24CH_CH7_DATA_MASK 0xffffffff +#define DL_24CH_CH7_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH8_MON */ +#define DL_24CH_CH8_DATA_SFT 0 +#define DL_24CH_CH8_DATA_MASK 0xffffffff +#define DL_24CH_CH8_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH9_MON */ +#define DL_24CH_CH9_DATA_SFT 0 +#define DL_24CH_CH9_DATA_MASK 0xffffffff +#define DL_24CH_CH9_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH10_MON */ +#define DL_24CH_CH10_DATA_SFT 0 +#define DL_24CH_CH10_DATA_MASK 0xffffffff +#define DL_24CH_CH10_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH11_MON */ +#define DL_24CH_CH11_DATA_SFT 0 +#define DL_24CH_CH11_DATA_MASK 0xffffffff +#define DL_24CH_CH11_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH12_MON */ +#define DL_24CH_CH12_DATA_SFT 0 +#define DL_24CH_CH12_DATA_MASK 0xffffffff +#define DL_24CH_CH12_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH13_MON */ +#define DL_24CH_CH13_DATA_SFT 0 +#define DL_24CH_CH13_DATA_MASK 0xffffffff +#define DL_24CH_CH13_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH14_MON */ +#define DL_24CH_CH14_DATA_SFT 0 +#define DL_24CH_CH14_DATA_MASK 0xffffffff +#define DL_24CH_CH14_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_DL_24CH_CH15_MON */ +#define DL_24CH_CH15_DATA_SFT 0 +#define DL_24CH_CH15_DATA_MASK 0xffffffff +#define DL_24CH_CH15_DATA_MASK_SFT (0xffffffff << 0) + +/* AFE_SRAM_BOUND */ +#define SECURE_BIT_SFT 19 +#define SECURE_BIT_MASK 0x1 +#define SECURE_BIT_MASK_SFT (0x1 << 19) +#define SECURE_SRAM_BOUND_SFT 0 +#define SECURE_SRAM_BOUND_MASK 0x7ffff +#define SECURE_SRAM_BOUND_MASK_SFT (0x7ffff << 0) + +/* AFE_SECURE_CON0 */ +#define READ_EN15_NS_SFT 31 +#define READ_EN15_NS_MASK 0x1 +#define READ_EN15_NS_MASK_SFT (0x1 << 31) +#define WRITE_EN15_NS_SFT 30 +#define WRITE_EN15_NS_MASK 0x1 +#define WRITE_EN15_NS_MASK_SFT (0x1 << 30) +#define READ_EN14_NS_SFT 29 
+#define READ_EN14_NS_MASK 0x1 +#define READ_EN14_NS_MASK_SFT (0x1 << 29) +#define WRITE_EN14_NS_SFT 28 +#define WRITE_EN14_NS_MASK 0x1 +#define WRITE_EN14_NS_MASK_SFT (0x1 << 28) +#define READ_EN13_NS_SFT 27 +#define READ_EN13_NS_MASK 0x1 +#define READ_EN13_NS_MASK_SFT (0x1 << 27) +#define WRITE_EN13_NS_SFT 26 +#define WRITE_EN13_NS_MASK 0x1 +#define WRITE_EN13_NS_MASK_SFT (0x1 << 26) +#define READ_EN12_NS_SFT 25 +#define READ_EN12_NS_MASK 0x1 +#define READ_EN12_NS_MASK_SFT (0x1 << 25) +#define WRITE_EN12_NS_SFT 24 +#define WRITE_EN12_NS_MASK 0x1 +#define WRITE_EN12_NS_MASK_SFT (0x1 << 24) +#define READ_EN11_NS_SFT 23 +#define READ_EN11_NS_MASK 0x1 +#define READ_EN11_NS_MASK_SFT (0x1 << 23) +#define WRITE_EN11_NS_SFT 22 +#define WRITE_EN11_NS_MASK 0x1 +#define WRITE_EN11_NS_MASK_SFT (0x1 << 22) +#define READ_EN10_NS_SFT 21 +#define READ_EN10_NS_MASK 0x1 +#define READ_EN10_NS_MASK_SFT (0x1 << 21) +#define WRITE_EN10_NS_SFT 20 +#define WRITE_EN10_NS_MASK 0x1 +#define WRITE_EN10_NS_MASK_SFT (0x1 << 20) +#define READ_EN9_NS_SFT 19 +#define READ_EN9_NS_MASK 0x1 +#define READ_EN9_NS_MASK_SFT (0x1 << 19) +#define WRITE_EN9_NS_SFT 18 +#define WRITE_EN9_NS_MASK 0x1 +#define WRITE_EN9_NS_MASK_SFT (0x1 << 18) +#define READ_EN8_NS_SFT 17 +#define READ_EN8_NS_MASK 0x1 +#define READ_EN8_NS_MASK_SFT (0x1 << 17) +#define WRITE_EN8_NS_SFT 16 +#define WRITE_EN8_NS_MASK 0x1 +#define WRITE_EN8_NS_MASK_SFT (0x1 << 16) +#define READ_EN7_NS_SFT 15 +#define READ_EN7_NS_MASK 0x1 +#define READ_EN7_NS_MASK_SFT (0x1 << 15) +#define WRITE_EN7_NS_SFT 14 +#define WRITE_EN7_NS_MASK 0x1 +#define WRITE_EN7_NS_MASK_SFT (0x1 << 14) +#define READ_EN6_NS_SFT 13 +#define READ_EN6_NS_MASK 0x1 +#define READ_EN6_NS_MASK_SFT (0x1 << 13) +#define WRITE_EN6_NS_SFT 12 +#define WRITE_EN6_NS_MASK 0x1 +#define WRITE_EN6_NS_MASK_SFT (0x1 << 12) +#define READ_EN5_NS_SFT 11 +#define READ_EN5_NS_MASK 0x1 +#define READ_EN5_NS_MASK_SFT (0x1 << 11) +#define WRITE_EN5_NS_SFT 10 +#define WRITE_EN5_NS_MASK 0x1 +#define 
WRITE_EN5_NS_MASK_SFT (0x1 << 10) +#define READ_EN4_NS_SFT 9 +#define READ_EN4_NS_MASK 0x1 +#define READ_EN4_NS_MASK_SFT (0x1 << 9) +#define WRITE_EN4_NS_SFT 8 +#define WRITE_EN4_NS_MASK 0x1 +#define WRITE_EN4_NS_MASK_SFT (0x1 << 8) +#define READ_EN3_NS_SFT 7 +#define READ_EN3_NS_MASK 0x1 +#define READ_EN3_NS_MASK_SFT (0x1 << 7) +#define WRITE_EN3_NS_SFT 6 +#define WRITE_EN3_NS_MASK 0x1 +#define WRITE_EN3_NS_MASK_SFT (0x1 << 6) +#define READ_EN2_NS_SFT 5 +#define READ_EN2_NS_MASK 0x1 +#define READ_EN2_NS_MASK_SFT (0x1 << 5) +#define WRITE_EN2_NS_SFT 4 +#define WRITE_EN2_NS_MASK 0x1 +#define WRITE_EN2_NS_MASK_SFT (0x1 << 4) +#define READ_EN1_NS_SFT 3 +#define READ_EN1_NS_MASK 0x1 +#define READ_EN1_NS_MASK_SFT (0x1 << 3) +#define WRITE_EN1_NS_SFT 2 +#define WRITE_EN1_NS_MASK 0x1 +#define WRITE_EN1_NS_MASK_SFT (0x1 << 2) +#define READ_EN0_NS_SFT 1 +#define READ_EN0_NS_MASK 0x1 +#define READ_EN0_NS_MASK_SFT (0x1 << 1) +#define WRITE_EN0_NS_SFT 0 +#define WRITE_EN0_NS_MASK 0x1 +#define WRITE_EN0_NS_MASK_SFT (0x1 << 0) + +/* AFE_SECURE_CON1 */ +#define READ_EN15_S_SFT 31 +#define READ_EN15_S_MASK 0x1 +#define READ_EN15_S_MASK_SFT (0x1 << 31) +#define WRITE_EN15_S_SFT 30 +#define WRITE_EN15_S_MASK 0x1 +#define WRITE_EN15_S_MASK_SFT (0x1 << 30) +#define READ_EN14_S_SFT 29 +#define READ_EN14_S_MASK 0x1 +#define READ_EN14_S_MASK_SFT (0x1 << 29) +#define WRITE_EN14_S_SFT 28 +#define WRITE_EN14_S_MASK 0x1 +#define WRITE_EN14_S_MASK_SFT (0x1 << 28) +#define READ_EN13_S_SFT 27 +#define READ_EN13_S_MASK 0x1 +#define READ_EN13_S_MASK_SFT (0x1 << 27) +#define WRITE_EN13_S_SFT 26 +#define WRITE_EN13_S_MASK 0x1 +#define WRITE_EN13_S_MASK_SFT (0x1 << 26) +#define READ_EN12_S_SFT 25 +#define READ_EN12_S_MASK 0x1 +#define READ_EN12_S_MASK_SFT (0x1 << 25) +#define WRITE_EN12_S_SFT 24 +#define WRITE_EN12_S_MASK 0x1 +#define WRITE_EN12_S_MASK_SFT (0x1 << 24) +#define READ_EN11_S_SFT 23 +#define READ_EN11_S_MASK 0x1 +#define READ_EN11_S_MASK_SFT (0x1 << 23) +#define WRITE_EN11_S_SFT 22 
+#define WRITE_EN11_S_MASK 0x1 +#define WRITE_EN11_S_MASK_SFT (0x1 << 22) +#define READ_EN10_S_SFT 21 +#define READ_EN10_S_MASK 0x1 +#define READ_EN10_S_MASK_SFT (0x1 << 21) +#define WRITE_EN10_S_SFT 20 +#define WRITE_EN10_S_MASK 0x1 +#define WRITE_EN10_S_MASK_SFT (0x1 << 20) +#define READ_EN9_S_SFT 19 +#define READ_EN9_S_MASK 0x1 +#define READ_EN9_S_MASK_SFT (0x1 << 19) +#define WRITE_EN9_S_SFT 18 +#define WRITE_EN9_S_MASK 0x1 +#define WRITE_EN9_S_MASK_SFT (0x1 << 18) +#define READ_EN8_S_SFT 17 +#define READ_EN8_S_MASK 0x1 +#define READ_EN8_S_MASK_SFT (0x1 << 17) +#define WRITE_EN8_S_SFT 16 +#define WRITE_EN8_S_MASK 0x1 +#define WRITE_EN8_S_MASK_SFT (0x1 << 16) +#define READ_EN7_S_SFT 15 +#define READ_EN7_S_MASK 0x1 +#define READ_EN7_S_MASK_SFT (0x1 << 15) +#define WRITE_EN7_S_SFT 14 +#define WRITE_EN7_S_MASK 0x1 +#define WRITE_EN7_S_MASK_SFT (0x1 << 14) +#define READ_EN6_S_SFT 13 +#define READ_EN6_S_MASK 0x1 +#define READ_EN6_S_MASK_SFT (0x1 << 13) +#define WRITE_EN6_S_SFT 12 +#define WRITE_EN6_S_MASK 0x1 +#define WRITE_EN6_S_MASK_SFT (0x1 << 12) +#define READ_EN5_S_SFT 11 +#define READ_EN5_S_MASK 0x1 +#define READ_EN5_S_MASK_SFT (0x1 << 11) +#define WRITE_EN5_S_SFT 10 +#define WRITE_EN5_S_MASK 0x1 +#define WRITE_EN5_S_MASK_SFT (0x1 << 10) +#define READ_EN4_S_SFT 9 +#define READ_EN4_S_MASK 0x1 +#define READ_EN4_S_MASK_SFT (0x1 << 9) +#define WRITE_EN4_S_SFT 8 +#define WRITE_EN4_S_MASK 0x1 +#define WRITE_EN4_S_MASK_SFT (0x1 << 8) +#define READ_EN3_S_SFT 7 +#define READ_EN3_S_MASK 0x1 +#define READ_EN3_S_MASK_SFT (0x1 << 7) +#define WRITE_EN3_S_SFT 6 +#define WRITE_EN3_S_MASK 0x1 +#define WRITE_EN3_S_MASK_SFT (0x1 << 6) +#define READ_EN2_S_SFT 5 +#define READ_EN2_S_MASK 0x1 +#define READ_EN2_S_MASK_SFT (0x1 << 5) +#define WRITE_EN2_S_SFT 4 +#define WRITE_EN2_S_MASK 0x1 +#define WRITE_EN2_S_MASK_SFT (0x1 << 4) +#define READ_EN1_S_SFT 3 +#define READ_EN1_S_MASK 0x1 +#define READ_EN1_S_MASK_SFT (0x1 << 3) +#define WRITE_EN1_S_SFT 2 +#define WRITE_EN1_S_MASK 0x1 
+#define WRITE_EN1_S_MASK_SFT (0x1 << 2) +#define READ_EN0_S_SFT 1 +#define READ_EN0_S_MASK 0x1 +#define READ_EN0_S_MASK_SFT (0x1 << 1) +#define WRITE_EN0_S_SFT 0 +#define WRITE_EN0_S_MASK 0x1 +#define WRITE_EN0_S_MASK_SFT (0x1 << 0) + +/* AFE_SE_SECURE_CON0 */ +#define AFE_HDMI_SE_SECURE_BIT_SFT 11 +#define AFE_HDMI_SE_SECURE_BIT_MASK 0x1 +#define AFE_HDMI_SE_SECURE_BIT_MASK_SFT (0x1 << 11) +#define AFE_SPDIF2_OUT_SE_SECURE_BIT_SFT 10 +#define AFE_SPDIF2_OUT_SE_SECURE_BIT_MASK 0x1 +#define AFE_SPDIF2_OUT_SE_SECURE_BIT_MASK_SFT (0x1 << 10) +#define AFE_SPDIF_OUT_SE_SECURE_BIT_SFT 9 +#define AFE_SPDIF_OUT_SE_SECURE_BIT_MASK 0x1 +#define AFE_SPDIF_OUT_SE_SECURE_BIT_MASK_SFT (0x1 << 9) +#define AFE_DL8_SE_SECURE_BIT_SFT 8 +#define AFE_DL8_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL8_SE_SECURE_BIT_MASK_SFT (0x1 << 8) +#define AFE_DL7_SE_SECURE_BIT_SFT 7 +#define AFE_DL7_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL7_SE_SECURE_BIT_MASK_SFT (0x1 << 7) +#define AFE_DL6_SE_SECURE_BIT_SFT 6 +#define AFE_DL6_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL6_SE_SECURE_BIT_MASK_SFT (0x1 << 6) +#define AFE_DL5_SE_SECURE_BIT_SFT 5 +#define AFE_DL5_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL5_SE_SECURE_BIT_MASK_SFT (0x1 << 5) +#define AFE_DL4_SE_SECURE_BIT_SFT 4 +#define AFE_DL4_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL4_SE_SECURE_BIT_MASK_SFT (0x1 << 4) +#define AFE_DL3_SE_SECURE_BIT_SFT 3 +#define AFE_DL3_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL3_SE_SECURE_BIT_MASK_SFT (0x1 << 3) +#define AFE_DL2_SE_SECURE_BIT_SFT 2 +#define AFE_DL2_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL2_SE_SECURE_BIT_MASK_SFT (0x1 << 2) +#define AFE_DL1_SE_SECURE_BIT_SFT 1 +#define AFE_DL1_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL1_SE_SECURE_BIT_MASK_SFT (0x1 << 1) +#define AFE_DL0_SE_SECURE_BIT_SFT 0 +#define AFE_DL0_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL0_SE_SECURE_BIT_MASK_SFT (0x1 << 0) + +/* AFE_SE_SECURE_CON1 */ +#define AFE_DL46_SE_SECURE_BIT_SFT 26 +#define AFE_DL46_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL46_SE_SECURE_BIT_MASK_SFT (0x1 
<< 26) +#define AFE_DL45_SE_SECURE_BIT_SFT 25 +#define AFE_DL45_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL45_SE_SECURE_BIT_MASK_SFT (0x1 << 25) +#define AFE_DL44_SE_SECURE_BIT_SFT 24 +#define AFE_DL44_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL44_SE_SECURE_BIT_MASK_SFT (0x1 << 24) +#define AFE_DL43_SE_SECURE_BIT_SFT 23 +#define AFE_DL43_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL43_SE_SECURE_BIT_MASK_SFT (0x1 << 23) +#define AFE_DL42_SE_SECURE_BIT_SFT 22 +#define AFE_DL42_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL42_SE_SECURE_BIT_MASK_SFT (0x1 << 22) +#define AFE_DL41_SE_SECURE_BIT_SFT 21 +#define AFE_DL41_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL41_SE_SECURE_BIT_MASK_SFT (0x1 << 21) +#define AFE_DL40_SE_SECURE_BIT_SFT 20 +#define AFE_DL40_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL40_SE_SECURE_BIT_MASK_SFT (0x1 << 20) +#define AFE_DL39_SE_SECURE_BIT_SFT 19 +#define AFE_DL39_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL39_SE_SECURE_BIT_MASK_SFT (0x1 << 19) +#define AFE_DL38_SE_SECURE_BIT_SFT 18 +#define AFE_DL38_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL38_SE_SECURE_BIT_MASK_SFT (0x1 << 18) +#define AFE_DL37_SE_SECURE_BIT_SFT 17 +#define AFE_DL37_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL37_SE_SECURE_BIT_MASK_SFT (0x1 << 17) +#define AFE_DL36_SE_SECURE_BIT_SFT 16 +#define AFE_DL36_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL36_SE_SECURE_BIT_MASK_SFT (0x1 << 16) +#define AFE_DL35_SE_SECURE_BIT_SFT 15 +#define AFE_DL35_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL35_SE_SECURE_BIT_MASK_SFT (0x1 << 15) +#define AFE_DL34_SE_SECURE_BIT_SFT 14 +#define AFE_DL34_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL34_SE_SECURE_BIT_MASK_SFT (0x1 << 14) +#define AFE_DL33_SE_SECURE_BIT_SFT 13 +#define AFE_DL33_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL33_SE_SECURE_BIT_MASK_SFT (0x1 << 13) +#define AFE_DL32_SE_SECURE_BIT_SFT 12 +#define AFE_DL32_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL32_SE_SECURE_BIT_MASK_SFT (0x1 << 12) +#define AFE_DL31_SE_SECURE_BIT_SFT 11 +#define AFE_DL31_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL31_SE_SECURE_BIT_MASK_SFT (0x1 << 
11) +#define AFE_DL30_SE_SECURE_BIT_SFT 10 +#define AFE_DL30_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL30_SE_SECURE_BIT_MASK_SFT (0x1 << 10) +#define AFE_DL29_SE_SECURE_BIT_SFT 9 +#define AFE_DL29_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL29_SE_SECURE_BIT_MASK_SFT (0x1 << 9) +#define AFE_DL28_SE_SECURE_BIT_SFT 8 +#define AFE_DL28_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL28_SE_SECURE_BIT_MASK_SFT (0x1 << 8) +#define AFE_DL27_SE_SECURE_BIT_SFT 7 +#define AFE_DL27_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL27_SE_SECURE_BIT_MASK_SFT (0x1 << 7) +#define AFE_DL26_SE_SECURE_BIT_SFT 6 +#define AFE_DL26_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL26_SE_SECURE_BIT_MASK_SFT (0x1 << 6) +#define AFE_DL25_SE_SECURE_BIT_SFT 5 +#define AFE_DL25_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL25_SE_SECURE_BIT_MASK_SFT (0x1 << 5) +#define AFE_DL24_SE_SECURE_BIT_SFT 4 +#define AFE_DL24_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL24_SE_SECURE_BIT_MASK_SFT (0x1 << 4) +#define AFE_DL23_SE_SECURE_BIT_SFT 3 +#define AFE_DL23_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL23_SE_SECURE_BIT_MASK_SFT (0x1 << 3) +#define AFE_DL_48CH_SE_SECURE_BIT_SFT 2 +#define AFE_DL_48CH_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL_48CH_SE_SECURE_BIT_MASK_SFT (0x1 << 2) +#define AFE_DL_24CH_SE_SECURE_BIT_SFT 1 +#define AFE_DL_24CH_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL_24CH_SE_SECURE_BIT_MASK_SFT (0x1 << 1) +#define AFE_DL_4CH_SE_SECURE_BIT_SFT 0 +#define AFE_DL_4CH_SE_SECURE_BIT_MASK 0x1 +#define AFE_DL_4CH_SE_SECURE_BIT_MASK_SFT (0x1 << 0) + +/* AFE_SE_SECURE_CON2 */ +#define AFE_VUL38_SE_SECURE_BIT_SFT 28 +#define AFE_VUL38_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL38_SE_SECURE_BIT_MASK_SFT (0x1 << 28) +#define AFE_VUL37_SE_SECURE_BIT_SFT 27 +#define AFE_VUL37_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL37_SE_SECURE_BIT_MASK_SFT (0x1 << 27) +#define AFE_VUL36_SE_SECURE_BIT_SFT 26 +#define AFE_VUL36_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL36_SE_SECURE_BIT_MASK_SFT (0x1 << 26) +#define AFE_VUL35_SE_SECURE_BIT_SFT 25 +#define AFE_VUL35_SE_SECURE_BIT_MASK 0x1 +#define 
AFE_VUL35_SE_SECURE_BIT_MASK_SFT (0x1 << 25) +#define AFE_VUL34_SE_SECURE_BIT_SFT 24 +#define AFE_VUL34_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL34_SE_SECURE_BIT_MASK_SFT (0x1 << 24) +#define AFE_VUL33_SE_SECURE_BIT_SFT 23 +#define AFE_VUL33_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL33_SE_SECURE_BIT_MASK_SFT (0x1 << 23) +#define AFE_VUL32_SE_SECURE_BIT_SFT 22 +#define AFE_VUL32_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL32_SE_SECURE_BIT_MASK_SFT (0x1 << 22) +#define AFE_VUL31_SE_SECURE_BIT_SFT 21 +#define AFE_VUL31_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL31_SE_SECURE_BIT_MASK_SFT (0x1 << 21) +#define AFE_VUL30_SE_SECURE_BIT_SFT 20 +#define AFE_VUL30_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL30_SE_SECURE_BIT_MASK_SFT (0x1 << 20) +#define AFE_VUL29_SE_SECURE_BIT_SFT 19 +#define AFE_VUL29_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL29_SE_SECURE_BIT_MASK_SFT (0x1 << 19) +#define AFE_VUL28_SE_SECURE_BIT_SFT 18 +#define AFE_VUL28_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL28_SE_SECURE_BIT_MASK_SFT (0x1 << 18) +#define AFE_VUL27_SE_SECURE_BIT_SFT 17 +#define AFE_VUL27_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL27_SE_SECURE_BIT_MASK_SFT (0x1 << 17) +#define AFE_VUL26_SE_SECURE_BIT_SFT 16 +#define AFE_VUL26_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL26_SE_SECURE_BIT_MASK_SFT (0x1 << 16) +#define AFE_VUL25_SE_SECURE_BIT_SFT 15 +#define AFE_VUL25_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL25_SE_SECURE_BIT_MASK_SFT (0x1 << 15) +#define AFE_VUL24_SE_SECURE_BIT_SFT 14 +#define AFE_VUL24_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL24_SE_SECURE_BIT_MASK_SFT (0x1 << 14) +#define AFE_VUL_CM2_SE_SECURE_BIT_SFT 13 +#define AFE_VUL_CM2_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL_CM2_SE_SECURE_BIT_MASK_SFT (0x1 << 13) +#define AFE_VUL_CM1_SE_SECURE_BIT_SFT 12 +#define AFE_VUL_CM1_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL_CM1_SE_SECURE_BIT_MASK_SFT (0x1 << 12) +#define AFE_VUL_CM0_SE_SECURE_BIT_SFT 11 +#define AFE_VUL_CM0_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL_CM0_SE_SECURE_BIT_MASK_SFT (0x1 << 11) +#define 
AFE_VUL10_SE_SECURE_BIT_SFT 10 +#define AFE_VUL10_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL10_SE_SECURE_BIT_MASK_SFT (0x1 << 10) +#define AFE_VUL9_SE_SECURE_BIT_SFT 9 +#define AFE_VUL9_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL9_SE_SECURE_BIT_MASK_SFT (0x1 << 9) +#define AFE_VUL8_SE_SECURE_BIT_SFT 8 +#define AFE_VUL8_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL8_SE_SECURE_BIT_MASK_SFT (0x1 << 8) +#define AFE_VUL7_SE_SECURE_BIT_SFT 7 +#define AFE_VUL7_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL7_SE_SECURE_BIT_MASK_SFT (0x1 << 7) +#define AFE_VUL6_SE_SECURE_BIT_SFT 6 +#define AFE_VUL6_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL6_SE_SECURE_BIT_MASK_SFT (0x1 << 6) +#define AFE_VUL5_SE_SECURE_BIT_SFT 5 +#define AFE_VUL5_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL5_SE_SECURE_BIT_MASK_SFT (0x1 << 5) +#define AFE_VUL4_SE_SECURE_BIT_SFT 4 +#define AFE_VUL4_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL4_SE_SECURE_BIT_MASK_SFT (0x1 << 4) +#define AFE_VUL3_SE_SECURE_BIT_SFT 3 +#define AFE_VUL3_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL3_SE_SECURE_BIT_MASK_SFT (0x1 << 3) +#define AFE_VUL2_SE_SECURE_BIT_SFT 2 +#define AFE_VUL2_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL2_SE_SECURE_BIT_MASK_SFT (0x1 << 2) +#define AFE_VUL1_SE_SECURE_BIT_SFT 1 +#define AFE_VUL1_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL1_SE_SECURE_BIT_MASK_SFT (0x1 << 1) +#define AFE_VUL0_SE_SECURE_BIT_SFT 0 +#define AFE_VUL0_SE_SECURE_BIT_MASK 0x1 +#define AFE_VUL0_SE_SECURE_BIT_MASK_SFT (0x1 << 0) + +/* AFE_SE_SECURE_CON3 */ +#define AFE_SPDIFIN_SE_SECURE_BIT_SFT 10 +#define AFE_SPDIFIN_SE_SECURE_BIT_MASK 0x1 +#define AFE_SPDIFIN_SE_SECURE_BIT_MASK_SFT (0x1 << 10) +#define AFE_TDM_IN_SE_SECURE_BIT_SFT 9 +#define AFE_TDM_IN_SE_SECURE_BIT_MASK 0x1 +#define AFE_TDM_IN_SE_SECURE_BIT_MASK_SFT (0x1 << 9) +#define AFE_MPHONE_EARC_SE_SECURE_BIT_SFT 8 +#define AFE_MPHONE_EARC_SE_SECURE_BIT_MASK 0x1 +#define AFE_MPHONE_EARC_SE_SECURE_BIT_MASK_SFT (0x1 << 8) +#define AFE_MPHONE_SPDIF_SE_SECURE_BIT_SFT 7 +#define AFE_MPHONE_SPDIF_SE_SECURE_BIT_MASK 0x1 +#define 
AFE_MPHONE_SPDIF_SE_SECURE_BIT_MASK_SFT (0x1 << 7) +#define AFE_ETDM_IN6_SE_SECURE_BIT_SFT 6 +#define AFE_ETDM_IN6_SE_SECURE_BIT_MASK 0x1 +#define AFE_ETDM_IN6_SE_SECURE_BIT_MASK_SFT (0x1 << 6) +#define AFE_ETDM_IN5_SE_SECURE_BIT_SFT 5 +#define AFE_ETDM_IN5_SE_SECURE_BIT_MASK 0x1 +#define AFE_ETDM_IN5_SE_SECURE_BIT_MASK_SFT (0x1 << 5) +#define AFE_ETDM_IN4_SE_SECURE_BIT_SFT 4 +#define AFE_ETDM_IN4_SE_SECURE_BIT_MASK 0x1 +#define AFE_ETDM_IN4_SE_SECURE_BIT_MASK_SFT (0x1 << 4) +#define AFE_ETDM_IN3_SE_SECURE_BIT_SFT 3 +#define AFE_ETDM_IN3_SE_SECURE_BIT_MASK 0x1 +#define AFE_ETDM_IN3_SE_SECURE_BIT_MASK_SFT (0x1 << 3) +#define AFE_ETDM_IN2_SE_SECURE_BIT_SFT 2 +#define AFE_ETDM_IN2_SE_SECURE_BIT_MASK 0x1 +#define AFE_ETDM_IN2_SE_SECURE_BIT_MASK_SFT (0x1 << 2) +#define AFE_ETDM_IN1_SE_SECURE_BIT_SFT 1 +#define AFE_ETDM_IN1_SE_SECURE_BIT_MASK 0x1 +#define AFE_ETDM_IN1_SE_SECURE_BIT_MASK_SFT (0x1 << 1) +#define AFE_ETDM_IN0_SE_SECURE_BIT_SFT 0 +#define AFE_ETDM_IN0_SE_SECURE_BIT_MASK 0x1 +#define AFE_ETDM_IN0_SE_SECURE_BIT_MASK_SFT (0x1 << 0) + +/* AFE_SE_PROT_SIDEBAND0 */ +#define HDMI_HPROT_SFT 11 +#define HDMI_HPROT_MASK 0x1 +#define HDMI_HPROT_MASK_SFT (0x1 << 11) +#define SPDIF2_OUT_HPROT_SFT 10 +#define SPDIF2_OUT_HPROT_MASK 0x1 +#define SPDIF2_OUT_HPROT_MASK_SFT (0x1 << 10) +#define SPDIF_OUT_HPROT_SFT 9 +#define SPDIF_OUT_HPROT_MASK 0x1 +#define SPDIF_OUT_HPROT_MASK_SFT (0x1 << 9) +#define DL8_HPROT_SFT 8 +#define DL8_HPROT_MASK 0x1 +#define DL8_HPROT_MASK_SFT (0x1 << 8) +#define DL7_HPROT_SFT 7 +#define DL7_HPROT_MASK 0x1 +#define DL7_HPROT_MASK_SFT (0x1 << 7) +#define DL6_HPROT_SFT 6 +#define DL6_HPROT_MASK 0x1 +#define DL6_HPROT_MASK_SFT (0x1 << 6) +#define DL5_HPROT_SFT 5 +#define DL5_HPROT_MASK 0x1 +#define DL5_HPROT_MASK_SFT (0x1 << 5) +#define DL4_HPROT_SFT 4 +#define DL4_HPROT_MASK 0x1 +#define DL4_HPROT_MASK_SFT (0x1 << 4) +#define DL3_HPROT_SFT 3 +#define DL3_HPROT_MASK 0x1 +#define DL3_HPROT_MASK_SFT (0x1 << 3) +#define DL2_HPROT_SFT 2 +#define 
DL2_HPROT_MASK 0x1 +#define DL2_HPROT_MASK_SFT (0x1 << 2) +#define DL1_HPROT_SFT 1 +#define DL1_HPROT_MASK 0x1 +#define DL1_HPROT_MASK_SFT (0x1 << 1) +#define DL0_HPROT_SFT 0 +#define DL0_HPROT_MASK 0x1 +#define DL0_HPROT_MASK_SFT (0x1 << 0) + +/* AFE_SE_PROT_SIDEBAND1 */ +#define DL46_HPROT_SFT 26 +#define DL46_HPROT_MASK 0x1 +#define DL46_HPROT_MASK_SFT (0x1 << 26) +#define DL45_HPROT_SFT 25 +#define DL45_HPROT_MASK 0x1 +#define DL45_HPROT_MASK_SFT (0x1 << 25) +#define DL44_HPROT_SFT 24 +#define DL44_HPROT_MASK 0x1 +#define DL44_HPROT_MASK_SFT (0x1 << 24) +#define DL43_HPROT_SFT 23 +#define DL43_HPROT_MASK 0x1 +#define DL43_HPROT_MASK_SFT (0x1 << 23) +#define DL42_HPROT_SFT 22 +#define DL42_HPROT_MASK 0x1 +#define DL42_HPROT_MASK_SFT (0x1 << 22) +#define DL41_HPROT_SFT 21 +#define DL41_HPROT_MASK 0x1 +#define DL41_HPROT_MASK_SFT (0x1 << 21) +#define DL40_HPROT_SFT 20 +#define DL40_HPROT_MASK 0x1 +#define DL40_HPROT_MASK_SFT (0x1 << 20) +#define DL39_HPROT_SFT 19 +#define DL39_HPROT_MASK 0x1 +#define DL39_HPROT_MASK_SFT (0x1 << 19) +#define DL38_HPROT_SFT 18 +#define DL38_HPROT_MASK 0x1 +#define DL38_HPROT_MASK_SFT (0x1 << 18) +#define DL37_HPROT_SFT 17 +#define DL37_HPROT_MASK 0x1 +#define DL37_HPROT_MASK_SFT (0x1 << 17) +#define DL36_HPROT_SFT 16 +#define DL36_HPROT_MASK 0x1 +#define DL36_HPROT_MASK_SFT (0x1 << 16) +#define DL35_HPROT_SFT 15 +#define DL35_HPROT_MASK 0x1 +#define DL35_HPROT_MASK_SFT (0x1 << 15) +#define DL34_HPROT_SFT 14 +#define DL34_HPROT_MASK 0x1 +#define DL34_HPROT_MASK_SFT (0x1 << 14) +#define DL33_HPROT_SFT 13 +#define DL33_HPROT_MASK 0x1 +#define DL33_HPROT_MASK_SFT (0x1 << 13) +#define DL32_HPROT_SFT 12 +#define DL32_HPROT_MASK 0x1 +#define DL32_HPROT_MASK_SFT (0x1 << 12) +#define DL31_HPROT_SFT 11 +#define DL31_HPROT_MASK 0x1 +#define DL31_HPROT_MASK_SFT (0x1 << 11) +#define DL30_HPROT_SFT 10 +#define DL30_HPROT_MASK 0x1 +#define DL30_HPROT_MASK_SFT (0x1 << 10) +#define DL29_HPROT_SFT 9 +#define DL29_HPROT_MASK 0x1 +#define 
DL29_HPROT_MASK_SFT (0x1 << 9) +#define DL28_HPROT_SFT 8 +#define DL28_HPROT_MASK 0x1 +#define DL28_HPROT_MASK_SFT (0x1 << 8) +#define DL27_HPROT_SFT 7 +#define DL27_HPROT_MASK 0x1 +#define DL27_HPROT_MASK_SFT (0x1 << 7) +#define DL26_HPROT_SFT 6 +#define DL26_HPROT_MASK 0x1 +#define DL26_HPROT_MASK_SFT (0x1 << 6) +#define DL25_HPROT_SFT 5 +#define DL25_HPROT_MASK 0x1 +#define DL25_HPROT_MASK_SFT (0x1 << 5) +#define DL24_HPROT_SFT 4 +#define DL24_HPROT_MASK 0x1 +#define DL24_HPROT_MASK_SFT (0x1 << 4) +#define DL23_HPROT_SFT 3 +#define DL23_HPROT_MASK 0x1 +#define DL23_HPROT_MASK_SFT (0x1 << 3) +#define DL_48CH_PROT_SFT 2 +#define DL_48CH_PROT_MASK 0x1 +#define DL_48CH_PROT_MASK_SFT (0x1 << 2) +#define DL_24CH_PROT_SFT 1 +#define DL_24CH_PROT_MASK 0x1 +#define DL_24CH_PROT_MASK_SFT (0x1 << 1) +#define DL_4CH_PROT_SFT 0 +#define DL_4CH_PROT_MASK 0x1 +#define DL_4CH_PROT_MASK_SFT (0x1 << 0) + +/* AFE_SE_PROT_SIDEBAND2 */ +#define VUL38_HPROT_SFT 28 +#define VUL38_HPROT_MASK 0x1 +#define VUL38_HPROT_MASK_SFT (0x1 << 28) +#define VUL37_HPROT_SFT 27 +#define VUL37_HPROT_MASK 0x1 +#define VUL37_HPROT_MASK_SFT (0x1 << 27) +#define VUL36_HPROT_SFT 26 +#define VUL36_HPROT_MASK 0x1 +#define VUL36_HPROT_MASK_SFT (0x1 << 26) +#define VUL35_HPROT_SFT 25 +#define VUL35_HPROT_MASK 0x1 +#define VUL35_HPROT_MASK_SFT (0x1 << 25) +#define VUL34_HPROT_SFT 24 +#define VUL34_HPROT_MASK 0x1 +#define VUL34_HPROT_MASK_SFT (0x1 << 24) +#define VUL33_HPROT_SFT 23 +#define VUL33_HPROT_MASK 0x1 +#define VUL33_HPROT_MASK_SFT (0x1 << 23) +#define VUL32_HPROT_SFT 22 +#define VUL32_HPROT_MASK 0x1 +#define VUL32_HPROT_MASK_SFT (0x1 << 22) +#define VUL31_HPROT_SFT 21 +#define VUL31_HPROT_MASK 0x1 +#define VUL31_HPROT_MASK_SFT (0x1 << 21) +#define VUL30_HPROT_SFT 20 +#define VUL30_HPROT_MASK 0x1 +#define VUL30_HPROT_MASK_SFT (0x1 << 20) +#define VUL29_HPROT_SFT 19 +#define VUL29_HPROT_MASK 0x1 +#define VUL29_HPROT_MASK_SFT (0x1 << 19) +#define VUL28_HPROT_SFT 18 +#define VUL28_HPROT_MASK 0x1 +#define 
VUL28_HPROT_MASK_SFT (0x1 << 18) +#define VUL27_HPROT_SFT 17 +#define VUL27_HPROT_MASK 0x1 +#define VUL27_HPROT_MASK_SFT (0x1 << 17) +#define VUL26_HPROT_SFT 16 +#define VUL26_HPROT_MASK 0x1 +#define VUL26_HPROT_MASK_SFT (0x1 << 16) +#define VUL25_HPROT_SFT 15 +#define VUL25_HPROT_MASK 0x1 +#define VUL25_HPROT_MASK_SFT (0x1 << 15) +#define VUL24_HPROT_SFT 14 +#define VUL24_HPROT_MASK 0x1 +#define VUL24_HPROT_MASK_SFT (0x1 << 14) +#define VUL_CM2_HPROT_SFT 13 +#define VUL_CM2_HPROT_MASK 0x1 +#define VUL_CM2_HPROT_MASK_SFT (0x1 << 13) +#define VUL_CM1_HPROT_SFT 12 +#define VUL_CM1_HPROT_MASK 0x1 +#define VUL_CM1_HPROT_MASK_SFT (0x1 << 12) +#define VUL_CM0_HPROT_SFT 11 +#define VUL_CM0_HPROT_MASK 0x1 +#define VUL_CM0_HPROT_MASK_SFT (0x1 << 11) +#define VUL10_HPROT_SFT 10 +#define VUL10_HPROT_MASK 0x1 +#define VUL10_HPROT_MASK_SFT (0x1 << 10) +#define VUL9_HPROT_SFT 9 +#define VUL9_HPROT_MASK 0x1 +#define VUL9_HPROT_MASK_SFT (0x1 << 9) +#define VUL8_HPROT_SFT 8 +#define VUL8_HPROT_MASK 0x1 +#define VUL8_HPROT_MASK_SFT (0x1 << 8) +#define VUL7_HPROT_SFT 7 +#define VUL7_HPROT_MASK 0x1 +#define VUL7_HPROT_MASK_SFT (0x1 << 7) +#define VUL6_HPROT_SFT 6 +#define VUL6_HPROT_MASK 0x1 +#define VUL6_HPROT_MASK_SFT (0x1 << 6) +#define VUL5_HPROT_SFT 5 +#define VUL5_HPROT_MASK 0x1 +#define VUL5_HPROT_MASK_SFT (0x1 << 5) +#define VUL4_HPROT_SFT 4 +#define VUL4_HPROT_MASK 0x1 +#define VUL4_HPROT_MASK_SFT (0x1 << 4) +#define VUL3_HPROT_SFT 3 +#define VUL3_HPROT_MASK 0x1 +#define VUL3_HPROT_MASK_SFT (0x1 << 3) +#define VUL2_HPROT_SFT 2 +#define VUL2_HPROT_MASK 0x1 +#define VUL2_HPROT_MASK_SFT (0x1 << 2) +#define VUL1_HPROT_SFT 1 +#define VUL1_HPROT_MASK 0x1 +#define VUL1_HPROT_MASK_SFT (0x1 << 1) +#define VUL0_HPROT_SFT 0 +#define VUL0_HPROT_MASK 0x1 +#define VUL0_HPROT_MASK_SFT (0x1 << 0) + +/* AFE_SE_PROT_SIDEBAND3 */ +#define MPHONE_EARC_HPROT_SFT 10 +#define MPHONE_EARC_HPROT_MASK 0x1 +#define MPHONE_EARC_HPROT_MASK_SFT (0x1 << 10) +#define MPHONE_SPDIF_HPROT_SFT 9 +#define 
MPHONE_SPDIF_HPROT_MASK 0x1 +#define MPHONE_SPDIF_HPROT_MASK_SFT (0x1 << 9) +#define SPDIFIN_HPROT_SFT 8 +#define SPDIFIN_HPROT_MASK 0x1 +#define SPDIFIN_HPROT_MASK_SFT (0x1 << 8) +#define TDMIN_HPROT_SFT 7 +#define TDMIN_HPROT_MASK 0x1 +#define TDMIN_HPROT_MASK_SFT (0x1 << 7) +#define ETDM_IN6_HPROT_SFT 6 +#define ETDM_IN6_HPROT_MASK 0x1 +#define ETDM_IN6_HPROT_MASK_SFT (0x1 << 6) +#define ETDM_IN5_HPROT_SFT 5 +#define ETDM_IN5_HPROT_MASK 0x1 +#define ETDM_IN5_HPROT_MASK_SFT (0x1 << 5) +#define ETDM_IN4_HPROT_SFT 4 +#define ETDM_IN4_HPROT_MASK 0x1 +#define ETDM_IN4_HPROT_MASK_SFT (0x1 << 4) +#define ETDM_IN3_HPROT_SFT 3 +#define ETDM_IN3_HPROT_MASK 0x1 +#define ETDM_IN3_HPROT_MASK_SFT (0x1 << 3) +#define ETDM_IN2_HPROT_SFT 2 +#define ETDM_IN2_HPROT_MASK 0x1 +#define ETDM_IN2_HPROT_MASK_SFT (0x1 << 2) +#define ETDM_IN1_HPROT_SFT 1 +#define ETDM_IN1_HPROT_MASK 0x1 +#define ETDM_IN1_HPROT_MASK_SFT (0x1 << 1) +#define ETDM_IN0_HPROT_SFT 0 +#define ETDM_IN0_HPROT_MASK 0x1 +#define ETDM_IN0_HPROT_MASK_SFT (0x1 << 0) + +/* AFE_SE_DOMAIN_SIDEBAND0 */ +#define DL7_HDOMAIN_SFT 28 +#define DL7_HDOMAIN_MASK 0xf +#define DL7_HDOMAIN_MASK_SFT (0xf << 28) +#define DL6_HDOMAIN_SFT 24 +#define DL6_HDOMAIN_MASK 0xf +#define DL6_HDOMAIN_MASK_SFT (0xf << 24) +#define DL5_HDOMAIN_SFT 20 +#define DL5_HDOMAIN_MASK 0xf +#define DL5_HDOMAIN_MASK_SFT (0xf << 20) +#define DL4_HDOMAIN_SFT 16 +#define DL4_HDOMAIN_MASK 0xf +#define DL4_HDOMAIN_MASK_SFT (0xf << 16) +#define DL3_HDOMAIN_SFT 12 +#define DL3_HDOMAIN_MASK 0xf +#define DL3_HDOMAIN_MASK_SFT (0xf << 12) +#define DL2_HDOMAIN_SFT 8 +#define DL2_HDOMAIN_MASK 0xf +#define DL2_HDOMAIN_MASK_SFT (0xf << 8) +#define DL1_HDOMAIN_SFT 4 +#define DL1_HDOMAIN_MASK 0xf +#define DL1_HDOMAIN_MASK_SFT (0xf << 4) +#define DL0_HDOMAIN_SFT 0 +#define DL0_HDOMAIN_MASK 0xf +#define DL0_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND1 */ +#define DL_48CH_HDOMAIN_SFT 24 +#define DL_48CH_HDOMAIN_MASK 0xf +#define DL_48CH_HDOMAIN_MASK_SFT (0xf << 24) 
+#define DL_24CH_HDOMAIN_SFT 20 +#define DL_24CH_HDOMAIN_MASK 0xf +#define DL_24CH_HDOMAIN_MASK_SFT (0xf << 20) +#define DL_4CH_HDOMAIN_SFT 16 +#define DL_4CH_HDOMAIN_MASK 0xf +#define DL_4CH_HDOMAIN_MASK_SFT (0xf << 16) +#define HDMI_HDOMAIN_SFT 12 +#define HDMI_HDOMAIN_MASK 0xf +#define HDMI_HDOMAIN_MASK_SFT (0xf << 12) +#define SPDIF2_OUT_HDOMAIN_SFT 8 +#define SPDIF2_OUT_HDOMAIN_MASK 0xf +#define SPDIF2_OUT_HDOMAIN_MASK_SFT (0xf << 8) +#define SPDIF_OUT_HDOMAIN_SFT 4 +#define SPDIF_OUT_HDOMAIN_MASK 0xf +#define SPDIF_OUT_HDOMAIN_MASK_SFT (0xf << 4) +#define DL8_HDOMAIN_SFT 0 +#define DL8_HDOMAIN_MASK 0xf +#define DL8_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND2 */ +#define DL30_HDOMAIN_SFT 28 +#define DL30_HDOMAIN_MASK 0xf +#define DL30_HDOMAIN_MASK_SFT (0xf << 28) +#define DL29_HDOMAIN_SFT 24 +#define DL29_HDOMAIN_MASK 0xf +#define DL29_HDOMAIN_MASK_SFT (0xf << 24) +#define DL28_HDOMAIN_SFT 20 +#define DL28_HDOMAIN_MASK 0xf +#define DL28_HDOMAIN_MASK_SFT (0xf << 20) +#define DL27_HDOMAIN_SFT 16 +#define DL27_HDOMAIN_MASK 0xf +#define DL27_HDOMAIN_MASK_SFT (0xf << 16) +#define DL26_HDOMAIN_SFT 12 +#define DL26_HDOMAIN_MASK 0xf +#define DL26_HDOMAIN_MASK_SFT (0xf << 12) +#define DL25_HDOMAIN_SFT 8 +#define DL25_HDOMAIN_MASK 0xf +#define DL25_HDOMAIN_MASK_SFT (0xf << 8) +#define DL24_HDOMAIN_SFT 4 +#define DL24_HDOMAIN_MASK 0xf +#define DL24_HDOMAIN_MASK_SFT (0xf << 4) +#define DL23_HDOMAIN_SFT 0 +#define DL23_HDOMAIN_MASK 0xf +#define DL23_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND3 */ +#define DL38_HDOMAIN_SFT 28 +#define DL38_HDOMAIN_MASK 0xf +#define DL38_HDOMAIN_MASK_SFT (0xf << 28) +#define DL37_HDOMAIN_SFT 24 +#define DL37_HDOMAIN_MASK 0xf +#define DL37_HDOMAIN_MASK_SFT (0xf << 24) +#define DL36_HDOMAIN_SFT 20 +#define DL36_HDOMAIN_MASK 0xf +#define DL36_HDOMAIN_MASK_SFT (0xf << 20) +#define DL35_HDOMAIN_SFT 16 +#define DL35_HDOMAIN_MASK 0xf +#define DL35_HDOMAIN_MASK_SFT (0xf << 16) +#define DL34_HDOMAIN_SFT 12 +#define 
DL34_HDOMAIN_MASK 0xf +#define DL34_HDOMAIN_MASK_SFT (0xf << 12) +#define DL33_HDOMAIN_SFT 8 +#define DL33_HDOMAIN_MASK 0xf +#define DL33_HDOMAIN_MASK_SFT (0xf << 8) +#define DL32_HDOMAIN_SFT 4 +#define DL32_HDOMAIN_MASK 0xf +#define DL32_HDOMAIN_MASK_SFT (0xf << 4) +#define DL31_HDOMAIN_SFT 0 +#define DL31_HDOMAIN_MASK 0xf +#define DL31_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND4 */ +#define DL46_HDOMAIN_SFT 28 +#define DL46_HDOMAIN_MASK 0xf +#define DL46_HDOMAIN_MASK_SFT (0xf << 28) +#define DL45_HDOMAIN_SFT 24 +#define DL45_HDOMAIN_MASK 0xf +#define DL45_HDOMAIN_MASK_SFT (0xf << 24) +#define DL44_HDOMAIN_SFT 20 +#define DL44_HDOMAIN_MASK 0xf +#define DL44_HDOMAIN_MASK_SFT (0xf << 20) +#define DL43_HDOMAIN_SFT 16 +#define DL43_HDOMAIN_MASK 0xf +#define DL43_HDOMAIN_MASK_SFT (0xf << 16) +#define DL42_HDOMAIN_SFT 12 +#define DL42_HDOMAIN_MASK 0xf +#define DL42_HDOMAIN_MASK_SFT (0xf << 12) +#define DL41_HDOMAIN_SFT 8 +#define DL41_HDOMAIN_MASK 0xf +#define DL41_HDOMAIN_MASK_SFT (0xf << 8) +#define DL40_HDOMAIN_SFT 4 +#define DL40_HDOMAIN_MASK 0xf +#define DL40_HDOMAIN_MASK_SFT (0xf << 4) +#define DL39_HDOMAIN_SFT 0 +#define DL39_HDOMAIN_MASK 0xf +#define DL39_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND5 */ +#define VUL7_HDOMAIN_SFT 28 +#define VUL7_HDOMAIN_MASK 0xf +#define VUL7_HDOMAIN_MASK_SFT (0xf << 28) +#define VUL6_HDOMAIN_SFT 24 +#define VUL6_HDOMAIN_MASK 0xf +#define VUL6_HDOMAIN_MASK_SFT (0xf << 24) +#define VUL5_HDOMAIN_SFT 20 +#define VUL5_HDOMAIN_MASK 0xf +#define VUL5_HDOMAIN_MASK_SFT (0xf << 20) +#define VUL4_HDOMAIN_SFT 16 +#define VUL4_HDOMAIN_MASK 0xf +#define VUL4_HDOMAIN_MASK_SFT (0xf << 16) +#define VUL3_HDOMAIN_SFT 12 +#define VUL3_HDOMAIN_MASK 0xf +#define VUL3_HDOMAIN_MASK_SFT (0xf << 12) +#define VUL2_HDOMAIN_SFT 8 +#define VUL2_HDOMAIN_MASK 0xf +#define VUL2_HDOMAIN_MASK_SFT (0xf << 8) +#define VUL1_HDOMAIN_SFT 4 +#define VUL1_HDOMAIN_MASK 0xf +#define VUL1_HDOMAIN_MASK_SFT (0xf << 4) +#define VUL0_HDOMAIN_SFT 0 
+#define VUL0_HDOMAIN_MASK 0xf +#define VUL0_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND6 */ +#define VUL25_HDOMAIN_SFT 28 +#define VUL25_HDOMAIN_MASK 0xf +#define VUL25_HDOMAIN_MASK_SFT (0xf << 28) +#define VUL24_HDOMAIN_SFT 24 +#define VUL24_HDOMAIN_MASK 0xf +#define VUL24_HDOMAIN_MASK_SFT (0xf << 24) +#define VUL_CM2_HDOMAIN_SFT 20 +#define VUL_CM2_HDOMAIN_MASK 0xf +#define VUL_CM2_HDOMAIN_MASK_SFT (0xf << 20) +#define VUL_CM1_HDOMAIN_SFT 16 +#define VUL_CM1_HDOMAIN_MASK 0xf +#define VUL_CM1_HDOMAIN_MASK_SFT (0xf << 16) +#define VUL_CM0_HDOMAIN_SFT 12 +#define VUL_CM0_HDOMAIN_MASK 0xf +#define VUL_CM0_HDOMAIN_MASK_SFT (0xf << 12) +#define VUL10_HDOMAIN_SFT 8 +#define VUL10_HDOMAIN_MASK 0xf +#define VUL10_HDOMAIN_MASK_SFT (0xf << 8) +#define VUL9_HDOMAIN_SFT 4 +#define VUL9_HDOMAIN_MASK 0xf +#define VUL9_HDOMAIN_MASK_SFT (0xf << 4) +#define VUL8_HDOMAIN_SFT 0 +#define VUL8_HDOMAIN_MASK 0xf +#define VUL8_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND7 */ +#define VUL33_HDOMAIN_SFT 28 +#define VUL33_HDOMAIN_MASK 0xf +#define VUL33_HDOMAIN_MASK_SFT (0xf << 28) +#define VUL32_HDOMAIN_SFT 24 +#define VUL32_HDOMAIN_MASK 0xf +#define VUL32_HDOMAIN_MASK_SFT (0xf << 24) +#define VUL31_HDOMAIN_SFT 20 +#define VUL31_HDOMAIN_MASK 0xf +#define VUL31_HDOMAIN_MASK_SFT (0xf << 20) +#define VUL30_HDOMAIN_SFT 16 +#define VUL30_HDOMAIN_MASK 0xf +#define VUL30_HDOMAIN_MASK_SFT (0xf << 16) +#define VUL29_HDOMAIN_SFT 12 +#define VUL29_HDOMAIN_MASK 0xf +#define VUL29_HDOMAIN_MASK_SFT (0xf << 12) +#define VUL28_HDOMAIN_SFT 8 +#define VUL28_HDOMAIN_MASK 0xf +#define VUL28_HDOMAIN_MASK_SFT (0xf << 8) +#define VUL27_HDOMAIN_SFT 4 +#define VUL27_HDOMAIN_MASK 0xf +#define VUL27_HDOMAIN_MASK_SFT (0xf << 4) +#define VUL26_HDOMAIN_SFT 0 +#define VUL26_HDOMAIN_MASK 0xf +#define VUL26_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND8 */ +#define ETDM_IN2_HDOMAIN_SFT 28 +#define ETDM_IN2_HDOMAIN_MASK 0xf +#define ETDM_IN2_HDOMAIN_MASK_SFT (0xf << 28) +#define
ETDM_IN1_HDOMAIN_SFT 24 +#define ETDM_IN1_HDOMAIN_MASK 0xf +#define ETDM_IN1_HDOMAIN_MASK_SFT (0xf << 24) +#define ETDM_IN0_HDOMAIN_SFT 20 +#define ETDM_IN0_HDOMAIN_MASK 0xf +#define ETDM_IN0_HDOMAIN_MASK_SFT (0xf << 20) +#define VUL38_HDOMAIN_SFT 16 +#define VUL38_HDOMAIN_MASK 0xf +#define VUL38_HDOMAIN_MASK_SFT (0xf << 16) +#define VUL37_HDOMAIN_SFT 12 +#define VUL37_HDOMAIN_MASK 0xf +#define VUL37_HDOMAIN_MASK_SFT (0xf << 12) +#define VUL36_HDOMAIN_SFT 8 +#define VUL36_HDOMAIN_MASK 0xf +#define VUL36_HDOMAIN_MASK_SFT (0xf << 8) +#define VUL35_HDOMAIN_SFT 4 +#define VUL35_HDOMAIN_MASK 0xf +#define VUL35_HDOMAIN_MASK_SFT (0xf << 4) +#define VUL34_HDOMAIN_SFT 0 +#define VUL34_HDOMAIN_MASK 0xf +#define VUL34_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_SE_DOMAIN_SIDEBAND9 */ +#define MPHONE_EARC_HDOMAIN_SFT 28 +#define MPHONE_EARC_HDOMAIN_MASK 0xf +#define MPHONE_EARC_HDOMAIN_MASK_SFT (0xf << 28) +#define MPHONE_SPDIF_HDOMAIN_SFT 24 +#define MPHONE_SPDIF_HDOMAIN_MASK 0xf +#define MPHONE_SPDIF_HDOMAIN_MASK_SFT (0xf << 24) +#define SPDIFIN_HDOMAIN_SFT 20 +#define SPDIFIN_HDOMAIN_MASK 0xf +#define SPDIFIN_HDOMAIN_MASK_SFT (0xf << 20) +#define TDMIN_HDOMAIN_SFT 16 +#define TDMIN_HDOMAIN_MASK 0xf +#define TDMIN_HDOMAIN_MASK_SFT (0xf << 16) +#define ETDM_IN6_HDOMAIN_SFT 12 +#define ETDM_IN6_HDOMAIN_MASK 0xf +#define ETDM_IN6_HDOMAIN_MASK_SFT (0xf << 12) +#define ETDM_IN5_HDOMAIN_SFT 8 +#define ETDM_IN5_HDOMAIN_MASK 0xf +#define ETDM_IN5_HDOMAIN_MASK_SFT (0xf << 8) +#define ETDM_IN4_HDOMAIN_SFT 4 +#define ETDM_IN4_HDOMAIN_MASK 0xf +#define ETDM_IN4_HDOMAIN_MASK_SFT (0xf << 4) +#define ETDM_IN3_HDOMAIN_SFT 0 +#define ETDM_IN3_HDOMAIN_MASK 0xf +#define ETDM_IN3_HDOMAIN_MASK_SFT (0xf << 0) + +/* AFE_PROT_SIDEBAND0_MON */ +#define AFE_PROT_SIDEBAN0_MON_SFT 0 +#define AFE_PROT_SIDEBAN0_MON_MASK 0xffffffff +#define AFE_PROT_SIDEBAN0_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_PROT_SIDEBAND1_MON */ +#define AFE_PROT_SIDEBAN1_MON_SFT 0 +#define AFE_PROT_SIDEBAN1_MON_MASK
0xffffffff +#define AFE_PROT_SIDEBAN1_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_PROT_SIDEBAND2_MON */ +#define AFE_PROT_SIDEBAN2_MON_SFT 0 +#define AFE_PROT_SIDEBAN2_MON_MASK 0xffffffff +#define AFE_PROT_SIDEBAN2_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_PROT_SIDEBAND3_MON */ +#define AFE_PROT_SIDEBAN3_MON_SFT 0 +#define AFE_PROT_SIDEBAN3_MON_MASK 0xffffffff +#define AFE_PROT_SIDEBAN3_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND0_MON */ +#define AFE_DOMAIN_SIDEBAN0_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN0_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN0_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND1_MON */ +#define AFE_DOMAIN_SIDEBAN1_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN1_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN1_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND2_MON */ +#define AFE_DOMAIN_SIDEBAN2_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN2_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN2_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND3_MON */ +#define AFE_DOMAIN_SIDEBAN3_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN3_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN3_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND4_MON */ +#define AFE_DOMAIN_SIDEBAN4_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN4_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN4_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND5_MON */ +#define AFE_DOMAIN_SIDEBAN5_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN5_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN5_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND6_MON */ +#define AFE_DOMAIN_SIDEBAN6_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN6_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN6_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND7_MON */ +#define AFE_DOMAIN_SIDEBAN7_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN7_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN7_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND8_MON */ +#define AFE_DOMAIN_SIDEBAN8_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN8_MON_MASK
0xffffffff +#define AFE_DOMAIN_SIDEBAN8_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_DOMAIN_SIDEBAND9_MON */ +#define AFE_DOMAIN_SIDEBAN9_MON_SFT 0 +#define AFE_DOMAIN_SIDEBAN9_MON_MASK 0xffffffff +#define AFE_DOMAIN_SIDEBAN9_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_SECURE_CONN0 */ +#define AFE_SPDIFIN_LPBK_CON_MASK_S_SFT 26 +#define AFE_SPDIFIN_LPBK_CON_MASK_S_MASK 0x3 +#define AFE_SPDIFIN_LPBK_CON_MASK_S_MASK_SFT (0x3 << 26) +#define AFE_ADDA_DMIC1_SRC_CON0_MASK_S_SFT 25 +#define AFE_ADDA_DMIC1_SRC_CON0_MASK_S_MASK 0x1 +#define AFE_ADDA_DMIC1_SRC_CON0_MASK_S_MASK_SFT (0x1 << 25) +#define AFE_ADDA_DMIC0_SRC_CON0_MASK_S_SFT 24 +#define AFE_ADDA_DMIC0_SRC_CON0_MASK_S_MASK 0x1 +#define AFE_ADDA_DMIC0_SRC_CON0_MASK_S_MASK_SFT (0x1 << 24) +#define AFE_ADDA_UL3_SRC_CON0_MASK_S_SFT 23 +#define AFE_ADDA_UL3_SRC_CON0_MASK_S_MASK 0x1 +#define AFE_ADDA_UL3_SRC_CON0_MASK_S_MASK_SFT (0x1 << 23) +#define AFE_ADDA_UL2_SRC_CON0_MASK_S_SFT 22 +#define AFE_ADDA_UL2_SRC_CON0_MASK_S_MASK 0x1 +#define AFE_ADDA_UL2_SRC_CON0_MASK_S_MASK_SFT (0x1 << 22) +#define AFE_ADDA_UL1_SRC_CON0_MASK_S_SFT 21 +#define AFE_ADDA_UL1_SRC_CON0_MASK_S_MASK 0x1 +#define AFE_ADDA_UL1_SRC_CON0_MASK_S_MASK_SFT (0x1 << 21) +#define AFE_ADDA_UL0_SRC_CON0_MASK_S_SFT 20 +#define AFE_ADDA_UL0_SRC_CON0_MASK_S_MASK 0x1 +#define AFE_ADDA_UL0_SRC_CON0_MASK_S_MASK_SFT (0x1 << 20) +#define AFE_MTKAIF1_CFG0_MASK_S_SFT 19 +#define AFE_MTKAIF1_CFG0_MASK_S_MASK 0x1 +#define AFE_MTKAIF1_CFG0_MASK_S_MASK_SFT (0x1 << 19) +#define AFE_MTKAIF0_CFG0_MASK_S_SFT 18 +#define AFE_MTKAIF0_CFG0_MASK_S_MASK 0x1 +#define AFE_MTKAIF0_CFG0_MASK_S_MASK_SFT (0x1 << 18) +#define AFE_TDMIN_CON1_MASK_S_SFT 17 +#define AFE_TDMIN_CON1_MASK_S_MASK 0x1 +#define AFE_TDMIN_CON1_MASK_S_MASK_SFT (0x1 << 17) +#define AFE_TDM_CON2_MASK_S_SFT 16 +#define AFE_TDM_CON2_MASK_S_MASK 0x1 +#define AFE_TDM_CON2_MASK_S_MASK_SFT (0x1 << 16) +#define AFE_DAIBT_CON_MASK_S_SFT 14 +#define AFE_DAIBT_CON_MASK_S_MASK 0x3 +#define AFE_DAIBT_CON_MASK_S_MASK_SFT (0x3 << 14)
+#define AFE_MRGIF_CON_MASK_S_SFT 12 +#define AFE_MRGIF_CON_MASK_S_MASK 0x3 +#define AFE_MRGIF_CON_MASK_S_MASK_SFT (0x3 << 12) +#define AFE_CONNSYS_I2S_CON_MASK_S_SFT 11 +#define AFE_CONNSYS_I2S_CON_MASK_S_MASK 0x1 +#define AFE_CONNSYS_I2S_CON_MASK_S_MASK_SFT (0x1 << 11) +#define AFE_PCM1_INTF_CON0_MASK_S_SFT 6 +#define AFE_PCM1_INTF_CON0_MASK_S_MASK 0x1f +#define AFE_PCM1_INTF_CON0_MASK_S_MASK_SFT (0x1f << 6) +#define AFE_PCM0_INTF_CON1_MASK_S_SFT 0 +#define AFE_PCM0_INTF_CON1_MASK_S_MASK 0x3f +#define AFE_PCM0_INTF_CON1_MASK_S_MASK_SFT (0x3f << 0) + +/* AFE_SECURE_CONN_ETDM0 */ +#define ETDM_0_3_COWORK_CON2_OUT3_DATA_SEL_SFT 28 +#define ETDM_0_3_COWORK_CON2_OUT3_DATA_SEL_MASK 0xf +#define ETDM_0_3_COWORK_CON2_OUT3_DATA_SEL_MASK_SFT (0xf << 28) +#define ETDM_0_3_COWORK_CON2_OUT2_DATA_SEL_SFT 24 +#define ETDM_0_3_COWORK_CON2_OUT2_DATA_SEL_MASK 0xf +#define ETDM_0_3_COWORK_CON2_OUT2_DATA_SEL_MASK_SFT (0xf << 24) +#define ETDM_0_3_COWORK_CON2_IN1_SDATA1_15_SEL_SFT 20 +#define ETDM_0_3_COWORK_CON2_IN1_SDATA1_15_SEL_MASK 0xf +#define ETDM_0_3_COWORK_CON2_IN1_SDATA1_15_SEL_MASK_SFT (0xf << 20) +#define ETDM_0_3_COWORK_CON2_IN1_SDATA0_SEL_SFT 16 +#define ETDM_0_3_COWORK_CON2_IN1_SDATA0_SEL_MASK 0xf +#define ETDM_0_3_COWORK_CON2_IN1_SDATA0_SEL_MASK_SFT (0xf << 16) +#define ETDM_0_3_COWORK_CON2_IN0_SDATA1_15_SEL_SFT 12 +#define ETDM_0_3_COWORK_CON2_IN0_SDATA1_15_SEL_MASK 0xf +#define ETDM_0_3_COWORK_CON2_IN0_SDATA1_15_SEL_MASK_SFT (0xf << 12) +#define ETDM_0_3_COWORK_CON2_IN0_SDATA0_SEL_SFT 8 +#define ETDM_0_3_COWORK_CON2_IN0_SDATA0_SEL_MASK 0xf +#define ETDM_0_3_COWORK_CON2_IN0_SDATA0_SEL_MASK_SFT (0xf << 8) +#define ETDM_0_3_COWORK_CON2_OUT1_DATA_SEL_SFT 4 +#define ETDM_0_3_COWORK_CON2_OUT1_DATA_SEL_MASK 0xf +#define ETDM_0_3_COWORK_CON2_OUT1_DATA_SEL_MASK_SFT (0xf << 4) +#define ETDM_0_3_COWORK_CON2_OUT0_DATA_SEL_SFT 0 +#define ETDM_0_3_COWORK_CON2_OUT0_DATA_SEL_MASK 0xf +#define ETDM_0_3_COWORK_CON2_OUT0_DATA_SEL_MASK_SFT (0xf << 0) + +/* AFE_SECURE_CONN_ETDM1 */
+#define ETDM_4_7_COWORK_CON1_IN4_SDATA1_15_SEL_SFT 28 +#define ETDM_4_7_COWORK_CON1_IN4_SDATA1_15_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON1_IN4_SDATA1_15_SEL_MASK_SFT (0xf << 28) +#define ETDM_4_7_COWORK_CON1_IN4_SDATA0_SEL_SFT 24 +#define ETDM_4_7_COWORK_CON1_IN4_SDATA0_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON1_IN4_SDATA0_SEL_MASK_SFT (0xf << 24) +#define ETDM_4_7_COWORK_CON1_OUT5_DATA_SEL_SFT 20 +#define ETDM_4_7_COWORK_CON1_OUT5_DATA_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON1_OUT5_DATA_SEL_MASK_SFT (0xf << 20) +#define ETDM_4_7_COWORK_CON1_OUT4_DATA_SEL_SFT 16 +#define ETDM_4_7_COWORK_CON1_OUT4_DATA_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON1_OUT4_DATA_SEL_MASK_SFT (0xf << 16) +#define ETDM_4_7_COWORK_CON1_IN3_SDATA1_15_SEL_SFT 12 +#define ETDM_4_7_COWORK_CON1_IN3_SDATA1_15_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON1_IN3_SDATA1_15_SEL_MASK_SFT (0xf << 12) +#define ETDM_4_7_COWORK_CON1_IN3_SDATA0_SEL_SFT 8 +#define ETDM_4_7_COWORK_CON1_IN3_SDATA0_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON1_IN3_SDATA0_SEL_MASK_SFT (0xf << 8) +#define ETDM_4_7_COWORK_CON1_IN2_SDATA1_15_SEL_SFT 4 +#define ETDM_4_7_COWORK_CON1_IN2_SDATA1_15_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON1_IN2_SDATA1_15_SEL_MASK_SFT (0xf << 4) +#define ETDM_4_7_COWORK_CON1_IN2_SDATA0_SEL_SFT 0 +#define ETDM_4_7_COWORK_CON1_IN2_SDATA0_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON1_IN2_SDATA0_SEL_MASK_SFT (0xf << 0) + +/* AFE_SECURE_CONN_ETDM2 */ +#define ETDM_4_7_COWORK_CON3_IN7_SDATA1_15_SEL_SFT 28 +#define ETDM_4_7_COWORK_CON3_IN7_SDATA1_15_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON3_IN7_SDATA1_15_SEL_MASK_SFT (0xf << 28) +#define ETDM_4_7_COWORK_CON3_IN7_SDATA0_SEL_SFT 24 +#define ETDM_4_7_COWORK_CON3_IN7_SDATA0_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON3_IN7_SDATA0_SEL_MASK_SFT (0xf << 24) +#define ETDM_4_7_COWORK_CON3_IN6_SDATA1_15_SEL_SFT 20 +#define ETDM_4_7_COWORK_CON3_IN6_SDATA1_15_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON3_IN6_SDATA1_15_SEL_MASK_SFT (0xf << 20) +#define 
ETDM_4_7_COWORK_CON3_IN6_SDATA0_SEL_SFT 16 +#define ETDM_4_7_COWORK_CON3_IN6_SDATA0_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON3_IN6_SDATA0_SEL_MASK_SFT (0xf << 16) +#define ETDM_4_7_COWORK_CON3_OUT7_DATA_SEL_SFT 12 +#define ETDM_4_7_COWORK_CON3_OUT7_DATA_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON3_OUT7_DATA_SEL_MASK_SFT (0xf << 12) +#define ETDM_4_7_COWORK_CON3_OUT6_DATA_SEL_SFT 8 +#define ETDM_4_7_COWORK_CON3_OUT6_DATA_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON3_OUT6_DATA_SEL_MASK_SFT (0xf << 8) +#define ETDM_4_7_COWORK_CON3_IN5_SDATA1_15_SEL_SFT 4 +#define ETDM_4_7_COWORK_CON3_IN5_SDATA1_15_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON3_IN5_SDATA1_15_SEL_MASK_SFT (0xf << 4) +#define ETDM_4_7_COWORK_CON3_IN5_SDATA0_SEL_SFT 0 +#define ETDM_4_7_COWORK_CON3_IN5_SDATA0_SEL_MASK 0xf +#define ETDM_4_7_COWORK_CON3_IN5_SDATA0_SEL_MASK_SFT (0xf << 0) + +/* AFE_SECURE_SRAM_CON0 */ +#define SRAM_READ_EN15_NS_SFT 31 +#define SRAM_READ_EN15_NS_MASK 0x1 +#define SRAM_READ_EN15_NS_MASK_SFT (0x1 << 31) +#define SRAM_WRITE_EN15_NS_SFT 30 +#define SRAM_WRITE_EN15_NS_MASK 0x1 +#define SRAM_WRITE_EN15_NS_MASK_SFT (0x1 << 30) +#define SRAM_READ_EN14_NS_SFT 29 +#define SRAM_READ_EN14_NS_MASK 0x1 +#define SRAM_READ_EN14_NS_MASK_SFT (0x1 << 29) +#define SRAM_WRITE_EN14_NS_SFT 28 +#define SRAM_WRITE_EN14_NS_MASK 0x1 +#define SRAM_WRITE_EN14_NS_MASK_SFT (0x1 << 28) +#define SRAM_READ_EN13_NS_SFT 27 +#define SRAM_READ_EN13_NS_MASK 0x1 +#define SRAM_READ_EN13_NS_MASK_SFT (0x1 << 27) +#define SRAM_WRITE_EN13_NS_SFT 26 +#define SRAM_WRITE_EN13_NS_MASK 0x1 +#define SRAM_WRITE_EN13_NS_MASK_SFT (0x1 << 26) +#define SRAM_READ_EN12_NS_SFT 25 +#define SRAM_READ_EN12_NS_MASK 0x1 +#define SRAM_READ_EN12_NS_MASK_SFT (0x1 << 25) +#define SRAM_WRITE_EN12_NS_SFT 24 +#define SRAM_WRITE_EN12_NS_MASK 0x1 +#define SRAM_WRITE_EN12_NS_MASK_SFT (0x1 << 24) +#define SRAM_READ_EN11_NS_SFT 23 +#define SRAM_READ_EN11_NS_MASK 0x1 +#define SRAM_READ_EN11_NS_MASK_SFT (0x1 << 23) +#define SRAM_WRITE_EN11_NS_SFT 22 +#define 
SRAM_WRITE_EN11_NS_MASK 0x1 +#define SRAM_WRITE_EN11_NS_MASK_SFT (0x1 << 22) +#define SRAM_READ_EN10_NS_SFT 21 +#define SRAM_READ_EN10_NS_MASK 0x1 +#define SRAM_READ_EN10_NS_MASK_SFT (0x1 << 21) +#define SRAM_WRITE_EN10_NS_SFT 20 +#define SRAM_WRITE_EN10_NS_MASK 0x1 +#define SRAM_WRITE_EN10_NS_MASK_SFT (0x1 << 20) +#define SRAM_READ_EN9_NS_SFT 19 +#define SRAM_READ_EN9_NS_MASK 0x1 +#define SRAM_READ_EN9_NS_MASK_SFT (0x1 << 19) +#define SRAM_WRITE_EN9_NS_SFT 18 +#define SRAM_WRITE_EN9_NS_MASK 0x1 +#define SRAM_WRITE_EN9_NS_MASK_SFT (0x1 << 18) +#define SRAM_READ_EN8_NS_SFT 17 +#define SRAM_READ_EN8_NS_MASK 0x1 +#define SRAM_READ_EN8_NS_MASK_SFT (0x1 << 17) +#define SRAM_WRITE_EN8_NS_SFT 16 +#define SRAM_WRITE_EN8_NS_MASK 0x1 +#define SRAM_WRITE_EN8_NS_MASK_SFT (0x1 << 16) +#define SRAM_READ_EN7_NS_SFT 15 +#define SRAM_READ_EN7_NS_MASK 0x1 +#define SRAM_READ_EN7_NS_MASK_SFT (0x1 << 15) +#define SRAM_WRITE_EN7_NS_SFT 14 +#define SRAM_WRITE_EN7_NS_MASK 0x1 +#define SRAM_WRITE_EN7_NS_MASK_SFT (0x1 << 14) +#define SRAM_READ_EN6_NS_SFT 13 +#define SRAM_READ_EN6_NS_MASK 0x1 +#define SRAM_READ_EN6_NS_MASK_SFT (0x1 << 13) +#define SRAM_WRITE_EN6_NS_SFT 12 +#define SRAM_WRITE_EN6_NS_MASK 0x1 +#define SRAM_WRITE_EN6_NS_MASK_SFT (0x1 << 12) +#define SRAM_READ_EN5_NS_SFT 11 +#define SRAM_READ_EN5_NS_MASK 0x1 +#define SRAM_READ_EN5_NS_MASK_SFT (0x1 << 11) +#define SRAM_WRITE_EN5_NS_SFT 10 +#define SRAM_WRITE_EN5_NS_MASK 0x1 +#define SRAM_WRITE_EN5_NS_MASK_SFT (0x1 << 10) +#define SRAM_READ_EN4_NS_SFT 9 +#define SRAM_READ_EN4_NS_MASK 0x1 +#define SRAM_READ_EN4_NS_MASK_SFT (0x1 << 9) +#define SRAM_WRITE_EN4_NS_SFT 8 +#define SRAM_WRITE_EN4_NS_MASK 0x1 +#define SRAM_WRITE_EN4_NS_MASK_SFT (0x1 << 8) +#define SRAM_READ_EN3_NS_SFT 7 +#define SRAM_READ_EN3_NS_MASK 0x1 +#define SRAM_READ_EN3_NS_MASK_SFT (0x1 << 7) +#define SRAM_WRITE_EN3_NS_SFT 6 +#define SRAM_WRITE_EN3_NS_MASK 0x1 +#define SRAM_WRITE_EN3_NS_MASK_SFT (0x1 << 6) +#define SRAM_READ_EN2_NS_SFT 5 +#define 
SRAM_READ_EN2_NS_MASK 0x1 +#define SRAM_READ_EN2_NS_MASK_SFT (0x1 << 5) +#define SRAM_WRITE_EN2_NS_SFT 4 +#define SRAM_WRITE_EN2_NS_MASK 0x1 +#define SRAM_WRITE_EN2_NS_MASK_SFT (0x1 << 4) +#define SRAM_READ_EN1_NS_SFT 3 +#define SRAM_READ_EN1_NS_MASK 0x1 +#define SRAM_READ_EN1_NS_MASK_SFT (0x1 << 3) +#define SRAM_WRITE_EN1_NS_SFT 2 +#define SRAM_WRITE_EN1_NS_MASK 0x1 +#define SRAM_WRITE_EN1_NS_MASK_SFT (0x1 << 2) +#define SRAM_READ_EN0_NS_SFT 1 +#define SRAM_READ_EN0_NS_MASK 0x1 +#define SRAM_READ_EN0_NS_MASK_SFT (0x1 << 1) +#define SRAM_WRITE_EN0_NS_SFT 0 +#define SRAM_WRITE_EN0_NS_MASK 0x1 +#define SRAM_WRITE_EN0_NS_MASK_SFT (0x1 << 0) + +/* AFE_SECURE_SRAM_CON1 */ +#define SRAM_READ_EN15_S_SFT 31 +#define SRAM_READ_EN15_S_MASK 0x1 +#define SRAM_READ_EN15_S_MASK_SFT (0x1 << 31) +#define SRAM_WRITE_EN15_S_SFT 30 +#define SRAM_WRITE_EN15_S_MASK 0x1 +#define SRAM_WRITE_EN15_S_MASK_SFT (0x1 << 30) +#define SRAM_READ_EN14_S_SFT 29 +#define SRAM_READ_EN14_S_MASK 0x1 +#define SRAM_READ_EN14_S_MASK_SFT (0x1 << 29) +#define SRAM_WRITE_EN14_S_SFT 28 +#define SRAM_WRITE_EN14_S_MASK 0x1 +#define SRAM_WRITE_EN14_S_MASK_SFT (0x1 << 28) +#define SRAM_READ_EN13_S_SFT 27 +#define SRAM_READ_EN13_S_MASK 0x1 +#define SRAM_READ_EN13_S_MASK_SFT (0x1 << 27) +#define SRAM_WRITE_EN13_S_SFT 26 +#define SRAM_WRITE_EN13_S_MASK 0x1 +#define SRAM_WRITE_EN13_S_MASK_SFT (0x1 << 26) +#define SRAM_READ_EN12_S_SFT 25 +#define SRAM_READ_EN12_S_MASK 0x1 +#define SRAM_READ_EN12_S_MASK_SFT (0x1 << 25) +#define SRAM_WRITE_EN12_S_SFT 24 +#define SRAM_WRITE_EN12_S_MASK 0x1 +#define SRAM_WRITE_EN12_S_MASK_SFT (0x1 << 24) +#define SRAM_READ_EN11_S_SFT 23 +#define SRAM_READ_EN11_S_MASK 0x1 +#define SRAM_READ_EN11_S_MASK_SFT (0x1 << 23) +#define SRAM_WRITE_EN11_S_SFT 22 +#define SRAM_WRITE_EN11_S_MASK 0x1 +#define SRAM_WRITE_EN11_S_MASK_SFT (0x1 << 22) +#define SRAM_READ_EN10_S_SFT 21 +#define SRAM_READ_EN10_S_MASK 0x1 +#define SRAM_READ_EN10_S_MASK_SFT (0x1 << 21) +#define SRAM_WRITE_EN10_S_SFT 20 +#define 
SRAM_WRITE_EN10_S_MASK 0x1 +#define SRAM_WRITE_EN10_S_MASK_SFT (0x1 << 20) +#define SRAM_READ_EN9_S_SFT 19 +#define SRAM_READ_EN9_S_MASK 0x1 +#define SRAM_READ_EN9_S_MASK_SFT (0x1 << 19) +#define SRAM_WRITE_EN9_S_SFT 18 +#define SRAM_WRITE_EN9_S_MASK 0x1 +#define SRAM_WRITE_EN9_S_MASK_SFT (0x1 << 18) +#define SRAM_READ_EN8_S_SFT 17 +#define SRAM_READ_EN8_S_MASK 0x1 +#define SRAM_READ_EN8_S_MASK_SFT (0x1 << 17) +#define SRAM_WRITE_EN8_S_SFT 16 +#define SRAM_WRITE_EN8_S_MASK 0x1 +#define SRAM_WRITE_EN8_S_MASK_SFT (0x1 << 16) +#define SRAM_READ_EN7_S_SFT 15 +#define SRAM_READ_EN7_S_MASK 0x1 +#define SRAM_READ_EN7_S_MASK_SFT (0x1 << 15) +#define SRAM_WRITE_EN7_S_SFT 14 +#define SRAM_WRITE_EN7_S_MASK 0x1 +#define SRAM_WRITE_EN7_S_MASK_SFT (0x1 << 14) +#define SRAM_READ_EN6_S_SFT 13 +#define SRAM_READ_EN6_S_MASK 0x1 +#define SRAM_READ_EN6_S_MASK_SFT (0x1 << 13) +#define SRAM_WRITE_EN6_S_SFT 12 +#define SRAM_WRITE_EN6_S_MASK 0x1 +#define SRAM_WRITE_EN6_S_MASK_SFT (0x1 << 12) +#define SRAM_READ_EN5_S_SFT 11 +#define SRAM_READ_EN5_S_MASK 0x1 +#define SRAM_READ_EN5_S_MASK_SFT (0x1 << 11) +#define SRAM_WRITE_EN5_S_SFT 10 +#define SRAM_WRITE_EN5_S_MASK 0x1 +#define SRAM_WRITE_EN5_S_MASK_SFT (0x1 << 10) +#define SRAM_READ_EN4_S_SFT 9 +#define SRAM_READ_EN4_S_MASK 0x1 +#define SRAM_READ_EN4_S_MASK_SFT (0x1 << 9) +#define SRAM_WRITE_EN4_S_SFT 8 +#define SRAM_WRITE_EN4_S_MASK 0x1 +#define SRAM_WRITE_EN4_S_MASK_SFT (0x1 << 8) +#define SRAM_READ_EN3_S_SFT 7 +#define SRAM_READ_EN3_S_MASK 0x1 +#define SRAM_READ_EN3_S_MASK_SFT (0x1 << 7) +#define SRAM_WRITE_EN3_S_SFT 6 +#define SRAM_WRITE_EN3_S_MASK 0x1 +#define SRAM_WRITE_EN3_S_MASK_SFT (0x1 << 6) +#define SRAM_READ_EN2_S_SFT 5 +#define SRAM_READ_EN2_S_MASK 0x1 +#define SRAM_READ_EN2_S_MASK_SFT (0x1 << 5) +#define SRAM_WRITE_EN2_S_SFT 4 +#define SRAM_WRITE_EN2_S_MASK 0x1 +#define SRAM_WRITE_EN2_S_MASK_SFT (0x1 << 4) +#define SRAM_READ_EN1_S_SFT 3 +#define SRAM_READ_EN1_S_MASK 0x1 +#define SRAM_READ_EN1_S_MASK_SFT (0x1 << 3) +#define 
SRAM_WRITE_EN1_S_SFT 2 +#define SRAM_WRITE_EN1_S_MASK 0x1 +#define SRAM_WRITE_EN1_S_MASK_SFT (0x1 << 2) +#define SRAM_READ_EN0_S_SFT 1 +#define SRAM_READ_EN0_S_MASK 0x1 +#define SRAM_READ_EN0_S_MASK_SFT (0x1 << 1) +#define SRAM_WRITE_EN0_S_SFT 0 +#define SRAM_WRITE_EN0_S_MASK 0x1 +#define SRAM_WRITE_EN0_S_MASK_SFT (0x1 << 0) + +/* AFE_SE_CONN_INPUT_MASK0 */ +#define SECURE_INTRCONN_I0_I31_S_SFT 0 +#define SECURE_INTRCONN_I0_I31_S_MASK 0xffffffff +#define SECURE_INTRCONN_I0_I31_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_INPUT_MASK1 */ +#define SECURE_INTRCONN_I32_I63_S_SFT 0 +#define SECURE_INTRCONN_I32_I63_S_MASK 0xffffffff +#define SECURE_INTRCONN_I32_I63_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_INPUT_MASK2 */ +#define SECURE_INTRCONN_I64_I95_S_SFT 0 +#define SECURE_INTRCONN_I64_I95_S_MASK 0xffffffff +#define SECURE_INTRCONN_I64_I95_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_INPUT_MASK3 */ +#define SECURE_INTRCONN_I96_I127_S_SFT 0 +#define SECURE_INTRCONN_I96_I127_S_MASK 0xffffffff +#define SECURE_INTRCONN_I96_I127_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_INPUT_MASK4 */ +#define SECURE_INTRCONN_I128_I159_S_SFT 0 +#define SECURE_INTRCONN_I128_I159_S_MASK 0xffffffff +#define SECURE_INTRCONN_I128_I159_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_INPUT_MASK5 */ +#define SECURE_INTRCONN_I160_I191_S_SFT 0 +#define SECURE_INTRCONN_I160_I191_S_MASK 0xffffffff +#define SECURE_INTRCONN_I160_I191_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_INPUT_MASK6 */ +#define SECURE_INTRCONN_I192_I223_S_SFT 0 +#define SECURE_INTRCONN_I192_I223_S_MASK 0xffffffff +#define SECURE_INTRCONN_I192_I223_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_INPUT_MASK7 */ +#define SECURE_INTRCONN_I224_I256_S_SFT 0 +#define SECURE_INTRCONN_I224_I256_S_MASK 0xffffffff +#define SECURE_INTRCONN_I224_I256_S_MASK_SFT (0xffffffff << 0) + +/* AFE_NON_SE_CONN_INPUT_MASK0 */ +#define NORMAL_INTRCONN_I0_I31_S_SFT 0 +#define NORMAL_INTRCONN_I0_I31_S_MASK 0xffffffff +#define 
NORMAL_INTRCONN_I0_I31_S_MASK_SFT (0xffffffff << 0) + +/* AFE_NON_SE_CONN_INPUT_MASK1 */ +#define NORMAL_INTRCONN_I32_I63_S_SFT 0 +#define NORMAL_INTRCONN_I32_I63_S_MASK 0xffffffff +#define NORMAL_INTRCONN_I32_I63_S_MASK_SFT (0xffffffff << 0) + +/* AFE_NON_SE_CONN_INPUT_MASK2 */ +#define NORMAL_INTRCONN_I64_I95_S_SFT 0 +#define NORMAL_INTRCONN_I64_I95_S_MASK 0xffffffff +#define NORMAL_INTRCONN_I64_I95_S_MASK_SFT (0xffffffff << 0) + +/* AFE_NON_SE_CONN_INPUT_MASK3 */ +#define NORMAL_INTRCONN_I96_I127_S_SFT 0 +#define NORMAL_INTRCONN_I96_I127_S_MASK 0xffffffff +#define NORMAL_INTRCONN_I96_I127_S_MASK_SFT (0xffffffff << 0) + +/* AFE_NON_SE_CONN_INPUT_MASK4 */ +#define NORMAL_INTRCONN_I128_I159_S_SFT 0 +#define NORMAL_INTRCONN_I128_I159_S_MASK 0xffffffff +#define NORMAL_INTRCONN_I128_I159_S_MASK_SFT (0xffffffff << 0) + +/* AFE_NON_SE_CONN_INPUT_MASK5 */ +#define NORMAL_INTRCONN_I160_I191_S_SFT 0 +#define NORMAL_INTRCONN_I160_I191_S_MASK 0xffffffff +#define NORMAL_INTRCONN_I160_I191_S_MASK_SFT (0xffffffff << 0) + +/* AFE_NON_SE_CONN_INPUT_MASK6 */ +#define NORMAL_INTRCONN_I192_I223_S_SFT 0 +#define NORMAL_INTRCONN_I192_I223_S_MASK 0xffffffff +#define NORMAL_INTRCONN_I192_I223_S_MASK_SFT (0xffffffff << 0) + +/* AFE_NON_SE_CONN_INPUT_MASK7 */ +#define NORMAL_INTRCONN_I224_I256_S_SFT 0 +#define NORMAL_INTRCONN_I224_I256_S_MASK 0xffffffff +#define NORMAL_INTRCONN_I224_I256_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_OUTPUT_SEL0 */ +#define SECURE_INTRCONN_O0_O31_S_SFT 0 +#define SECURE_INTRCONN_O0_O31_S_MASK 0xffffffff +#define SECURE_INTRCONN_O0_O31_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_OUTPUT_SEL1 */ +#define SECURE_INTRCONN_O32_O63_S_SFT 0 +#define SECURE_INTRCONN_O32_O63_S_MASK 0xffffffff +#define SECURE_INTRCONN_O32_O63_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_OUTPUT_SEL2 */ +#define SECURE_INTRCONN_O64_O95_S_SFT 0 +#define SECURE_INTRCONN_O64_O95_S_MASK 0xffffffff +#define SECURE_INTRCONN_O64_O95_S_MASK_SFT (0xffffffff << 0) + +/* 
AFE_SE_CONN_OUTPUT_SEL3 */ +#define SECURE_INTRCONN_O96_O127_S_SFT 0 +#define SECURE_INTRCONN_O96_O127_S_MASK 0xffffffff +#define SECURE_INTRCONN_O96_O127_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_OUTPUT_SEL4 */ +#define SECURE_INTRCONN_O128_O159_S_SFT 0 +#define SECURE_INTRCONN_O128_O159_S_MASK 0xffffffff +#define SECURE_INTRCONN_O128_O159_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_OUTPUT_SEL5 */ +#define SECURE_INTRCONN_O160_O191_S_SFT 0 +#define SECURE_INTRCONN_O160_O191_S_MASK 0xffffffff +#define SECURE_INTRCONN_O160_O191_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_OUTPUT_SEL6 */ +#define SECURE_INTRCONN_O192_O223_S_SFT 0 +#define SECURE_INTRCONN_O192_O223_S_MASK 0xffffffff +#define SECURE_INTRCONN_O192_O223_S_MASK_SFT (0xffffffff << 0) + +/* AFE_SE_CONN_OUTPUT_SEL7 */ +#define SECURE_INTRCONN_O224_O256_S_SFT 0 +#define SECURE_INTRCONN_O224_O256_S_MASK 0xffffffff +#define SECURE_INTRCONN_O224_O256_S_MASK_SFT (0xffffffff << 0) + +/* AFE_PCM0_INTF_CON1_MASK_MON */ +#define AFE_PCM0_INTF_CON1_MASK_MON_SFT 0 +#define AFE_PCM0_INTF_CON1_MASK_MON_MASK 0xffffffff +#define AFE_PCM0_INTF_CON1_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_PCM0_INTF_CON0_MASK_MON */ +#define AFE_PCM0_INTF_CON0_MASK_MON_SFT 0 +#define AFE_PCM0_INTF_CON0_MASK_MON_MASK 0xffffffff +#define AFE_PCM0_INTF_CON0_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_CONNSYS_I2S_CON_MASK_MON */ +#define AFE_CONNSYS_I2S_CON_MASK_MON_SFT 0 +#define AFE_CONNSYS_I2S_CON_MASK_MON_MASK 0xffffffff +#define AFE_CONNSYS_I2S_CON_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_TDM_CON2_MASK_MON */ +#define AFE_TDM_CON2_MASK_MON_SFT 0 +#define AFE_TDM_CON2_MASK_MON_MASK 0xffffffff +#define AFE_TDM_CON2_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_MTKAIF0_CFG0_MASK_MON */ +#define AFE_MTKAIF0_CFG0_MASK_MON_SFT 0 +#define AFE_MTKAIF0_CFG0_MASK_MON_MASK 0xffffffff +#define AFE_MTKAIF0_CFG0_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_MTKAIF1_CFG0_MASK_MON */ +#define AFE_MTKAIF1_CFG0_MASK_MON_SFT 0 +#define 
AFE_MTKAIF1_CFG0_MASK_MON_MASK 0xffffffff +#define AFE_MTKAIF1_CFG0_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL0_SRC_CON0_MASK_MON */ +#define AFE_ADDA_UL0_SRC_CON0_MASK_MON_SFT 0 +#define AFE_ADDA_UL0_SRC_CON0_MASK_MON_MASK 0xffffffff +#define AFE_ADDA_UL0_SRC_CON0_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL1_SRC_CON0_MASK_MON */ +#define AFE_ADDA_UL1_SRC_CON0_MASK_MON_SFT 0 +#define AFE_ADDA_UL1_SRC_CON0_MASK_MON_MASK 0xffffffff +#define AFE_ADDA_UL1_SRC_CON0_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_ADDA_UL2_SRC_CON0_MASK_MON */ +#define AFE_ADDA_UL2_SRC_CON0_MASK_MON_SFT 0 +#define AFE_ADDA_UL2_SRC_CON0_MASK_MON_MASK 0xffffffff +#define AFE_ADDA_UL2_SRC_CON0_MASK_MON_MASK_SFT (0xffffffff << 0) + +/* AFE_ASRC_NEW_CON0 */ +#define ONE_HEART_SFT 31 +#define ONE_HEART_MASK 0x1 +#define ONE_HEART_MASK_SFT (0x1 << 31) +#define CHSET0_OFS_ONE_HEART_DISABLE_SFT 30 +#define CHSET0_OFS_ONE_HEART_DISABLE_MASK 0x1 +#define CHSET0_OFS_ONE_HEART_DISABLE_MASK_SFT (0x1 << 30) +#define USE_SHORT_DELAY_COEFF_SFT 29 +#define USE_SHORT_DELAY_COEFF_MASK 0x1 +#define USE_SHORT_DELAY_COEFF_MASK_SFT (0x1 << 29) +#define CHSET0_O16BIT_SFT 19 +#define CHSET0_O16BIT_MASK 0x1 +#define CHSET0_O16BIT_MASK_SFT (0x1 << 19) +#define CHSET0_CLR_IIR_HISTORY_SFT 17 +#define CHSET0_CLR_IIR_HISTORY_MASK 0x1 +#define CHSET0_CLR_IIR_HISTORY_MASK_SFT (0x1 << 17) +#define CHSET0_IS_MONO_SFT 16 +#define CHSET0_IS_MONO_MASK 0x1 +#define CHSET0_IS_MONO_MASK_SFT (0x1 << 16) +#define CHSET0_OFS_SEL_SFT 14 +#define CHSET0_OFS_SEL_MASK 0x3 +#define CHSET0_OFS_SEL_MASK_SFT (0x3 << 14) +#define CHSET0_IFS_SEL_SFT 12 +#define CHSET0_IFS_SEL_MASK 0x3 +#define CHSET0_IFS_SEL_MASK_SFT (0x3 << 12) +#define CHSET0_IIR_EN_SFT 11 +#define CHSET0_IIR_EN_MASK 0x1 +#define CHSET0_IIR_EN_MASK_SFT (0x1 << 11) +#define CHSET0_IIR_STAGE_SFT 8 +#define CHSET0_IIR_STAGE_MASK 0x7 +#define CHSET0_IIR_STAGE_MASK_SFT (0x7 << 8) +#define ASM_ON_MOD_SFT 7 +#define ASM_ON_MOD_MASK 0x1 +#define 
ASM_ON_MOD_MASK_SFT (0x1 << 7) +#define CHSET_STR_CLR_SFT 4 +#define CHSET_STR_CLR_MASK 0x1 +#define CHSET_STR_CLR_MASK_SFT (0x1 << 4) +#define CHSET_ON_SFT 2 +#define CHSET_ON_MASK 0x1 +#define CHSET_ON_MASK_SFT (0x1 << 2) +#define COEFF_SRAM_CTRL_SFT 1 +#define COEFF_SRAM_CTRL_MASK 0x1 +#define COEFF_SRAM_CTRL_MASK_SFT (0x1 << 1) +#define ASM_ON_SFT 0 +#define ASM_ON_MASK 0x1 +#define ASM_ON_MASK_SFT (0x1 << 0) + +/* AFE_ASRC_NEW_CON1 */ +#define ASM_FREQ_0_SFT 0 +#define ASM_FREQ_0_MASK 0xffffff +#define ASM_FREQ_0_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_CON2 */ +#define ASM_FREQ_1_SFT 0 +#define ASM_FREQ_1_MASK 0xffffff +#define ASM_FREQ_1_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_CON3 */ +#define ASM_FREQ_2_SFT 0 +#define ASM_FREQ_2_MASK 0xffffff +#define ASM_FREQ_2_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_CON4 */ +#define ASM_FREQ_3_SFT 0 +#define ASM_FREQ_3_MASK 0xffffff +#define ASM_FREQ_3_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_CON5 */ +#define OUT_EN_SEL_DOMAIN_SFT 29 +#define OUT_EN_SEL_DOMAIN_MASK 0x7 +#define OUT_EN_SEL_DOMAIN_MASK_SFT (0x7 << 29) +#define OUT_EN_SEL_FS_SFT 24 +#define OUT_EN_SEL_FS_MASK 0x1f +#define OUT_EN_SEL_FS_MASK_SFT (0x1f << 24) +#define IN_EN_SEL_DOMAIN_SFT 21 +#define IN_EN_SEL_DOMAIN_MASK 0x7 +#define IN_EN_SEL_DOMAIN_MASK_SFT (0x7 << 21) +#define IN_EN_SEL_FS_SFT 16 +#define IN_EN_SEL_FS_MASK 0x1f +#define IN_EN_SEL_FS_MASK_SFT (0x1f << 16) +#define RESULT_SEL_SFT 8 +#define RESULT_SEL_MASK 0x7 +#define RESULT_SEL_MASK_SFT (0x7 << 8) +#define CALI_CK_SEL_SFT 4 +#define CALI_CK_SEL_MASK 0x7 +#define CALI_CK_SEL_MASK_SFT (0x7 << 4) +#define CALI_LRCK_SEL_SFT 1 +#define CALI_LRCK_SEL_MASK 0x7 +#define CALI_LRCK_SEL_MASK_SFT (0x7 << 1) +#define SOFT_RESET_SFT 0 +#define SOFT_RESET_MASK 0x1 +#define SOFT_RESET_MASK_SFT (0x1 << 0) + +/* AFE_ASRC_NEW_CON6 */ +#define FREQ_CALI_CYCLE_SFT 16 +#define FREQ_CALI_CYCLE_MASK 0xffff +#define FREQ_CALI_CYCLE_MASK_SFT (0xffff << 16) +#define FREQ_CALI_AUTORST_EN_SFT 15 
+#define FREQ_CALI_AUTORST_EN_MASK 0x1 +#define FREQ_CALI_AUTORST_EN_MASK_SFT (0x1 << 15) +#define CALI_AUTORST_DETECT_SFT 14 +#define CALI_AUTORST_DETECT_MASK 0x1 +#define CALI_AUTORST_DETECT_MASK_SFT (0x1 << 14) +#define FREQ_CALC_RUNNING_SFT 13 +#define FREQ_CALC_RUNNING_MASK 0x1 +#define FREQ_CALC_RUNNING_MASK_SFT (0x1 << 13) +#define AUTO_TUNE_FREQ3_SFT 12 +#define AUTO_TUNE_FREQ3_MASK 0x1 +#define AUTO_TUNE_FREQ3_MASK_SFT (0x1 << 12) +#define COMP_FREQ_RES_EN_SFT 11 +#define COMP_FREQ_RES_EN_MASK 0x1 +#define COMP_FREQ_RES_EN_MASK_SFT (0x1 << 11) +#define FREQ_CALI_SEL_SFT 8 +#define FREQ_CALI_SEL_MASK 0x3 +#define FREQ_CALI_SEL_MASK_SFT (0x3 << 8) +#define FREQ_CALI_BP_DGL_SFT 7 +#define FREQ_CALI_BP_DGL_MASK 0x1 +#define FREQ_CALI_BP_DGL_MASK_SFT (0x1 << 7) +#define FREQ_CALI_MAX_GWIDTH_SFT 4 +#define FREQ_CALI_MAX_GWIDTH_MASK 0x7 +#define FREQ_CALI_MAX_GWIDTH_MASK_SFT (0x7 << 4) +#define AUTO_TUNE_FREQ2_SFT 3 +#define AUTO_TUNE_FREQ2_MASK 0x1 +#define AUTO_TUNE_FREQ2_MASK_SFT (0x1 << 3) +#define FREQ_CALI_AUTO_RESTART_SFT 2 +#define FREQ_CALI_AUTO_RESTART_MASK 0x1 +#define FREQ_CALI_AUTO_RESTART_MASK_SFT (0x1 << 2) +#define CALI_USE_FREQ_OUT_SFT 1 +#define CALI_USE_FREQ_OUT_MASK 0x1 +#define CALI_USE_FREQ_OUT_MASK_SFT (0x1 << 1) +#define CALI_EN_SFT 0 +#define CALI_EN_MASK 0x1 +#define CALI_EN_MASK_SFT (0x1 << 0) + +/* AFE_ASRC_NEW_CON7 */ +#define FREQ_CALC_DENOMINATOR_SFT 0 +#define FREQ_CALC_DENOMINATOR_MASK 0xffffff +#define FREQ_CALC_DENOMINATOR_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_CON8 */ +#define PRD_CALI_RESULT_RECORD_SFT 0 +#define PRD_CALI_RESULT_RECORD_MASK 0xffffff +#define PRD_CALI_RESULT_RECORD_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_CON9 */ +#define FREQ_CALI_RESULT_SFT 0 +#define FREQ_CALI_RESULT_MASK 0xffffff +#define FREQ_CALI_RESULT_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_CON10 */ +#define COEFF_SRAM_DATA_SFT 0 +#define COEFF_SRAM_DATA_MASK 0xffffffff +#define COEFF_SRAM_DATA_MASK_SFT (0xffffffff << 0) + +/* 
AFE_ASRC_NEW_CON11 */ +#define COEFF_SRAM_ADR_SFT 0 +#define COEFF_SRAM_ADR_MASK 0x3f +#define COEFF_SRAM_ADR_MASK_SFT (0x3f << 0) + +/* AFE_ASRC_NEW_CON12 */ +#define RING_DBG_RD_SFT 0 +#define RING_DBG_RD_MASK 0x3ffffff +#define RING_DBG_RD_MASK_SFT (0x3ffffff << 0) + +/* AFE_ASRC_NEW_CON13 */ +#define FREQ_CALI_AUTORST_TH_HIGH_SFT 0 +#define FREQ_CALI_AUTORST_TH_HIGH_MASK 0xffffff +#define FREQ_CALI_AUTORST_TH_HIGH_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_CON14 */ +#define FREQ_CALI_AUTORST_TH_LOW_SFT 0 +#define FREQ_CALI_AUTORST_TH_LOW_MASK 0xffffff +#define FREQ_CALI_AUTORST_TH_LOW_MASK_SFT (0xffffff << 0) + +/* AFE_ASRC_NEW_IP_VERSION */ +#define IP_VERSION_SFT 0 +#define IP_VERSION_MASK 0xffffffff +#define IP_VERSION_MASK_SFT (0xffffffff << 0) + +#define AUDIO_TOP_CON0 0x0 +#define AUDIO_TOP_CON1 0x4 +#define AUDIO_TOP_CON2 0x8 +#define AUDIO_TOP_CON3 0xc +#define AUDIO_TOP_CON4 0x10 +#define AUDIO_ENGEN_CON0 0x14 +#define AUDIO_ENGEN_CON0_USER1 0x18 +#define AUDIO_ENGEN_CON0_USER2 0x1c +#define AFE_SINEGEN_CON0 0x20 +#define AFE_SINEGEN_CON1 0x24 +#define AFE_SINEGEN_CON2 0x28 +#define AFE_SINEGEN_CON3 0x2c +#define AFE_APLL1_TUNER_CFG 0x30 +#define AFE_APLL1_TUNER_MON0 0x34 +#define AFE_APLL2_TUNER_CFG 0x38 +#define AFE_APLL2_TUNER_MON0 0x3c +#define AUDIO_TOP_RG0 0x4c +#define AUDIO_TOP_RG1 0x50 +#define AUDIO_TOP_RG2 0x54 +#define AUDIO_TOP_RG3 0x58 +#define AUDIO_TOP_RG4 0x5c +#define AFE_SPM_CONTROL_REQ 0x60 +#define AFE_SPM_CONTROL_ACK 0x64 +#define AUD_TOP_CFG_VCORE_RG 0x68 +#define AUDIO_TOP_IP_VERSION 0x6c +#define AUDIO_ENGEN_CON0_MON 0x7c +#define AUD_TOP_CFG_VLP_RG 0x98 +#define AUD_TOP_MON_RG 0x9c +#define AUDIO_USE_DEFAULT_DELSEL0 0xa0 +#define AUDIO_USE_DEFAULT_DELSEL1 0xa4 +#define AUDIO_USE_DEFAULT_DELSEL2 0xa8 +#define AFE_CONNSYS_I2S_IPM_VER_MON 0xb0 +#define AFE_CONNSYS_I2S_MON_SEL 0xb4 +#define AFE_CONNSYS_I2S_MON 0xb8 +#define AFE_CONNSYS_I2S_CON 0xbc +#define AFE_PCM0_INTF_CON0 0xc0 +#define AFE_PCM0_INTF_CON1 0xc4 +#define 
AFE_PCM_INTF_MON 0xc8 +#define AFE_PCM1_INTF_CON0 0xd0 +#define AFE_PCM1_INTF_CON1 0xd4 +#define AFE_PCM_TOP_IP_VERSION 0xe8 +#define AFE_IRQ_MCU_EN 0x100 +#define AFE_IRQ_MCU_DSP_EN 0x104 +#define AFE_IRQ_MCU_DSP2_EN 0x108 +#define AFE_IRQ_MCU_SCP_EN 0x10c +#define AFE_CUSTOM_IRQ_MCU_EN 0x110 +#define AFE_CUSTOM_IRQ_MCU_DSP_EN 0x114 +#define AFE_CUSTOM_IRQ_MCU_DSP2_EN 0x118 +#define AFE_CUSTOM_IRQ_MCU_SCP_EN 0x11c +#define AFE_IRQ_MCU_STATUS 0x120 +#define AFE_CUSTOM_IRQ_MCU_STATUS 0x124 +#define AFE_IRQ0_MCU_CFG0 0x140 +#define AFE_IRQ0_MCU_CFG1 0x144 +#define AFE_IRQ1_MCU_CFG0 0x148 +#define AFE_IRQ1_MCU_CFG1 0x14c +#define AFE_IRQ2_MCU_CFG0 0x150 +#define AFE_IRQ2_MCU_CFG1 0x154 +#define AFE_IRQ3_MCU_CFG0 0x158 +#define AFE_IRQ3_MCU_CFG1 0x15c +#define AFE_IRQ4_MCU_CFG0 0x160 +#define AFE_IRQ4_MCU_CFG1 0x164 +#define AFE_IRQ5_MCU_CFG0 0x168 +#define AFE_IRQ5_MCU_CFG1 0x16c +#define AFE_IRQ6_MCU_CFG0 0x170 +#define AFE_IRQ6_MCU_CFG1 0x174 +#define AFE_IRQ7_MCU_CFG0 0x178 +#define AFE_IRQ7_MCU_CFG1 0x17c +#define AFE_IRQ8_MCU_CFG0 0x180 +#define AFE_IRQ8_MCU_CFG1 0x184 +#define AFE_IRQ9_MCU_CFG0 0x188 +#define AFE_IRQ9_MCU_CFG1 0x18c +#define AFE_IRQ10_MCU_CFG0 0x190 +#define AFE_IRQ10_MCU_CFG1 0x194 +#define AFE_IRQ11_MCU_CFG0 0x198 +#define AFE_IRQ11_MCU_CFG1 0x19c +#define AFE_IRQ12_MCU_CFG0 0x1a0 +#define AFE_IRQ12_MCU_CFG1 0x1a4 +#define AFE_IRQ13_MCU_CFG0 0x1a8 +#define AFE_IRQ13_MCU_CFG1 0x1ac +#define AFE_IRQ14_MCU_CFG0 0x1b0 +#define AFE_IRQ14_MCU_CFG1 0x1b4 +#define AFE_IRQ15_MCU_CFG0 0x1b8 +#define AFE_IRQ15_MCU_CFG1 0x1bc +#define AFE_IRQ16_MCU_CFG0 0x1c0 +#define AFE_IRQ16_MCU_CFG1 0x1c4 +#define AFE_IRQ17_MCU_CFG0 0x1c8 +#define AFE_IRQ17_MCU_CFG1 0x1cc +#define AFE_IRQ18_MCU_CFG0 0x1d0 +#define AFE_IRQ18_MCU_CFG1 0x1d4 +#define AFE_IRQ19_MCU_CFG0 0x1d8 +#define AFE_IRQ19_MCU_CFG1 0x1dc +#define AFE_IRQ20_MCU_CFG0 0x1e0 +#define AFE_IRQ20_MCU_CFG1 0x1e4 +#define AFE_IRQ21_MCU_CFG0 0x1e8 +#define AFE_IRQ21_MCU_CFG1 0x1ec +#define AFE_IRQ22_MCU_CFG0 
0x1f0 +#define AFE_IRQ22_MCU_CFG1 0x1f4 +#define AFE_IRQ23_MCU_CFG0 0x1f8 +#define AFE_IRQ23_MCU_CFG1 0x1fc +#define AFE_IRQ24_MCU_CFG0 0x200 +#define AFE_IRQ24_MCU_CFG1 0x204 +#define AFE_IRQ25_MCU_CFG0 0x208 +#define AFE_IRQ25_MCU_CFG1 0x20c +#define AFE_IRQ26_MCU_CFG0 0x210 +#define AFE_IRQ26_MCU_CFG1 0x214 +#define AFE_CUSTOM_IRQ0_MCU_CFG0 0x268 +#define AFE_IRQ_MCU_MON0 0x300 +#define AFE_IRQ_MCU_MON1 0x304 +#define AFE_IRQ_MCU_MON2 0x308 +#define AFE_IRQ0_CNT_MON 0x310 +#define AFE_IRQ1_CNT_MON 0x314 +#define AFE_IRQ2_CNT_MON 0x318 +#define AFE_IRQ3_CNT_MON 0x31c +#define AFE_IRQ4_CNT_MON 0x320 +#define AFE_IRQ5_CNT_MON 0x324 +#define AFE_IRQ6_CNT_MON 0x328 +#define AFE_IRQ7_CNT_MON 0x32c +#define AFE_IRQ8_CNT_MON 0x330 +#define AFE_IRQ9_CNT_MON 0x334 +#define AFE_IRQ10_CNT_MON 0x338 +#define AFE_IRQ11_CNT_MON 0x33c +#define AFE_IRQ12_CNT_MON 0x340 +#define AFE_IRQ13_CNT_MON 0x344 +#define AFE_IRQ14_CNT_MON 0x348 +#define AFE_IRQ15_CNT_MON 0x34c +#define AFE_IRQ16_CNT_MON 0x350 +#define AFE_IRQ17_CNT_MON 0x354 +#define AFE_IRQ18_CNT_MON 0x358 +#define AFE_IRQ19_CNT_MON 0x35c +#define AFE_IRQ20_CNT_MON 0x360 +#define AFE_IRQ21_CNT_MON 0x364 +#define AFE_IRQ22_CNT_MON 0x368 +#define AFE_IRQ23_CNT_MON 0x36c +#define AFE_IRQ24_CNT_MON 0x370 +#define AFE_IRQ25_CNT_MON 0x374 +#define AFE_IRQ26_CNT_MON 0x378 +#define AFE_CUSTOM_IRQ0_CNT_MON 0x390 +#define AFE_CUSTOM_IRQ0_MCU_CFG1 0x3dc +#define AFE_GAIN0_CON0 0x400 +#define AFE_GAIN0_CON1_R 0x404 +#define AFE_GAIN0_CON1_L 0x408 +#define AFE_GAIN0_CON2 0x40c +#define AFE_GAIN0_CON3 0x410 +#define AFE_GAIN0_CUR_R 0x414 +#define AFE_GAIN0_CUR_L 0x418 +#define AFE_GAIN1_CON0 0x41c +#define AFE_GAIN1_CON1_R 0x420 +#define AFE_GAIN1_CON1_L 0x424 +#define AFE_GAIN1_CON2 0x428 +#define AFE_GAIN1_CON3 0x42c +#define AFE_GAIN1_CUR_R 0x430 +#define AFE_GAIN1_CUR_L 0x434 +#define AFE_GAIN2_CON0 0x438 +#define AFE_GAIN2_CON1_R 0x43c +#define AFE_GAIN2_CON1_L 0x440 +#define AFE_GAIN2_CON2 0x444 +#define AFE_GAIN2_CON3 0x448 
+#define AFE_GAIN2_CUR_R 0x44c +#define AFE_GAIN2_CUR_L 0x450 +#define AFE_GAIN3_CON0 0x454 +#define AFE_GAIN3_CON1_R 0x458 +#define AFE_GAIN3_CON1_L 0x45c +#define AFE_GAIN3_CON2 0x460 +#define AFE_GAIN3_CON3 0x464 +#define AFE_GAIN3_CUR_R 0x468 +#define AFE_GAIN3_CUR_L 0x46c +#define AFE_STF_CON0 0xb80 +#define AFE_STF_CON1 0xb84 +#define AFE_STF_COEFF 0xb88 +#define AFE_STF_GAIN 0xb8c +#define AFE_STF_MON 0xb90 +#define AFE_STF_IP_VERSION 0xb94 +#define AFE_CM0_CON0 0xba0 +#define AFE_CM0_MON 0xba4 +#define AFE_CM0_IP_VERSION 0xba8 +#define AFE_CM1_CON0 0xbb0 +#define AFE_CM1_MON 0xbb4 +#define AFE_CM1_IP_VERSION 0xbb8 +#define AFE_CM2_CON0 0xbc0 +#define AFE_CM2_MON 0xbc4 +#define AFE_CM2_IP_VERSION 0xbc8 +#define AFE_ADDA_UL0_SRC_CON0 0xbd0 +#define AFE_ADDA_UL0_SRC_CON1 0xbd4 +#define AFE_ADDA_UL0_SRC_CON2 0xbd8 +#define AFE_ADDA_UL0_SRC_DEBUG 0xbdc +#define AFE_ADDA_UL0_SRC_DEBUG_MON0 0xbe0 +#define AFE_ADDA_UL0_SRC_MON0 0xbe4 +#define AFE_ADDA_UL0_SRC_MON1 0xbe8 +#define AFE_ADDA_UL0_IIR_COEF_02_01 0xbec +#define AFE_ADDA_UL0_IIR_COEF_04_03 0xbf0 +#define AFE_ADDA_UL0_IIR_COEF_06_05 0xbf4 +#define AFE_ADDA_UL0_IIR_COEF_08_07 0xbf8 +#define AFE_ADDA_UL0_IIR_COEF_10_09 0xbfc +#define AFE_ADDA_UL0_ULCF_CFG_02_01 0xc00 +#define AFE_ADDA_UL0_ULCF_CFG_04_03 0xc04 +#define AFE_ADDA_UL0_ULCF_CFG_06_05 0xc08 +#define AFE_ADDA_UL0_ULCF_CFG_08_07 0xc0c +#define AFE_ADDA_UL0_ULCF_CFG_10_09 0xc10 +#define AFE_ADDA_UL0_ULCF_CFG_12_11 0xc14 +#define AFE_ADDA_UL0_ULCF_CFG_14_13 0xc18 +#define AFE_ADDA_UL0_ULCF_CFG_16_15 0xc1c +#define AFE_ADDA_UL0_ULCF_CFG_18_17 0xc20 +#define AFE_ADDA_UL0_ULCF_CFG_20_19 0xc24 +#define AFE_ADDA_UL0_ULCF_CFG_22_21 0xc28 +#define AFE_ADDA_UL0_ULCF_CFG_24_23 0xc2c +#define AFE_ADDA_UL0_ULCF_CFG_26_25 0xc30 +#define AFE_ADDA_UL0_ULCF_CFG_28_27 0xc34 +#define AFE_ADDA_UL0_ULCF_CFG_30_29 0xc38 +#define AFE_ADDA_UL0_ULCF_CFG_32_31 0xc3c +#define AFE_ADDA_UL0_IP_VERSION 0xc4c +#define AFE_ADDA_UL1_SRC_CON0 0xc50 +#define AFE_ADDA_UL1_SRC_CON1 
0xc54 +#define AFE_ADDA_UL1_SRC_CON2 0xc58 +#define AFE_ADDA_UL1_SRC_DEBUG 0xc5c +#define AFE_ADDA_UL1_SRC_DEBUG_MON0 0xc60 +#define AFE_ADDA_UL1_SRC_MON0 0xc64 +#define AFE_ADDA_UL1_SRC_MON1 0xc68 +#define AFE_ADDA_UL1_IIR_COEF_02_01 0xc6c +#define AFE_ADDA_UL1_IIR_COEF_04_03 0xc70 +#define AFE_ADDA_UL1_IIR_COEF_06_05 0xc74 +#define AFE_ADDA_UL1_IIR_COEF_08_07 0xc78 +#define AFE_ADDA_UL1_IIR_COEF_10_09 0xc7c +#define AFE_ADDA_UL1_ULCF_CFG_02_01 0xc80 +#define AFE_ADDA_UL1_ULCF_CFG_04_03 0xc84 +#define AFE_ADDA_UL1_ULCF_CFG_06_05 0xc88 +#define AFE_ADDA_UL1_ULCF_CFG_08_07 0xc8c +#define AFE_ADDA_UL1_ULCF_CFG_10_09 0xc90 +#define AFE_ADDA_UL1_ULCF_CFG_12_11 0xc94 +#define AFE_ADDA_UL1_ULCF_CFG_14_13 0xc98 +#define AFE_ADDA_UL1_ULCF_CFG_16_15 0xc9c +#define AFE_ADDA_UL1_ULCF_CFG_18_17 0xca0 +#define AFE_ADDA_UL1_ULCF_CFG_20_19 0xca4 +#define AFE_ADDA_UL1_ULCF_CFG_22_21 0xca8 +#define AFE_ADDA_UL1_ULCF_CFG_24_23 0xcac +#define AFE_ADDA_UL1_ULCF_CFG_26_25 0xcb0 +#define AFE_ADDA_UL1_ULCF_CFG_28_27 0xcb4 +#define AFE_ADDA_UL1_ULCF_CFG_30_29 0xcb8 +#define AFE_ADDA_UL1_ULCF_CFG_32_31 0xcbc +#define AFE_ADDA_UL1_IP_VERSION 0xccc +#define AFE_ADDA_UL2_SRC_CON0 0xcd0 +#define AFE_ADDA_UL2_SRC_CON1 0xcd4 +#define AFE_ADDA_UL2_SRC_CON2 0xcd8 +#define AFE_ADDA_UL2_SRC_DEBUG 0xcdc +#define AFE_ADDA_UL2_SRC_DEBUG_MON0 0xce0 +#define AFE_ADDA_UL2_SRC_MON0 0xce4 +#define AFE_ADDA_UL2_SRC_MON1 0xce8 +#define AFE_ADDA_UL2_IIR_COEF_02_01 0xcec +#define AFE_ADDA_UL2_IIR_COEF_04_03 0xcf0 +#define AFE_ADDA_UL2_IIR_COEF_06_05 0xcf4 +#define AFE_ADDA_UL2_IIR_COEF_08_07 0xcf8 +#define AFE_ADDA_UL2_IIR_COEF_10_09 0xcfc +#define AFE_ADDA_UL2_ULCF_CFG_02_01 0xd00 +#define AFE_ADDA_UL2_ULCF_CFG_04_03 0xd04 +#define AFE_ADDA_UL2_ULCF_CFG_06_05 0xd08 +#define AFE_ADDA_UL2_ULCF_CFG_08_07 0xd0c +#define AFE_ADDA_UL2_ULCF_CFG_10_09 0xd10 +#define AFE_ADDA_UL2_ULCF_CFG_12_11 0xd14 +#define AFE_ADDA_UL2_ULCF_CFG_14_13 0xd18 +#define AFE_ADDA_UL2_ULCF_CFG_16_15 0xd1c +#define 
AFE_ADDA_UL2_ULCF_CFG_18_17 0xd20 +#define AFE_ADDA_UL2_ULCF_CFG_20_19 0xd24 +#define AFE_ADDA_UL2_ULCF_CFG_22_21 0xd28 +#define AFE_ADDA_UL2_ULCF_CFG_24_23 0xd2c +#define AFE_ADDA_UL2_ULCF_CFG_26_25 0xd30 +#define AFE_ADDA_UL2_ULCF_CFG_28_27 0xd34 +#define AFE_ADDA_UL2_ULCF_CFG_30_29 0xd38 +#define AFE_ADDA_UL2_ULCF_CFG_32_31 0xd3c +#define AFE_ADDA_UL2_IP_VERSION 0xd4c +#define AFE_ADDA_PROXIMITY_CON0 0xed0 +#define AFE_ADDA_ULSRC_PHASE_CON0 0xf00 +#define AFE_ADDA_ULSRC_PHASE_CON1 0xf04 +#define AFE_ADDA_ULSRC_PHASE_CON2 0xf08 +#define AFE_ADDA_ULSRC_PHASE_CON3 0xf0c +#define AFE_MTKAIF_IPM_VER_MON 0x1180 +#define AFE_MTKAIF_MON_SEL 0x1184 +#define AFE_MTKAIF_MON 0x1188 +#define AFE_MTKAIF0_CFG0 0x1190 +#define AFE_MTKAIF0_TX_CFG0 0x1194 +#define AFE_MTKAIF0_RX_CFG0 0x1198 +#define AFE_MTKAIF0_RX_CFG1 0x119c +#define AFE_MTKAIF0_RX_CFG2 0x11a0 +#define AFE_MTKAIF1_CFG0 0x11f0 +#define AFE_MTKAIF1_TX_CFG0 0x11f4 +#define AFE_MTKAIF1_RX_CFG0 0x11f8 +#define AFE_MTKAIF1_RX_CFG1 0x11fc +#define AFE_MTKAIF1_RX_CFG2 0x1200 +#define AFE_AUD_PAD_TOP_CFG0 0x1204 +#define AFE_AUD_PAD_TOP_MON 0x1208 +#define AFE_ADDA_MTKAIFV4_TX_CFG0 0x1280 +#define AFE_ADDA6_MTKAIFV4_TX_CFG0 0x1284 +#define AFE_ADDA_MTKAIFV4_RX_CFG0 0x1288 +#define AFE_ADDA_MTKAIFV4_RX_CFG1 0x128c +#define AFE_ADDA6_MTKAIFV4_RX_CFG0 0x1290 +#define AFE_ADDA6_MTKAIFV4_RX_CFG1 0x1294 +#define AFE_ADDA_MTKAIFV4_TX_SYNCWORD_CFG 0x1298 +#define AFE_ADDA_MTKAIFV4_RX_SYNCWORD_CFG 0x129c +#define AFE_ADDA_MTKAIFV4_MON0 0x12a0 +#define AFE_ADDA_MTKAIFV4_MON1 0x12a4 +#define AFE_ADDA6_MTKAIFV4_MON0 0x12a8 +#define ETDM_IN0_CON0 0x1300 +#define ETDM_IN0_CON1 0x1304 +#define ETDM_IN0_CON2 0x1308 +#define ETDM_IN0_CON3 0x130c +#define ETDM_IN0_CON4 0x1310 +#define ETDM_IN0_CON5 0x1314 +#define ETDM_IN0_CON6 0x1318 +#define ETDM_IN0_CON7 0x131c +#define ETDM_IN0_CON8 0x1320 +#define ETDM_IN0_CON9 0x1324 +#define ETDM_IN0_MON 0x1328 +#define ETDM_IN1_CON0 0x1330 +#define ETDM_IN1_CON1 0x1334 +#define ETDM_IN1_CON2 
0x1338 +#define ETDM_IN1_CON3 0x133c +#define ETDM_IN1_CON4 0x1340 +#define ETDM_IN1_CON5 0x1344 +#define ETDM_IN1_CON6 0x1348 +#define ETDM_IN1_CON7 0x134c +#define ETDM_IN1_CON8 0x1350 +#define ETDM_IN1_CON9 0x1354 +#define ETDM_IN1_MON 0x1358 +#define ETDM_IN2_CON0 0x1360 +#define ETDM_IN2_CON1 0x1364 +#define ETDM_IN2_CON2 0x1368 +#define ETDM_IN2_CON3 0x136c +#define ETDM_IN2_CON4 0x1370 +#define ETDM_IN2_CON5 0x1374 +#define ETDM_IN2_CON6 0x1378 +#define ETDM_IN2_CON7 0x137c +#define ETDM_IN2_CON8 0x1380 +#define ETDM_IN2_CON9 0x1384 +#define ETDM_IN2_MON 0x1388 +#define ETDM_IN3_CON0 0x1390 +#define ETDM_IN3_CON1 0x1394 +#define ETDM_IN3_CON2 0x1398 +#define ETDM_IN3_CON3 0x139c +#define ETDM_IN3_CON4 0x13a0 +#define ETDM_IN3_CON5 0x13a4 +#define ETDM_IN3_CON6 0x13a8 +#define ETDM_IN3_CON7 0x13ac +#define ETDM_IN3_CON8 0x13b0 +#define ETDM_IN3_CON9 0x13b4 +#define ETDM_IN3_MON 0x13b8 +#define ETDM_IN4_CON0 0x13c0 +#define ETDM_IN4_CON1 0x13c4 +#define ETDM_IN4_CON2 0x13c8 +#define ETDM_IN4_CON3 0x13cc +#define ETDM_IN4_CON4 0x13d0 +#define ETDM_IN4_CON5 0x13d4 +#define ETDM_IN4_CON6 0x13d8 +#define ETDM_IN4_CON7 0x13dc +#define ETDM_IN4_CON8 0x13e0 +#define ETDM_IN4_CON9 0x13e4 +#define ETDM_IN4_MON 0x13e8 +#define ETDM_IN5_CON0 0x13f0 +#define ETDM_IN5_CON1 0x13f4 +#define ETDM_IN5_CON2 0x13f8 +#define ETDM_IN5_CON3 0x13fc +#define ETDM_IN5_CON4 0x1400 +#define ETDM_IN5_CON5 0x1404 +#define ETDM_IN5_CON6 0x1408 +#define ETDM_IN5_CON7 0x140c +#define ETDM_IN5_CON8 0x1410 +#define ETDM_IN5_CON9 0x1414 +#define ETDM_IN5_MON 0x1418 +#define ETDM_IN6_CON0 0x1420 +#define ETDM_IN6_CON1 0x1424 +#define ETDM_IN6_CON2 0x1428 +#define ETDM_IN6_CON3 0x142c +#define ETDM_IN6_CON4 0x1430 +#define ETDM_IN6_CON5 0x1434 +#define ETDM_IN6_CON6 0x1438 +#define ETDM_IN6_CON7 0x143c +#define ETDM_IN6_CON8 0x1440 +#define ETDM_IN6_CON9 0x1444 +#define ETDM_IN6_MON 0x1448 +#define ETDM_OUT0_CON0 0x1480 +#define ETDM_OUT0_CON1 0x1484 +#define ETDM_OUT0_CON2 0x1488 +#define 
ETDM_OUT0_CON3 0x148c +#define ETDM_OUT0_CON4 0x1490 +#define ETDM_OUT0_CON5 0x1494 +#define ETDM_OUT0_CON6 0x1498 +#define ETDM_OUT0_CON7 0x149c +#define ETDM_OUT0_CON8 0x14a0 +#define ETDM_OUT0_CON9 0x14a4 +#define ETDM_OUT0_MON 0x14a8 +#define ETDM_OUT1_CON0 0x14c0 +#define ETDM_OUT1_CON1 0x14c4 +#define ETDM_OUT1_CON2 0x14c8 +#define ETDM_OUT1_CON3 0x14cc +#define ETDM_OUT1_CON4 0x14d0 +#define ETDM_OUT1_CON5 0x14d4 +#define ETDM_OUT1_CON6 0x14d8 +#define ETDM_OUT1_CON7 0x14dc +#define ETDM_OUT1_CON8 0x14e0 +#define ETDM_OUT1_CON9 0x14e4 +#define ETDM_OUT1_MON 0x14e8 +#define ETDM_OUT2_CON0 0x1500 +#define ETDM_OUT2_CON1 0x1504 +#define ETDM_OUT2_CON2 0x1508 +#define ETDM_OUT2_CON3 0x150c +#define ETDM_OUT2_CON4 0x1510 +#define ETDM_OUT2_CON5 0x1514 +#define ETDM_OUT2_CON6 0x1518 +#define ETDM_OUT2_CON7 0x151c +#define ETDM_OUT2_CON8 0x1520 +#define ETDM_OUT2_CON9 0x1524 +#define ETDM_OUT2_MON 0x1528 +#define ETDM_OUT3_CON0 0x1540 +#define ETDM_OUT3_CON1 0x1544 +#define ETDM_OUT3_CON2 0x1548 +#define ETDM_OUT3_CON3 0x154c +#define ETDM_OUT3_CON4 0x1550 +#define ETDM_OUT3_CON5 0x1554 +#define ETDM_OUT3_CON6 0x1558 +#define ETDM_OUT3_CON7 0x155c +#define ETDM_OUT3_CON8 0x1560 +#define ETDM_OUT3_CON9 0x1564 +#define ETDM_OUT3_MON 0x1568 +#define ETDM_OUT4_CON0 0x1580 +#define ETDM_OUT4_CON1 0x1584 +#define ETDM_OUT4_CON2 0x1588 +#define ETDM_OUT4_CON3 0x158c +#define ETDM_OUT4_CON4 0x1590 +#define ETDM_OUT4_CON5 0x1594 +#define ETDM_OUT4_CON6 0x1598 +#define ETDM_OUT4_CON7 0x159c +#define ETDM_OUT4_CON8 0x15a0 +#define ETDM_OUT4_CON9 0x15a4 +#define ETDM_OUT4_MON 0x15a8 +#define ETDM_OUT5_CON0 0x15c0 +#define ETDM_OUT5_CON1 0x15c4 +#define ETDM_OUT5_CON2 0x15c8 +#define ETDM_OUT5_CON3 0x15cc +#define ETDM_OUT5_CON4 0x15d0 +#define ETDM_OUT5_CON5 0x15d4 +#define ETDM_OUT5_CON6 0x15d8 +#define ETDM_OUT5_CON7 0x15dc +#define ETDM_OUT5_CON8 0x15e0 +#define ETDM_OUT5_CON9 0x15e4 +#define ETDM_OUT5_MON 0x15e8 +#define ETDM_OUT6_CON0 0x1600 +#define ETDM_OUT6_CON1 0x1604 
+#define ETDM_OUT6_CON2 0x1608 +#define ETDM_OUT6_CON3 0x160c +#define ETDM_OUT6_CON4 0x1610 +#define ETDM_OUT6_CON5 0x1614 +#define ETDM_OUT6_CON6 0x1618 +#define ETDM_OUT6_CON7 0x161c +#define ETDM_OUT6_CON8 0x1620 +#define ETDM_OUT6_CON9 0x1624 +#define ETDM_OUT6_MON 0x1628 +#define ETDM_0_3_COWORK_CON0 0x1680 +#define ETDM_0_3_COWORK_CON1 0x1684 +#define ETDM_0_3_COWORK_CON2 0x1688 +#define ETDM_0_3_COWORK_CON3 0x168c +#define ETDM_4_7_COWORK_CON0 0x1690 +#define ETDM_4_7_COWORK_CON1 0x1694 +#define ETDM_4_7_COWORK_CON2 0x1698 +#define ETDM_4_7_COWORK_CON3 0x169c +#define AFE_DPTX_CON 0x2040 +#define AFE_DPTX_MON 0x2044 +#define AFE_TDM_CON1 0x2048 +#define AFE_TDM_CON2 0x204c +#define AFE_TDM_CON3 0x2050 +#define AFE_TDM_OUT_MON 0x2054 +#define AFE_HDMI_CONN0 0x2078 +#define AFE_TDM_TOP_IP_VERSION 0x207c +#define AFE_CONN004_0 0x2100 +#define AFE_CONN004_1 0x2104 +#define AFE_CONN004_2 0x2108 +#define AFE_CONN004_4 0x2110 +#define AFE_CONN004_5 0x2114 +#define AFE_CONN004_6 0x2118 +#define AFE_CONN004_7 0x211c +#define AFE_CONN005_0 0x2120 +#define AFE_CONN005_1 0x2124 +#define AFE_CONN005_2 0x2128 +#define AFE_CONN005_4 0x2130 +#define AFE_CONN005_5 0x2134 +#define AFE_CONN005_6 0x2138 +#define AFE_CONN005_7 0x213c +#define AFE_CONN006_0 0x2140 +#define AFE_CONN006_1 0x2144 +#define AFE_CONN006_2 0x2148 +#define AFE_CONN006_4 0x2150 +#define AFE_CONN006_5 0x2154 +#define AFE_CONN006_6 0x2158 +#define AFE_CONN006_7 0x215c +#define AFE_CONN007_0 0x2160 +#define AFE_CONN007_1 0x2164 +#define AFE_CONN007_2 0x2168 +#define AFE_CONN007_4 0x2170 +#define AFE_CONN007_5 0x2174 +#define AFE_CONN007_6 0x2178 +#define AFE_CONN007_7 0x217c +#define AFE_CONN008_0 0x2180 +#define AFE_CONN008_1 0x2184 +#define AFE_CONN008_2 0x2188 +#define AFE_CONN008_4 0x2190 +#define AFE_CONN008_5 0x2194 +#define AFE_CONN008_6 0x2198 +#define AFE_CONN008_7 0x219c +#define AFE_CONN009_0 0x21a0 +#define AFE_CONN009_1 0x21a4 +#define AFE_CONN009_2 0x21a8 +#define AFE_CONN009_4 0x21b0 +#define 
AFE_CONN009_5 0x21b4 +#define AFE_CONN009_6 0x21b8 +#define AFE_CONN009_7 0x21bc +#define AFE_CONN010_0 0x21c0 +#define AFE_CONN010_1 0x21c4 +#define AFE_CONN010_2 0x21c8 +#define AFE_CONN010_4 0x21d0 +#define AFE_CONN010_5 0x21d4 +#define AFE_CONN010_6 0x21d8 +#define AFE_CONN010_7 0x21dc +#define AFE_CONN011_0 0x21e0 +#define AFE_CONN011_1 0x21e4 +#define AFE_CONN011_2 0x21e8 +#define AFE_CONN011_4 0x21f0 +#define AFE_CONN011_5 0x21f4 +#define AFE_CONN011_6 0x21f8 +#define AFE_CONN011_7 0x21fc +#define AFE_CONN012_0 0x2200 +#define AFE_CONN012_1 0x2204 +#define AFE_CONN012_2 0x2208 +#define AFE_CONN012_4 0x2210 +#define AFE_CONN012_5 0x2214 +#define AFE_CONN012_6 0x2218 +#define AFE_CONN012_7 0x221c +#define AFE_CONN014_0 0x2240 +#define AFE_CONN014_1 0x2244 +#define AFE_CONN014_2 0x2248 +#define AFE_CONN014_4 0x2250 +#define AFE_CONN014_5 0x2254 +#define AFE_CONN014_6 0x2258 +#define AFE_CONN014_7 0x225c +#define AFE_CONN015_0 0x2260 +#define AFE_CONN015_1 0x2264 +#define AFE_CONN015_2 0x2268 +#define AFE_CONN015_4 0x2270 +#define AFE_CONN015_5 0x2274 +#define AFE_CONN015_6 0x2278 +#define AFE_CONN015_7 0x227c +#define AFE_CONN016_0 0x2280 +#define AFE_CONN016_1 0x2284 +#define AFE_CONN016_2 0x2288 +#define AFE_CONN016_4 0x2290 +#define AFE_CONN016_5 0x2294 +#define AFE_CONN016_6 0x2298 +#define AFE_CONN016_7 0x229c +#define AFE_CONN017_0 0x22a0 +#define AFE_CONN017_1 0x22a4 +#define AFE_CONN017_2 0x22a8 +#define AFE_CONN017_4 0x22b0 +#define AFE_CONN017_5 0x22b4 +#define AFE_CONN017_6 0x22b8 +#define AFE_CONN017_7 0x22bc +#define AFE_CONN018_0 0x22c0 +#define AFE_CONN018_1 0x22c4 +#define AFE_CONN018_2 0x22c8 +#define AFE_CONN018_4 0x22d0 +#define AFE_CONN018_5 0x22d4 +#define AFE_CONN018_6 0x22d8 +#define AFE_CONN018_7 0x22dc +#define AFE_CONN019_0 0x22e0 +#define AFE_CONN019_1 0x22e4 +#define AFE_CONN019_2 0x22e8 +#define AFE_CONN019_4 0x22f0 +#define AFE_CONN019_5 0x22f4 +#define AFE_CONN019_6 0x22f8 +#define AFE_CONN019_7 0x22fc +#define AFE_CONN020_0 
0x2300 +#define AFE_CONN020_1 0x2304 +#define AFE_CONN020_2 0x2308 +#define AFE_CONN020_4 0x2310 +#define AFE_CONN020_5 0x2314 +#define AFE_CONN020_6 0x2318 +#define AFE_CONN020_7 0x231c +#define AFE_CONN021_0 0x2320 +#define AFE_CONN021_1 0x2324 +#define AFE_CONN021_2 0x2328 +#define AFE_CONN021_4 0x2330 +#define AFE_CONN021_5 0x2334 +#define AFE_CONN021_6 0x2338 +#define AFE_CONN021_7 0x233c +#define AFE_CONN022_0 0x2340 +#define AFE_CONN022_1 0x2344 +#define AFE_CONN022_2 0x2348 +#define AFE_CONN022_4 0x2350 +#define AFE_CONN022_5 0x2354 +#define AFE_CONN022_6 0x2358 +#define AFE_CONN022_7 0x235c +#define AFE_CONN023_0 0x2360 +#define AFE_CONN023_1 0x2364 +#define AFE_CONN023_2 0x2368 +#define AFE_CONN023_4 0x2370 +#define AFE_CONN023_5 0x2374 +#define AFE_CONN023_6 0x2378 +#define AFE_CONN023_7 0x237c +#define AFE_CONN024_0 0x2380 +#define AFE_CONN024_1 0x2384 +#define AFE_CONN024_2 0x2388 +#define AFE_CONN024_4 0x2390 +#define AFE_CONN024_5 0x2394 +#define AFE_CONN024_6 0x2398 +#define AFE_CONN024_7 0x239c +#define AFE_CONN025_0 0x23a0 +#define AFE_CONN025_1 0x23a4 +#define AFE_CONN025_2 0x23a8 +#define AFE_CONN025_4 0x23b0 +#define AFE_CONN025_5 0x23b4 +#define AFE_CONN025_6 0x23b8 +#define AFE_CONN025_7 0x23bc +#define AFE_CONN026_0 0x23c0 +#define AFE_CONN026_1 0x23c4 +#define AFE_CONN026_2 0x23c8 +#define AFE_CONN026_4 0x23d0 +#define AFE_CONN026_5 0x23d4 +#define AFE_CONN026_6 0x23d8 +#define AFE_CONN026_7 0x23dc +#define AFE_CONN027_0 0x23e0 +#define AFE_CONN027_1 0x23e4 +#define AFE_CONN027_2 0x23e8 +#define AFE_CONN027_4 0x23f0 +#define AFE_CONN027_5 0x23f4 +#define AFE_CONN027_6 0x23f8 +#define AFE_CONN027_7 0x23fc +#define AFE_CONN028_0 0x2400 +#define AFE_CONN028_1 0x2404 +#define AFE_CONN028_2 0x2408 +#define AFE_CONN028_4 0x2410 +#define AFE_CONN028_5 0x2414 +#define AFE_CONN028_6 0x2418 +#define AFE_CONN028_7 0x241c +#define AFE_CONN029_0 0x2420 +#define AFE_CONN029_1 0x2424 +#define AFE_CONN029_2 0x2428 +#define AFE_CONN029_4 0x2430 +#define 
AFE_CONN029_5 0x2434 +#define AFE_CONN029_6 0x2438 +#define AFE_CONN029_7 0x243c +#define AFE_CONN030_0 0x2440 +#define AFE_CONN030_1 0x2444 +#define AFE_CONN030_2 0x2448 +#define AFE_CONN030_4 0x2450 +#define AFE_CONN030_5 0x2454 +#define AFE_CONN030_6 0x2458 +#define AFE_CONN030_7 0x245c +#define AFE_CONN031_0 0x2460 +#define AFE_CONN031_1 0x2464 +#define AFE_CONN031_2 0x2468 +#define AFE_CONN031_4 0x2470 +#define AFE_CONN031_5 0x2474 +#define AFE_CONN031_6 0x2478 +#define AFE_CONN031_7 0x247c +#define AFE_CONN032_0 0x2480 +#define AFE_CONN032_1 0x2484 +#define AFE_CONN032_2 0x2488 +#define AFE_CONN032_4 0x2490 +#define AFE_CONN032_5 0x2494 +#define AFE_CONN032_6 0x2498 +#define AFE_CONN032_7 0x249c +#define AFE_CONN033_0 0x24a0 +#define AFE_CONN033_1 0x24a4 +#define AFE_CONN033_2 0x24a8 +#define AFE_CONN033_4 0x24b0 +#define AFE_CONN033_5 0x24b4 +#define AFE_CONN033_6 0x24b8 +#define AFE_CONN033_7 0x24bc +#define AFE_CONN034_0 0x24c0 +#define AFE_CONN034_1 0x24c4 +#define AFE_CONN034_2 0x24c8 +#define AFE_CONN034_4 0x24d0 +#define AFE_CONN034_5 0x24d4 +#define AFE_CONN034_6 0x24d8 +#define AFE_CONN034_7 0x24dc +#define AFE_CONN035_0 0x24e0 +#define AFE_CONN035_1 0x24e4 +#define AFE_CONN035_2 0x24e8 +#define AFE_CONN035_4 0x24f0 +#define AFE_CONN035_5 0x24f4 +#define AFE_CONN035_6 0x24f8 +#define AFE_CONN035_7 0x24fc +#define AFE_CONN036_0 0x2500 +#define AFE_CONN036_1 0x2504 +#define AFE_CONN036_2 0x2508 +#define AFE_CONN036_4 0x2510 +#define AFE_CONN036_5 0x2514 +#define AFE_CONN036_6 0x2518 +#define AFE_CONN036_7 0x251c +#define AFE_CONN037_0 0x2520 +#define AFE_CONN037_1 0x2524 +#define AFE_CONN037_2 0x2528 +#define AFE_CONN037_4 0x2530 +#define AFE_CONN037_5 0x2534 +#define AFE_CONN037_6 0x2538 +#define AFE_CONN037_7 0x253c +#define AFE_CONN038_0 0x2540 +#define AFE_CONN038_1 0x2544 +#define AFE_CONN038_2 0x2548 +#define AFE_CONN038_4 0x2550 +#define AFE_CONN038_5 0x2554 +#define AFE_CONN038_6 0x2558 +#define AFE_CONN038_7 0x255c +#define AFE_CONN039_0 
0x2560 +#define AFE_CONN039_1 0x2564 +#define AFE_CONN039_2 0x2568 +#define AFE_CONN039_4 0x2570 +#define AFE_CONN039_5 0x2574 +#define AFE_CONN039_6 0x2578 +#define AFE_CONN039_7 0x257c +#define AFE_CONN040_0 0x2580 +#define AFE_CONN040_1 0x2584 +#define AFE_CONN040_2 0x2588 +#define AFE_CONN040_4 0x2590 +#define AFE_CONN040_5 0x2594 +#define AFE_CONN040_6 0x2598 +#define AFE_CONN040_7 0x259c +#define AFE_CONN041_0 0x25a0 +#define AFE_CONN041_1 0x25a4 +#define AFE_CONN041_2 0x25a8 +#define AFE_CONN041_4 0x25b0 +#define AFE_CONN041_5 0x25b4 +#define AFE_CONN041_6 0x25b8 +#define AFE_CONN041_7 0x25bc +#define AFE_CONN042_0 0x25c0 +#define AFE_CONN042_1 0x25c4 +#define AFE_CONN042_2 0x25c8 +#define AFE_CONN042_4 0x25d0 +#define AFE_CONN042_5 0x25d4 +#define AFE_CONN042_6 0x25d8 +#define AFE_CONN042_7 0x25dc +#define AFE_CONN043_0 0x25e0 +#define AFE_CONN043_1 0x25e4 +#define AFE_CONN043_2 0x25e8 +#define AFE_CONN043_4 0x25f0 +#define AFE_CONN043_5 0x25f4 +#define AFE_CONN043_6 0x25f8 +#define AFE_CONN043_7 0x25fc +#define AFE_CONN044_0 0x2600 +#define AFE_CONN044_1 0x2604 +#define AFE_CONN044_2 0x2608 +#define AFE_CONN044_4 0x2610 +#define AFE_CONN044_5 0x2614 +#define AFE_CONN044_6 0x2618 +#define AFE_CONN044_7 0x261c +#define AFE_CONN045_0 0x2620 +#define AFE_CONN045_1 0x2624 +#define AFE_CONN045_2 0x2628 +#define AFE_CONN045_4 0x2630 +#define AFE_CONN045_5 0x2634 +#define AFE_CONN045_6 0x2638 +#define AFE_CONN045_7 0x263c +#define AFE_CONN046_0 0x2640 +#define AFE_CONN046_1 0x2644 +#define AFE_CONN046_2 0x2648 +#define AFE_CONN046_4 0x2650 +#define AFE_CONN046_5 0x2654 +#define AFE_CONN046_6 0x2658 +#define AFE_CONN046_7 0x265c +#define AFE_CONN047_0 0x2660 +#define AFE_CONN047_1 0x2664 +#define AFE_CONN047_2 0x2668 +#define AFE_CONN047_4 0x2670 +#define AFE_CONN047_5 0x2674 +#define AFE_CONN047_6 0x2678 +#define AFE_CONN047_7 0x267c +#define AFE_CONN048_0 0x2680 +#define AFE_CONN048_1 0x2684 +#define AFE_CONN048_2 0x2688 +#define AFE_CONN048_4 0x2690 +#define 
AFE_CONN048_5 0x2694 +#define AFE_CONN048_6 0x2698 +#define AFE_CONN048_7 0x269c +#define AFE_CONN049_0 0x26a0 +#define AFE_CONN049_1 0x26a4 +#define AFE_CONN049_2 0x26a8 +#define AFE_CONN049_4 0x26b0 +#define AFE_CONN049_5 0x26b4 +#define AFE_CONN049_6 0x26b8 +#define AFE_CONN049_7 0x26bc +#define AFE_CONN050_0 0x26c0 +#define AFE_CONN050_1 0x26c4 +#define AFE_CONN050_2 0x26c8 +#define AFE_CONN050_4 0x26d0 +#define AFE_CONN050_5 0x26d4 +#define AFE_CONN050_6 0x26d8 +#define AFE_CONN050_7 0x26dc +#define AFE_CONN051_0 0x26e0 +#define AFE_CONN051_1 0x26e4 +#define AFE_CONN051_2 0x26e8 +#define AFE_CONN051_4 0x26f0 +#define AFE_CONN051_5 0x26f4 +#define AFE_CONN051_6 0x26f8 +#define AFE_CONN051_7 0x26fc +#define AFE_CONN052_0 0x2700 +#define AFE_CONN052_1 0x2704 +#define AFE_CONN052_2 0x2708 +#define AFE_CONN052_4 0x2710 +#define AFE_CONN052_5 0x2714 +#define AFE_CONN052_6 0x2718 +#define AFE_CONN052_7 0x271c +#define AFE_CONN053_0 0x2720 +#define AFE_CONN053_1 0x2724 +#define AFE_CONN053_2 0x2728 +#define AFE_CONN053_4 0x2730 +#define AFE_CONN053_5 0x2734 +#define AFE_CONN053_6 0x2738 +#define AFE_CONN053_7 0x273c +#define AFE_CONN054_0 0x2740 +#define AFE_CONN054_1 0x2744 +#define AFE_CONN054_2 0x2748 +#define AFE_CONN054_4 0x2750 +#define AFE_CONN054_5 0x2754 +#define AFE_CONN054_6 0x2758 +#define AFE_CONN054_7 0x275c +#define AFE_CONN055_0 0x2760 +#define AFE_CONN055_1 0x2764 +#define AFE_CONN055_2 0x2768 +#define AFE_CONN055_4 0x2770 +#define AFE_CONN055_5 0x2774 +#define AFE_CONN055_6 0x2778 +#define AFE_CONN055_7 0x277c +#define AFE_CONN056_0 0x2780 +#define AFE_CONN056_1 0x2784 +#define AFE_CONN056_2 0x2788 +#define AFE_CONN056_4 0x2790 +#define AFE_CONN056_5 0x2794 +#define AFE_CONN056_6 0x2798 +#define AFE_CONN056_7 0x279c +#define AFE_CONN057_0 0x27a0 +#define AFE_CONN057_1 0x27a4 +#define AFE_CONN057_2 0x27a8 +#define AFE_CONN057_4 0x27b0 +#define AFE_CONN057_5 0x27b4 +#define AFE_CONN057_6 0x27b8 +#define AFE_CONN057_7 0x27bc +#define AFE_CONN058_0 
0x27c0 +#define AFE_CONN058_1 0x27c4 +#define AFE_CONN058_2 0x27c8 +#define AFE_CONN058_4 0x27d0 +#define AFE_CONN058_5 0x27d4 +#define AFE_CONN058_6 0x27d8 +#define AFE_CONN058_7 0x27dc +#define AFE_CONN059_0 0x27e0 +#define AFE_CONN059_1 0x27e4 +#define AFE_CONN059_2 0x27e8 +#define AFE_CONN059_4 0x27f0 +#define AFE_CONN059_5 0x27f4 +#define AFE_CONN059_6 0x27f8 +#define AFE_CONN059_7 0x27fc +#define AFE_CONN060_0 0x2800 +#define AFE_CONN060_1 0x2804 +#define AFE_CONN060_2 0x2808 +#define AFE_CONN060_4 0x2810 +#define AFE_CONN060_5 0x2814 +#define AFE_CONN060_6 0x2818 +#define AFE_CONN060_7 0x281c +#define AFE_CONN061_0 0x2820 +#define AFE_CONN061_1 0x2824 +#define AFE_CONN061_2 0x2828 +#define AFE_CONN061_4 0x2830 +#define AFE_CONN061_5 0x2834 +#define AFE_CONN061_6 0x2838 +#define AFE_CONN061_7 0x283c +#define AFE_CONN062_0 0x2840 +#define AFE_CONN062_1 0x2844 +#define AFE_CONN062_2 0x2848 +#define AFE_CONN062_4 0x2850 +#define AFE_CONN062_5 0x2854 +#define AFE_CONN062_6 0x2858 +#define AFE_CONN062_7 0x285c +#define AFE_CONN063_0 0x2860 +#define AFE_CONN063_1 0x2864 +#define AFE_CONN063_2 0x2868 +#define AFE_CONN063_4 0x2870 +#define AFE_CONN063_5 0x2874 +#define AFE_CONN063_6 0x2878 +#define AFE_CONN063_7 0x287c +#define AFE_CONN064_0 0x2880 +#define AFE_CONN064_1 0x2884 +#define AFE_CONN064_2 0x2888 +#define AFE_CONN064_4 0x2890 +#define AFE_CONN064_5 0x2894 +#define AFE_CONN064_6 0x2898 +#define AFE_CONN064_7 0x289c +#define AFE_CONN065_0 0x28a0 +#define AFE_CONN065_1 0x28a4 +#define AFE_CONN065_2 0x28a8 +#define AFE_CONN065_4 0x28b0 +#define AFE_CONN065_5 0x28b4 +#define AFE_CONN065_6 0x28b8 +#define AFE_CONN065_7 0x28bc +#define AFE_CONN066_0 0x28c0 +#define AFE_CONN066_1 0x28c4 +#define AFE_CONN066_2 0x28c8 +#define AFE_CONN066_4 0x28d0 +#define AFE_CONN066_5 0x28d4 +#define AFE_CONN066_6 0x28d8 +#define AFE_CONN066_7 0x28dc +#define AFE_CONN067_0 0x28e0 +#define AFE_CONN067_1 0x28e4 +#define AFE_CONN067_2 0x28e8 +#define AFE_CONN067_4 0x28f0 +#define 
AFE_CONN067_5 0x28f4 +#define AFE_CONN067_6 0x28f8 +#define AFE_CONN067_7 0x28fc +#define AFE_CONN068_0 0x2900 +#define AFE_CONN068_1 0x2904 +#define AFE_CONN068_2 0x2908 +#define AFE_CONN068_4 0x2910 +#define AFE_CONN068_5 0x2914 +#define AFE_CONN068_6 0x2918 +#define AFE_CONN068_7 0x291c +#define AFE_CONN069_0 0x2920 +#define AFE_CONN069_1 0x2924 +#define AFE_CONN069_2 0x2928 +#define AFE_CONN069_4 0x2930 +#define AFE_CONN069_5 0x2934 +#define AFE_CONN069_6 0x2938 +#define AFE_CONN069_7 0x293c +#define AFE_CONN070_0 0x2940 +#define AFE_CONN070_1 0x2944 +#define AFE_CONN070_2 0x2948 +#define AFE_CONN070_4 0x2950 +#define AFE_CONN070_5 0x2954 +#define AFE_CONN070_6 0x2958 +#define AFE_CONN070_7 0x295c +#define AFE_CONN071_0 0x2960 +#define AFE_CONN071_1 0x2964 +#define AFE_CONN071_2 0x2968 +#define AFE_CONN071_4 0x2970 +#define AFE_CONN071_5 0x2974 +#define AFE_CONN071_6 0x2978 +#define AFE_CONN071_7 0x297c +#define AFE_CONN072_0 0x2980 +#define AFE_CONN072_1 0x2984 +#define AFE_CONN072_2 0x2988 +#define AFE_CONN072_4 0x2990 +#define AFE_CONN072_5 0x2994 +#define AFE_CONN072_6 0x2998 +#define AFE_CONN072_7 0x299c +#define AFE_CONN073_0 0x29a0 +#define AFE_CONN073_1 0x29a4 +#define AFE_CONN073_2 0x29a8 +#define AFE_CONN073_4 0x29b0 +#define AFE_CONN073_5 0x29b4 +#define AFE_CONN073_6 0x29b8 +#define AFE_CONN073_7 0x29bc +#define AFE_CONN074_0 0x29c0 +#define AFE_CONN074_1 0x29c4 +#define AFE_CONN074_2 0x29c8 +#define AFE_CONN074_4 0x29d0 +#define AFE_CONN074_5 0x29d4 +#define AFE_CONN074_6 0x29d8 +#define AFE_CONN074_7 0x29dc +#define AFE_CONN075_0 0x29e0 +#define AFE_CONN075_1 0x29e4 +#define AFE_CONN075_2 0x29e8 +#define AFE_CONN075_4 0x29f0 +#define AFE_CONN075_5 0x29f4 +#define AFE_CONN075_6 0x29f8 +#define AFE_CONN075_7 0x29fc +#define AFE_CONN076_0 0x2a00 +#define AFE_CONN076_1 0x2a04 +#define AFE_CONN076_2 0x2a08 +#define AFE_CONN076_4 0x2a10 +#define AFE_CONN076_5 0x2a14 +#define AFE_CONN076_6 0x2a18 +#define AFE_CONN076_7 0x2a1c +#define AFE_CONN077_0 
0x2a20 +#define AFE_CONN077_1 0x2a24 +#define AFE_CONN077_2 0x2a28 +#define AFE_CONN077_4 0x2a30 +#define AFE_CONN077_5 0x2a34 +#define AFE_CONN077_6 0x2a38 +#define AFE_CONN077_7 0x2a3c +#define AFE_CONN078_0 0x2a40 +#define AFE_CONN078_1 0x2a44 +#define AFE_CONN078_2 0x2a48 +#define AFE_CONN078_4 0x2a50 +#define AFE_CONN078_5 0x2a54 +#define AFE_CONN078_6 0x2a58 +#define AFE_CONN078_7 0x2a5c +#define AFE_CONN079_0 0x2a60 +#define AFE_CONN079_1 0x2a64 +#define AFE_CONN079_2 0x2a68 +#define AFE_CONN079_4 0x2a70 +#define AFE_CONN079_5 0x2a74 +#define AFE_CONN079_6 0x2a78 +#define AFE_CONN079_7 0x2a7c +#define AFE_CONN080_0 0x2a80 +#define AFE_CONN080_1 0x2a84 +#define AFE_CONN080_2 0x2a88 +#define AFE_CONN080_4 0x2a90 +#define AFE_CONN080_5 0x2a94 +#define AFE_CONN080_6 0x2a98 +#define AFE_CONN080_7 0x2a9c +#define AFE_CONN081_0 0x2aa0 +#define AFE_CONN081_1 0x2aa4 +#define AFE_CONN081_2 0x2aa8 +#define AFE_CONN081_4 0x2ab0 +#define AFE_CONN081_5 0x2ab4 +#define AFE_CONN081_6 0x2ab8 +#define AFE_CONN081_7 0x2abc +#define AFE_CONN082_0 0x2ac0 +#define AFE_CONN082_1 0x2ac4 +#define AFE_CONN082_2 0x2ac8 +#define AFE_CONN082_4 0x2ad0 +#define AFE_CONN082_5 0x2ad4 +#define AFE_CONN082_6 0x2ad8 +#define AFE_CONN082_7 0x2adc +#define AFE_CONN083_0 0x2ae0 +#define AFE_CONN083_1 0x2ae4 +#define AFE_CONN083_2 0x2ae8 +#define AFE_CONN083_4 0x2af0 +#define AFE_CONN083_5 0x2af4 +#define AFE_CONN083_6 0x2af8 +#define AFE_CONN083_7 0x2afc +#define AFE_CONN084_0 0x2b00 +#define AFE_CONN084_1 0x2b04 +#define AFE_CONN084_2 0x2b08 +#define AFE_CONN084_4 0x2b10 +#define AFE_CONN084_5 0x2b14 +#define AFE_CONN084_6 0x2b18 +#define AFE_CONN084_7 0x2b1c +#define AFE_CONN085_0 0x2b20 +#define AFE_CONN085_1 0x2b24 +#define AFE_CONN085_2 0x2b28 +#define AFE_CONN085_4 0x2b30 +#define AFE_CONN085_5 0x2b34 +#define AFE_CONN085_6 0x2b38 +#define AFE_CONN085_7 0x2b3c +#define AFE_CONN086_0 0x2b40 +#define AFE_CONN086_1 0x2b44 +#define AFE_CONN086_2 0x2b48 +#define AFE_CONN086_4 0x2b50 +#define 
AFE_CONN086_5 0x2b54 +#define AFE_CONN086_6 0x2b58 +#define AFE_CONN086_7 0x2b5c +#define AFE_CONN087_0 0x2b60 +#define AFE_CONN087_1 0x2b64 +#define AFE_CONN087_2 0x2b68 +#define AFE_CONN087_4 0x2b70 +#define AFE_CONN087_5 0x2b74 +#define AFE_CONN087_6 0x2b78 +#define AFE_CONN087_7 0x2b7c +#define AFE_CONN088_0 0x2b80 +#define AFE_CONN088_1 0x2b84 +#define AFE_CONN088_2 0x2b88 +#define AFE_CONN088_4 0x2b90 +#define AFE_CONN088_5 0x2b94 +#define AFE_CONN088_6 0x2b98 +#define AFE_CONN088_7 0x2b9c +#define AFE_CONN089_0 0x2ba0 +#define AFE_CONN089_1 0x2ba4 +#define AFE_CONN089_2 0x2ba8 +#define AFE_CONN089_4 0x2bb0 +#define AFE_CONN089_5 0x2bb4 +#define AFE_CONN089_6 0x2bb8 +#define AFE_CONN089_7 0x2bbc +#define AFE_CONN090_0 0x2bc0 +#define AFE_CONN090_1 0x2bc4 +#define AFE_CONN090_2 0x2bc8 +#define AFE_CONN090_4 0x2bd0 +#define AFE_CONN090_5 0x2bd4 +#define AFE_CONN090_6 0x2bd8 +#define AFE_CONN090_7 0x2bdc +#define AFE_CONN091_0 0x2be0 +#define AFE_CONN091_1 0x2be4 +#define AFE_CONN091_2 0x2be8 +#define AFE_CONN091_4 0x2bf0 +#define AFE_CONN091_5 0x2bf4 +#define AFE_CONN091_6 0x2bf8 +#define AFE_CONN091_7 0x2bfc +#define AFE_CONN092_0 0x2c00 +#define AFE_CONN092_1 0x2c04 +#define AFE_CONN092_2 0x2c08 +#define AFE_CONN092_4 0x2c10 +#define AFE_CONN092_5 0x2c14 +#define AFE_CONN092_6 0x2c18 +#define AFE_CONN092_7 0x2c1c +#define AFE_CONN093_0 0x2c20 +#define AFE_CONN093_1 0x2c24 +#define AFE_CONN093_2 0x2c28 +#define AFE_CONN093_4 0x2c30 +#define AFE_CONN093_5 0x2c34 +#define AFE_CONN093_6 0x2c38 +#define AFE_CONN093_7 0x2c3c +#define AFE_CONN094_0 0x2c40 +#define AFE_CONN094_1 0x2c44 +#define AFE_CONN094_2 0x2c48 +#define AFE_CONN094_4 0x2c50 +#define AFE_CONN094_5 0x2c54 +#define AFE_CONN094_6 0x2c58 +#define AFE_CONN094_7 0x2c5c +#define AFE_CONN095_0 0x2c60 +#define AFE_CONN095_1 0x2c64 +#define AFE_CONN095_2 0x2c68 +#define AFE_CONN095_4 0x2c70 +#define AFE_CONN095_5 0x2c74 +#define AFE_CONN095_6 0x2c78 +#define AFE_CONN095_7 0x2c7c +#define AFE_CONN096_0 
0x2c80 +#define AFE_CONN096_1 0x2c84 +#define AFE_CONN096_2 0x2c88 +#define AFE_CONN096_4 0x2c90 +#define AFE_CONN096_5 0x2c94 +#define AFE_CONN096_6 0x2c98 +#define AFE_CONN096_7 0x2c9c +#define AFE_CONN097_0 0x2ca0 +#define AFE_CONN097_1 0x2ca4 +#define AFE_CONN097_2 0x2ca8 +#define AFE_CONN097_4 0x2cb0 +#define AFE_CONN097_5 0x2cb4 +#define AFE_CONN097_6 0x2cb8 +#define AFE_CONN097_7 0x2cbc +#define AFE_CONN098_0 0x2cc0 +#define AFE_CONN098_1 0x2cc4 +#define AFE_CONN098_2 0x2cc8 +#define AFE_CONN098_4 0x2cd0 +#define AFE_CONN098_5 0x2cd4 +#define AFE_CONN098_6 0x2cd8 +#define AFE_CONN098_7 0x2cdc +#define AFE_CONN099_0 0x2ce0 +#define AFE_CONN099_1 0x2ce4 +#define AFE_CONN099_2 0x2ce8 +#define AFE_CONN099_4 0x2cf0 +#define AFE_CONN099_5 0x2cf4 +#define AFE_CONN099_6 0x2cf8 +#define AFE_CONN099_7 0x2cfc +#define AFE_CONN100_0 0x2d00 +#define AFE_CONN100_1 0x2d04 +#define AFE_CONN100_2 0x2d08 +#define AFE_CONN100_4 0x2d10 +#define AFE_CONN100_5 0x2d14 +#define AFE_CONN100_6 0x2d18 +#define AFE_CONN100_7 0x2d1c +#define AFE_CONN102_0 0x2d40 +#define AFE_CONN102_1 0x2d44 +#define AFE_CONN102_2 0x2d48 +#define AFE_CONN102_4 0x2d50 +#define AFE_CONN102_5 0x2d54 +#define AFE_CONN102_6 0x2d58 +#define AFE_CONN102_7 0x2d5c +#define AFE_CONN103_0 0x2d60 +#define AFE_CONN103_1 0x2d64 +#define AFE_CONN103_2 0x2d68 +#define AFE_CONN103_4 0x2d70 +#define AFE_CONN103_5 0x2d74 +#define AFE_CONN103_6 0x2d78 +#define AFE_CONN103_7 0x2d7c +#define AFE_CONN104_0 0x2d80 +#define AFE_CONN104_1 0x2d84 +#define AFE_CONN104_2 0x2d88 +#define AFE_CONN104_4 0x2d90 +#define AFE_CONN104_5 0x2d94 +#define AFE_CONN104_6 0x2d98 +#define AFE_CONN104_7 0x2d9c +#define AFE_CONN105_0 0x2da0 +#define AFE_CONN105_1 0x2da4 +#define AFE_CONN105_2 0x2da8 +#define AFE_CONN105_4 0x2db0 +#define AFE_CONN105_5 0x2db4 +#define AFE_CONN105_6 0x2db8 +#define AFE_CONN105_7 0x2dbc +#define AFE_CONN106_0 0x2dc0 +#define AFE_CONN106_1 0x2dc4 +#define AFE_CONN106_2 0x2dc8 +#define AFE_CONN106_4 0x2dd0 +#define 
AFE_CONN106_5 0x2dd4 +#define AFE_CONN106_6 0x2dd8 +#define AFE_CONN106_7 0x2ddc +#define AFE_CONN108_0 0x2e00 +#define AFE_CONN108_1 0x2e04 +#define AFE_CONN108_2 0x2e08 +#define AFE_CONN108_4 0x2e10 +#define AFE_CONN108_5 0x2e14 +#define AFE_CONN108_6 0x2e18 +#define AFE_CONN108_7 0x2e1c +#define AFE_CONN109_0 0x2e20 +#define AFE_CONN109_1 0x2e24 +#define AFE_CONN109_2 0x2e28 +#define AFE_CONN109_4 0x2e30 +#define AFE_CONN109_5 0x2e34 +#define AFE_CONN109_6 0x2e38 +#define AFE_CONN109_7 0x2e3c +#define AFE_CONN110_0 0x2e40 +#define AFE_CONN110_1 0x2e44 +#define AFE_CONN110_2 0x2e48 +#define AFE_CONN110_4 0x2e50 +#define AFE_CONN110_5 0x2e54 +#define AFE_CONN110_6 0x2e58 +#define AFE_CONN110_7 0x2e5c +#define AFE_CONN111_0 0x2e60 +#define AFE_CONN111_1 0x2e64 +#define AFE_CONN111_2 0x2e68 +#define AFE_CONN111_4 0x2e70 +#define AFE_CONN111_5 0x2e74 +#define AFE_CONN111_6 0x2e78 +#define AFE_CONN111_7 0x2e7c +#define AFE_CONN112_0 0x2e80 +#define AFE_CONN112_1 0x2e84 +#define AFE_CONN112_2 0x2e88 +#define AFE_CONN112_4 0x2e90 +#define AFE_CONN112_5 0x2e94 +#define AFE_CONN112_6 0x2e98 +#define AFE_CONN112_7 0x2e9c +#define AFE_CONN113_0 0x2ea0 +#define AFE_CONN113_1 0x2ea4 +#define AFE_CONN113_2 0x2ea8 +#define AFE_CONN113_4 0x2eb0 +#define AFE_CONN113_5 0x2eb4 +#define AFE_CONN113_6 0x2eb8 +#define AFE_CONN113_7 0x2ebc +#define AFE_CONN114_0 0x2ec0 +#define AFE_CONN114_1 0x2ec4 +#define AFE_CONN114_2 0x2ec8 +#define AFE_CONN114_4 0x2ed0 +#define AFE_CONN114_5 0x2ed4 +#define AFE_CONN114_6 0x2ed8 +#define AFE_CONN114_7 0x2edc +#define AFE_CONN115_0 0x2ee0 +#define AFE_CONN115_1 0x2ee4 +#define AFE_CONN115_2 0x2ee8 +#define AFE_CONN115_4 0x2ef0 +#define AFE_CONN115_5 0x2ef4 +#define AFE_CONN115_6 0x2ef8 +#define AFE_CONN115_7 0x2efc +#define AFE_CONN116_0 0x2f00 +#define AFE_CONN116_1 0x2f04 +#define AFE_CONN116_2 0x2f08 +#define AFE_CONN116_4 0x2f10 +#define AFE_CONN116_5 0x2f14 +#define AFE_CONN116_6 0x2f18 +#define AFE_CONN116_7 0x2f1c +#define AFE_CONN117_0 
0x2f20 +#define AFE_CONN117_1 0x2f24 +#define AFE_CONN117_2 0x2f28 +#define AFE_CONN117_4 0x2f30 +#define AFE_CONN117_5 0x2f34 +#define AFE_CONN117_6 0x2f38 +#define AFE_CONN117_7 0x2f3c +#define AFE_CONN118_0 0x2f40 +#define AFE_CONN118_1 0x2f44 +#define AFE_CONN118_2 0x2f48 +#define AFE_CONN118_4 0x2f50 +#define AFE_CONN118_5 0x2f54 +#define AFE_CONN118_6 0x2f58 +#define AFE_CONN118_7 0x2f5c +#define AFE_CONN119_0 0x2f60 +#define AFE_CONN119_1 0x2f64 +#define AFE_CONN119_2 0x2f68 +#define AFE_CONN119_4 0x2f70 +#define AFE_CONN119_5 0x2f74 +#define AFE_CONN119_6 0x2f78 +#define AFE_CONN119_7 0x2f7c +#define AFE_CONN120_0 0x2f80 +#define AFE_CONN120_1 0x2f84 +#define AFE_CONN120_2 0x2f88 +#define AFE_CONN120_4 0x2f90 +#define AFE_CONN120_5 0x2f94 +#define AFE_CONN120_6 0x2f98 +#define AFE_CONN120_7 0x2f9c +#define AFE_CONN121_0 0x2fa0 +#define AFE_CONN121_1 0x2fa4 +#define AFE_CONN121_2 0x2fa8 +#define AFE_CONN121_4 0x2fb0 +#define AFE_CONN121_5 0x2fb4 +#define AFE_CONN121_6 0x2fb8 +#define AFE_CONN121_7 0x2fbc +#define AFE_CONN122_0 0x2fc0 +#define AFE_CONN122_1 0x2fc4 +#define AFE_CONN122_2 0x2fc8 +#define AFE_CONN122_4 0x2fd0 +#define AFE_CONN122_5 0x2fd4 +#define AFE_CONN122_6 0x2fd8 +#define AFE_CONN122_7 0x2fdc +#define AFE_CONN123_0 0x2fe0 +#define AFE_CONN123_1 0x2fe4 +#define AFE_CONN123_2 0x2fe8 +#define AFE_CONN123_4 0x2ff0 +#define AFE_CONN123_5 0x2ff4 +#define AFE_CONN123_6 0x2ff8 +#define AFE_CONN123_7 0x2ffc +#define AFE_CONN124_0 0x3000 +#define AFE_CONN124_1 0x3004 +#define AFE_CONN124_2 0x3008 +#define AFE_CONN124_4 0x3010 +#define AFE_CONN124_5 0x3014 +#define AFE_CONN124_6 0x3018 +#define AFE_CONN124_7 0x301c +#define AFE_CONN125_0 0x3020 +#define AFE_CONN125_1 0x3024 +#define AFE_CONN125_2 0x3028 +#define AFE_CONN125_4 0x3030 +#define AFE_CONN125_5 0x3034 +#define AFE_CONN125_6 0x3038 +#define AFE_CONN125_7 0x303c +#define AFE_CONN126_0 0x3040 +#define AFE_CONN126_1 0x3044 +#define AFE_CONN126_2 0x3048 +#define AFE_CONN126_4 0x3050 +#define 
AFE_CONN126_5 0x3054 +#define AFE_CONN126_6 0x3058 +#define AFE_CONN126_7 0x305c +#define AFE_CONN127_0 0x3060 +#define AFE_CONN127_1 0x3064 +#define AFE_CONN127_2 0x3068 +#define AFE_CONN127_4 0x3070 +#define AFE_CONN127_5 0x3074 +#define AFE_CONN127_6 0x3078 +#define AFE_CONN127_7 0x307c +#define AFE_CONN128_0 0x3080 +#define AFE_CONN128_1 0x3084 +#define AFE_CONN128_2 0x3088 +#define AFE_CONN128_4 0x3090 +#define AFE_CONN128_5 0x3094 +#define AFE_CONN128_6 0x3098 +#define AFE_CONN128_7 0x309c +#define AFE_CONN129_0 0x30a0 +#define AFE_CONN129_1 0x30a4 +#define AFE_CONN129_2 0x30a8 +#define AFE_CONN129_4 0x30b0 +#define AFE_CONN129_5 0x30b4 +#define AFE_CONN129_6 0x30b8 +#define AFE_CONN129_7 0x30bc +#define AFE_CONN130_0 0x30c0 +#define AFE_CONN130_1 0x30c4 +#define AFE_CONN130_2 0x30c8 +#define AFE_CONN130_4 0x30d0 +#define AFE_CONN130_5 0x30d4 +#define AFE_CONN130_6 0x30d8 +#define AFE_CONN130_7 0x30dc +#define AFE_CONN131_0 0x30e0 +#define AFE_CONN131_1 0x30e4 +#define AFE_CONN131_2 0x30e8 +#define AFE_CONN131_4 0x30f0 +#define AFE_CONN131_5 0x30f4 +#define AFE_CONN131_6 0x30f8 +#define AFE_CONN131_7 0x30fc +#define AFE_CONN132_0 0x3100 +#define AFE_CONN132_1 0x3104 +#define AFE_CONN132_2 0x3108 +#define AFE_CONN132_4 0x3110 +#define AFE_CONN132_5 0x3114 +#define AFE_CONN132_6 0x3118 +#define AFE_CONN132_7 0x311c +#define AFE_CONN133_0 0x3120 +#define AFE_CONN133_1 0x3124 +#define AFE_CONN133_2 0x3128 +#define AFE_CONN133_4 0x3130 +#define AFE_CONN133_5 0x3134 +#define AFE_CONN133_6 0x3138 +#define AFE_CONN133_7 0x313c +#define AFE_CONN134_0 0x3140 +#define AFE_CONN134_1 0x3144 +#define AFE_CONN134_2 0x3148 +#define AFE_CONN134_4 0x3150 +#define AFE_CONN134_5 0x3154 +#define AFE_CONN134_6 0x3158 +#define AFE_CONN134_7 0x315c +#define AFE_CONN135_0 0x3160 +#define AFE_CONN135_1 0x3164 +#define AFE_CONN135_2 0x3168 +#define AFE_CONN135_4 0x3170 +#define AFE_CONN135_5 0x3174 +#define AFE_CONN135_6 0x3178 +#define AFE_CONN135_7 0x317c +#define AFE_CONN136_0 
0x3180 +#define AFE_CONN136_1 0x3184 +#define AFE_CONN136_2 0x3188 +#define AFE_CONN136_4 0x3190 +#define AFE_CONN136_5 0x3194 +#define AFE_CONN136_6 0x3198 +#define AFE_CONN136_7 0x319c +#define AFE_CONN137_0 0x31a0 +#define AFE_CONN137_1 0x31a4 +#define AFE_CONN137_2 0x31a8 +#define AFE_CONN137_4 0x31b0 +#define AFE_CONN137_5 0x31b4 +#define AFE_CONN137_6 0x31b8 +#define AFE_CONN137_7 0x31bc +#define AFE_CONN138_0 0x31c0 +#define AFE_CONN138_1 0x31c4 +#define AFE_CONN138_2 0x31c8 +#define AFE_CONN138_4 0x31d0 +#define AFE_CONN138_5 0x31d4 +#define AFE_CONN138_6 0x31d8 +#define AFE_CONN138_7 0x31dc +#define AFE_CONN139_0 0x31e0 +#define AFE_CONN139_1 0x31e4 +#define AFE_CONN139_2 0x31e8 +#define AFE_CONN139_4 0x31f0 +#define AFE_CONN139_5 0x31f4 +#define AFE_CONN139_6 0x31f8 +#define AFE_CONN139_7 0x31fc +#define AFE_CONN148_0 0x3300 +#define AFE_CONN148_1 0x3304 +#define AFE_CONN148_2 0x3308 +#define AFE_CONN148_4 0x3310 +#define AFE_CONN148_5 0x3314 +#define AFE_CONN148_6 0x3318 +#define AFE_CONN148_7 0x331c +#define AFE_CONN149_0 0x3320 +#define AFE_CONN149_1 0x3324 +#define AFE_CONN149_2 0x3328 +#define AFE_CONN149_4 0x3330 +#define AFE_CONN149_5 0x3334 +#define AFE_CONN149_6 0x3338 +#define AFE_CONN149_7 0x333c +#define AFE_CONN180_0 0x3700 +#define AFE_CONN180_1 0x3704 +#define AFE_CONN180_2 0x3708 +#define AFE_CONN180_4 0x3710 +#define AFE_CONN180_5 0x3714 +#define AFE_CONN180_6 0x3718 +#define AFE_CONN180_7 0x371c +#define AFE_CONN181_0 0x3720 +#define AFE_CONN181_1 0x3724 +#define AFE_CONN181_2 0x3728 +#define AFE_CONN181_4 0x3730 +#define AFE_CONN181_5 0x3734 +#define AFE_CONN181_6 0x3738 +#define AFE_CONN181_7 0x373c +#define AFE_CONN182_0 0x3740 +#define AFE_CONN182_1 0x3744 +#define AFE_CONN182_2 0x3748 +#define AFE_CONN182_4 0x3750 +#define AFE_CONN182_5 0x3754 +#define AFE_CONN182_6 0x3758 +#define AFE_CONN182_7 0x375c +#define AFE_CONN183_0 0x3760 +#define AFE_CONN183_1 0x3764 +#define AFE_CONN183_2 0x3768 +#define AFE_CONN183_4 0x3770 +#define 
AFE_CONN183_5 0x3774 +#define AFE_CONN183_6 0x3778 +#define AFE_CONN183_7 0x377c +#define AFE_CONN184_0 0x3780 +#define AFE_CONN184_1 0x3784 +#define AFE_CONN184_2 0x3788 +#define AFE_CONN184_4 0x3790 +#define AFE_CONN184_5 0x3794 +#define AFE_CONN184_6 0x3798 +#define AFE_CONN184_7 0x379c +#define AFE_CONN185_0 0x37a0 +#define AFE_CONN185_1 0x37a4 +#define AFE_CONN185_2 0x37a8 +#define AFE_CONN185_4 0x37b0 +#define AFE_CONN185_5 0x37b4 +#define AFE_CONN185_6 0x37b8 +#define AFE_CONN185_7 0x37bc +#define AFE_CONN186_0 0x37c0 +#define AFE_CONN186_1 0x37c4 +#define AFE_CONN186_2 0x37c8 +#define AFE_CONN186_4 0x37d0 +#define AFE_CONN186_5 0x37d4 +#define AFE_CONN186_6 0x37d8 +#define AFE_CONN186_7 0x37dc +#define AFE_CONN187_0 0x37e0 +#define AFE_CONN187_1 0x37e4 +#define AFE_CONN187_2 0x37e8 +#define AFE_CONN187_4 0x37f0 +#define AFE_CONN187_5 0x37f4 +#define AFE_CONN187_6 0x37f8 +#define AFE_CONN187_7 0x37fc +#define AFE_CONN188_0 0x3800 +#define AFE_CONN188_1 0x3804 +#define AFE_CONN188_2 0x3808 +#define AFE_CONN188_4 0x3810 +#define AFE_CONN188_5 0x3814 +#define AFE_CONN188_6 0x3818 +#define AFE_CONN188_7 0x381c +#define AFE_CONN189_0 0x3820 +#define AFE_CONN189_1 0x3824 +#define AFE_CONN189_2 0x3828 +#define AFE_CONN189_4 0x3830 +#define AFE_CONN189_5 0x3834 +#define AFE_CONN189_6 0x3838 +#define AFE_CONN189_7 0x383c +#define AFE_CONN190_0 0x3840 +#define AFE_CONN190_1 0x3844 +#define AFE_CONN190_2 0x3848 +#define AFE_CONN190_4 0x3850 +#define AFE_CONN190_5 0x3854 +#define AFE_CONN190_6 0x3858 +#define AFE_CONN190_7 0x385c +#define AFE_CONN191_0 0x3860 +#define AFE_CONN191_1 0x3864 +#define AFE_CONN191_2 0x3868 +#define AFE_CONN191_4 0x3870 +#define AFE_CONN191_5 0x3874 +#define AFE_CONN191_6 0x3878 +#define AFE_CONN191_7 0x387c +#define AFE_CONN192_0 0x3880 +#define AFE_CONN192_1 0x3884 +#define AFE_CONN192_2 0x3888 +#define AFE_CONN192_4 0x3890 +#define AFE_CONN192_5 0x3894 +#define AFE_CONN192_6 0x3898 +#define AFE_CONN192_7 0x389c +#define AFE_CONN193_0 
0x38a0 +#define AFE_CONN193_1 0x38a4 +#define AFE_CONN193_2 0x38a8 +#define AFE_CONN193_4 0x38b0 +#define AFE_CONN193_5 0x38b4 +#define AFE_CONN193_6 0x38b8 +#define AFE_CONN193_7 0x38bc +#define AFE_CONN194_0 0x38c0 +#define AFE_CONN194_1 0x38c4 +#define AFE_CONN194_2 0x38c8 +#define AFE_CONN194_4 0x38d0 +#define AFE_CONN194_5 0x38d4 +#define AFE_CONN194_6 0x38d8 +#define AFE_CONN194_7 0x38dc +#define AFE_CONN195_0 0x38e0 +#define AFE_CONN195_1 0x38e4 +#define AFE_CONN195_2 0x38e8 +#define AFE_CONN195_4 0x38f0 +#define AFE_CONN195_5 0x38f4 +#define AFE_CONN195_6 0x38f8 +#define AFE_CONN195_7 0x38fc +#define AFE_CONN196_0 0x3900 +#define AFE_CONN196_1 0x3904 +#define AFE_CONN196_2 0x3908 +#define AFE_CONN196_4 0x3910 +#define AFE_CONN196_5 0x3914 +#define AFE_CONN196_6 0x3918 +#define AFE_CONN196_7 0x391c +#define AFE_CONN197_0 0x3920 +#define AFE_CONN197_1 0x3924 +#define AFE_CONN197_2 0x3928 +#define AFE_CONN197_4 0x3930 +#define AFE_CONN197_5 0x3934 +#define AFE_CONN197_6 0x3938 +#define AFE_CONN197_7 0x393c +#define AFE_CONN198_0 0x3940 +#define AFE_CONN198_1 0x3944 +#define AFE_CONN198_2 0x3948 +#define AFE_CONN198_4 0x3950 +#define AFE_CONN198_5 0x3954 +#define AFE_CONN198_6 0x3958 +#define AFE_CONN198_7 0x395c +#define AFE_CONN199_0 0x3960 +#define AFE_CONN199_1 0x3964 +#define AFE_CONN199_2 0x3968 +#define AFE_CONN199_4 0x3970 +#define AFE_CONN199_5 0x3974 +#define AFE_CONN199_6 0x3978 +#define AFE_CONN199_7 0x397c +#define AFE_CONN200_0 0x3980 +#define AFE_CONN200_1 0x3984 +#define AFE_CONN200_2 0x3988 +#define AFE_CONN200_4 0x3990 +#define AFE_CONN200_5 0x3994 +#define AFE_CONN200_6 0x3998 +#define AFE_CONN200_7 0x399c +#define AFE_CONN201_0 0x39a0 +#define AFE_CONN201_1 0x39a4 +#define AFE_CONN201_2 0x39a8 +#define AFE_CONN201_4 0x39b0 +#define AFE_CONN201_5 0x39b4 +#define AFE_CONN201_6 0x39b8 +#define AFE_CONN201_7 0x39bc +#define AFE_CONN202_0 0x39c0 +#define AFE_CONN202_1 0x39c4 +#define AFE_CONN202_2 0x39c8 +#define AFE_CONN202_4 0x39d0 +#define 
AFE_CONN202_5 0x39d4 +#define AFE_CONN202_6 0x39d8 +#define AFE_CONN202_7 0x39dc +#define AFE_CONN203_0 0x39e0 +#define AFE_CONN203_1 0x39e4 +#define AFE_CONN203_2 0x39e8 +#define AFE_CONN203_4 0x39f0 +#define AFE_CONN203_5 0x39f4 +#define AFE_CONN203_6 0x39f8 +#define AFE_CONN203_7 0x39fc +#define AFE_CONN204_0 0x3a00 +#define AFE_CONN204_1 0x3a04 +#define AFE_CONN204_2 0x3a08 +#define AFE_CONN204_4 0x3a10 +#define AFE_CONN204_5 0x3a14 +#define AFE_CONN204_6 0x3a18 +#define AFE_CONN204_7 0x3a1c +#define AFE_CONN205_0 0x3a20 +#define AFE_CONN205_1 0x3a24 +#define AFE_CONN205_2 0x3a28 +#define AFE_CONN205_4 0x3a30 +#define AFE_CONN205_5 0x3a34 +#define AFE_CONN205_6 0x3a38 +#define AFE_CONN205_7 0x3a3c +#define AFE_CONN206_0 0x3a40 +#define AFE_CONN206_1 0x3a44 +#define AFE_CONN206_2 0x3a48 +#define AFE_CONN206_4 0x3a50 +#define AFE_CONN206_5 0x3a54 +#define AFE_CONN206_6 0x3a58 +#define AFE_CONN206_7 0x3a5c +#define AFE_CONN207_0 0x3a60 +#define AFE_CONN207_1 0x3a64 +#define AFE_CONN207_2 0x3a68 +#define AFE_CONN207_4 0x3a70 +#define AFE_CONN207_5 0x3a74 +#define AFE_CONN207_6 0x3a78 +#define AFE_CONN207_7 0x3a7c +#define AFE_CONN208_0 0x3a80 +#define AFE_CONN208_1 0x3a84 +#define AFE_CONN208_2 0x3a88 +#define AFE_CONN208_4 0x3a90 +#define AFE_CONN208_5 0x3a94 +#define AFE_CONN208_6 0x3a98 +#define AFE_CONN208_7 0x3a9c +#define AFE_CONN209_0 0x3aa0 +#define AFE_CONN209_1 0x3aa4 +#define AFE_CONN209_2 0x3aa8 +#define AFE_CONN209_4 0x3ab0 +#define AFE_CONN209_5 0x3ab4 +#define AFE_CONN209_6 0x3ab8 +#define AFE_CONN209_7 0x3abc +#define AFE_CONN210_0 0x3ac0 +#define AFE_CONN210_1 0x3ac4 +#define AFE_CONN210_2 0x3ac8 +#define AFE_CONN210_4 0x3ad0 +#define AFE_CONN210_5 0x3ad4 +#define AFE_CONN210_6 0x3ad8 +#define AFE_CONN210_7 0x3adc +#define AFE_CONN211_0 0x3ae0 +#define AFE_CONN211_1 0x3ae4 +#define AFE_CONN211_2 0x3ae8 +#define AFE_CONN211_4 0x3af0 +#define AFE_CONN211_5 0x3af4 +#define AFE_CONN211_6 0x3af8 +#define AFE_CONN211_7 0x3afc +#define AFE_CONN_MON_CFG 
0x4080 +#define AFE_CONN_MON0 0x4084 +#define AFE_CONN_MON1 0x4088 +#define AFE_CONN_MON2 0x408c +#define AFE_CONN_MON3 0x4090 +#define AFE_CONN_MON4 0x4094 +#define AFE_CONN_MON5 0x4098 +#define AFE_CONN_RS_0 0x40a0 +#define AFE_CONN_RS_1 0x40a4 +#define AFE_CONN_RS_2 0x40a8 +#define AFE_CONN_RS_3 0x40ac +#define AFE_CONN_RS_4 0x40b0 +#define AFE_CONN_RS_5 0x40b4 +#define AFE_CONN_RS_6 0x40b8 +#define AFE_CONN_DI_0 0x40c0 +#define AFE_CONN_DI_1 0x40c4 +#define AFE_CONN_DI_2 0x40c8 +#define AFE_CONN_DI_3 0x40cc +#define AFE_CONN_DI_4 0x40d0 +#define AFE_CONN_DI_5 0x40d4 +#define AFE_CONN_DI_6 0x40d8 +#define AFE_CONN_16BIT_0 0x40e0 +#define AFE_CONN_16BIT_1 0x40e4 +#define AFE_CONN_16BIT_2 0x40e8 +#define AFE_CONN_16BIT_3 0x40ec +#define AFE_CONN_16BIT_4 0x40f0 +#define AFE_CONN_16BIT_5 0x40f4 +#define AFE_CONN_16BIT_6 0x40f8 +#define AFE_CONN_24BIT_0 0x4100 +#define AFE_CONN_24BIT_1 0x4104 +#define AFE_CONN_24BIT_2 0x4108 +#define AFE_CONN_24BIT_3 0x410c +#define AFE_CONN_24BIT_4 0x4110 +#define AFE_CONN_24BIT_5 0x4114 +#define AFE_CONN_24BIT_6 0x4118 +#define AFE_CBIP_CFG0 0x4380 +#define AFE_CBIP_SLV_DECODER_MON0 0x4384 +#define AFE_CBIP_SLV_DECODER_MON1 0x4388 +#define AFE_CBIP_SLV_MUX_MON_CFG 0x438c +#define AFE_CBIP_SLV_MUX_MON0 0x4390 +#define AFE_CBIP_SLV_MUX_MON1 0x4394 +#define AFE_MEMIF_CON0 0x4400 +#define AFE_MEMIF_ONE_HEART 0x4420 +#define AFE_DL0_BASE_MSB 0x4440 +#define AFE_DL0_BASE 0x4444 +#define AFE_DL0_CUR_MSB 0x4448 +#define AFE_DL0_CUR 0x444c +#define AFE_DL0_END_MSB 0x4450 +#define AFE_DL0_END 0x4454 +#define AFE_DL0_RCH_MON 0x4458 +#define AFE_DL0_LCH_MON 0x445c +#define AFE_DL0_CON0 0x4460 +#define AFE_DL0_MON0 0x4464 +#define AFE_DL1_BASE_MSB 0x4470 +#define AFE_DL1_BASE 0x4474 +#define AFE_DL1_CUR_MSB 0x4478 +#define AFE_DL1_CUR 0x447c +#define AFE_DL1_END_MSB 0x4480 +#define AFE_DL1_END 0x4484 +#define AFE_DL1_RCH_MON 0x4488 +#define AFE_DL1_LCH_MON 0x448c +#define AFE_DL1_CON0 0x4490 +#define AFE_DL1_MON0 0x4494 +#define 
AFE_DL2_BASE_MSB 0x44a0 +#define AFE_DL2_BASE 0x44a4 +#define AFE_DL2_CUR_MSB 0x44a8 +#define AFE_DL2_CUR 0x44ac +#define AFE_DL2_END_MSB 0x44b0 +#define AFE_DL2_END 0x44b4 +#define AFE_DL2_RCH_MON 0x44b8 +#define AFE_DL2_LCH_MON 0x44bc +#define AFE_DL2_CON0 0x44c0 +#define AFE_DL2_MON0 0x44c4 +#define AFE_DL3_BASE_MSB 0x44d0 +#define AFE_DL3_BASE 0x44d4 +#define AFE_DL3_CUR_MSB 0x44d8 +#define AFE_DL3_CUR 0x44dc +#define AFE_DL3_END_MSB 0x44e0 +#define AFE_DL3_END 0x44e4 +#define AFE_DL3_RCH_MON 0x44e8 +#define AFE_DL3_LCH_MON 0x44ec +#define AFE_DL3_CON0 0x44f0 +#define AFE_DL3_MON0 0x44f4 +#define AFE_DL4_BASE_MSB 0x4500 +#define AFE_DL4_BASE 0x4504 +#define AFE_DL4_CUR_MSB 0x4508 +#define AFE_DL4_CUR 0x450c +#define AFE_DL4_END_MSB 0x4510 +#define AFE_DL4_END 0x4514 +#define AFE_DL4_RCH_MON 0x4518 +#define AFE_DL4_LCH_MON 0x451c +#define AFE_DL4_CON0 0x4520 +#define AFE_DL4_MON0 0x4524 +#define AFE_DL5_BASE_MSB 0x4530 +#define AFE_DL5_BASE 0x4534 +#define AFE_DL5_CUR_MSB 0x4538 +#define AFE_DL5_CUR 0x453c +#define AFE_DL5_END_MSB 0x4540 +#define AFE_DL5_END 0x4544 +#define AFE_DL5_RCH_MON 0x4548 +#define AFE_DL5_LCH_MON 0x454c +#define AFE_DL5_CON0 0x4550 +#define AFE_DL5_MON0 0x4554 +#define AFE_DL6_BASE_MSB 0x4560 +#define AFE_DL6_BASE 0x4564 +#define AFE_DL6_CUR_MSB 0x4568 +#define AFE_DL6_CUR 0x456c +#define AFE_DL6_END_MSB 0x4570 +#define AFE_DL6_END 0x4574 +#define AFE_DL6_RCH_MON 0x4578 +#define AFE_DL6_LCH_MON 0x457c +#define AFE_DL6_CON0 0x4580 +#define AFE_DL6_MON0 0x4584 +#define AFE_DL7_BASE_MSB 0x4590 +#define AFE_DL7_BASE 0x4594 +#define AFE_DL7_CUR_MSB 0x4598 +#define AFE_DL7_CUR 0x459c +#define AFE_DL7_END_MSB 0x45a0 +#define AFE_DL7_END 0x45a4 +#define AFE_DL7_RCH_MON 0x45a8 +#define AFE_DL7_LCH_MON 0x45ac +#define AFE_DL7_CON0 0x45b0 +#define AFE_DL7_MON0 0x45b4 +#define AFE_DL8_BASE_MSB 0x45c0 +#define AFE_DL8_BASE 0x45c4 +#define AFE_DL8_CUR_MSB 0x45c8 +#define AFE_DL8_CUR 0x45cc +#define AFE_DL8_END_MSB 0x45d0 +#define AFE_DL8_END 0x45d4 
+#define AFE_DL8_RCH_MON 0x45d8 +#define AFE_DL8_LCH_MON 0x45dc +#define AFE_DL8_CON0 0x45e0 +#define AFE_DL8_MON0 0x45e4 +#define AFE_DL_4CH_BASE_MSB 0x45f0 +#define AFE_DL_4CH_BASE 0x45f4 +#define AFE_DL_4CH_CUR_MSB 0x45f8 +#define AFE_DL_4CH_CUR 0x45fc +#define AFE_DL_4CH_END_MSB 0x4600 +#define AFE_DL_4CH_END 0x4604 +#define AFE_DL_4CH_CON0 0x4610 +#define AFE_DL_4CH_MON0 0x4618 +#define AFE_DL_24CH_BASE_MSB 0x4620 +#define AFE_DL_24CH_BASE 0x4624 +#define AFE_DL_24CH_CUR_MSB 0x4628 +#define AFE_DL_24CH_CUR 0x462c +#define AFE_DL_24CH_END_MSB 0x4630 +#define AFE_DL_24CH_END 0x4634 +#define AFE_DL_24CH_CON0 0x4640 +#define AFE_DL_24CH_MON0 0x4648 +#define AFE_DL23_BASE_MSB 0x4680 +#define AFE_DL23_BASE 0x4684 +#define AFE_DL23_CUR_MSB 0x4688 +#define AFE_DL23_CUR 0x468c +#define AFE_DL23_END_MSB 0x4690 +#define AFE_DL23_END 0x4694 +#define AFE_DL23_RCH_MON 0x4698 +#define AFE_DL23_LCH_MON 0x469c +#define AFE_DL23_CON0 0x46a0 +#define AFE_DL23_MON0 0x46a4 +#define AFE_DL24_BASE_MSB 0x46b0 +#define AFE_DL24_BASE 0x46b4 +#define AFE_DL24_CUR_MSB 0x46b8 +#define AFE_DL24_CUR 0x46bc +#define AFE_DL24_END_MSB 0x46c0 +#define AFE_DL24_END 0x46c4 +#define AFE_DL24_RCH_MON 0x46c8 +#define AFE_DL24_LCH_MON 0x46cc +#define AFE_DL24_CON0 0x46d0 +#define AFE_DL24_MON0 0x46d4 +#define AFE_DL25_BASE_MSB 0x46e0 +#define AFE_DL25_BASE 0x46e4 +#define AFE_DL25_CUR_MSB 0x46e8 +#define AFE_DL25_CUR 0x46ec +#define AFE_DL25_END_MSB 0x46f0 +#define AFE_DL25_END 0x46f4 +#define AFE_DL25_RCH_MON 0x46f8 +#define AFE_DL25_LCH_MON 0x46fc +#define AFE_DL25_CON0 0x4700 +#define AFE_DL25_MON0 0x4704 +#define AFE_DL26_BASE_MSB 0x4710 +#define AFE_DL26_BASE 0x4714 +#define AFE_DL26_CUR_MSB 0x4718 +#define AFE_DL26_CUR 0x471c +#define AFE_DL26_END_MSB 0x4720 +#define AFE_DL26_END 0x4724 +#define AFE_DL26_RCH_MON 0x4728 +#define AFE_DL26_LCH_MON 0x472c +#define AFE_DL26_CON0 0x4730 +#define AFE_DL26_MON0 0x4734 +#define AFE_VUL0_BASE_MSB 0x4d60 +#define AFE_VUL0_BASE 0x4d64 +#define 
AFE_VUL0_CUR_MSB 0x4d68 +#define AFE_VUL0_CUR 0x4d6c +#define AFE_VUL0_END_MSB 0x4d70 +#define AFE_VUL0_END 0x4d74 +#define AFE_VUL0_RCH_MON 0x4d78 +#define AFE_VUL0_LCH_MON 0x4d7c +#define AFE_VUL0_CON0 0x4d80 +#define AFE_VUL0_MON0 0x4d84 +#define AFE_VUL1_BASE_MSB 0x4d90 +#define AFE_VUL1_BASE 0x4d94 +#define AFE_VUL1_CUR_MSB 0x4d98 +#define AFE_VUL1_CUR 0x4d9c +#define AFE_VUL1_END_MSB 0x4da0 +#define AFE_VUL1_END 0x4da4 +#define AFE_VUL1_RCH_MON 0x4da8 +#define AFE_VUL1_LCH_MON 0x4dac +#define AFE_VUL1_CON0 0x4db0 +#define AFE_VUL1_MON0 0x4db4 +#define AFE_VUL2_BASE_MSB 0x4dc0 +#define AFE_VUL2_BASE 0x4dc4 +#define AFE_VUL2_CUR_MSB 0x4dc8 +#define AFE_VUL2_CUR 0x4dcc +#define AFE_VUL2_END_MSB 0x4dd0 +#define AFE_VUL2_END 0x4dd4 +#define AFE_VUL2_RCH_MON 0x4dd8 +#define AFE_VUL2_LCH_MON 0x4ddc +#define AFE_VUL2_CON0 0x4de0 +#define AFE_VUL2_MON0 0x4de4 +#define AFE_VUL3_BASE_MSB 0x4df0 +#define AFE_VUL3_BASE 0x4df4 +#define AFE_VUL3_CUR_MSB 0x4df8 +#define AFE_VUL3_CUR 0x4dfc +#define AFE_VUL3_END_MSB 0x4e00 +#define AFE_VUL3_END 0x4e04 +#define AFE_VUL3_RCH_MON 0x4e08 +#define AFE_VUL3_LCH_MON 0x4e0c +#define AFE_VUL3_CON0 0x4e10 +#define AFE_VUL3_MON0 0x4e14 +#define AFE_VUL4_BASE_MSB 0x4e20 +#define AFE_VUL4_BASE 0x4e24 +#define AFE_VUL4_CUR_MSB 0x4e28 +#define AFE_VUL4_CUR 0x4e2c +#define AFE_VUL4_END_MSB 0x4e30 +#define AFE_VUL4_END 0x4e34 +#define AFE_VUL4_RCH_MON 0x4e38 +#define AFE_VUL4_LCH_MON 0x4e3c +#define AFE_VUL4_CON0 0x4e40 +#define AFE_VUL4_MON0 0x4e44 +#define AFE_VUL5_BASE_MSB 0x4e50 +#define AFE_VUL5_BASE 0x4e54 +#define AFE_VUL5_CUR_MSB 0x4e58 +#define AFE_VUL5_CUR 0x4e5c +#define AFE_VUL5_END_MSB 0x4e60 +#define AFE_VUL5_END 0x4e64 +#define AFE_VUL5_RCH_MON 0x4e68 +#define AFE_VUL5_LCH_MON 0x4e6c +#define AFE_VUL5_CON0 0x4e70 +#define AFE_VUL5_MON0 0x4e74 +#define AFE_VUL6_BASE_MSB 0x4e80 +#define AFE_VUL6_BASE 0x4e84 +#define AFE_VUL6_CUR_MSB 0x4e88 +#define AFE_VUL6_CUR 0x4e8c +#define AFE_VUL6_END_MSB 0x4e90 +#define AFE_VUL6_END 0x4e94 
+#define AFE_VUL6_RCH_MON 0x4e98 +#define AFE_VUL6_LCH_MON 0x4e9c +#define AFE_VUL6_CON0 0x4ea0 +#define AFE_VUL6_MON0 0x4ea4 +#define AFE_VUL7_BASE_MSB 0x4eb0 +#define AFE_VUL7_BASE 0x4eb4 +#define AFE_VUL7_CUR_MSB 0x4eb8 +#define AFE_VUL7_CUR 0x4ebc +#define AFE_VUL7_END_MSB 0x4ec0 +#define AFE_VUL7_END 0x4ec4 +#define AFE_VUL7_RCH_MON 0x4ec8 +#define AFE_VUL7_LCH_MON 0x4ecc +#define AFE_VUL7_CON0 0x4ed0 +#define AFE_VUL7_MON0 0x4ed4 +#define AFE_VUL8_BASE_MSB 0x4ee0 +#define AFE_VUL8_BASE 0x4ee4 +#define AFE_VUL8_CUR_MSB 0x4ee8 +#define AFE_VUL8_CUR 0x4eec +#define AFE_VUL8_END_MSB 0x4ef0 +#define AFE_VUL8_END 0x4ef4 +#define AFE_VUL8_RCH_MON 0x4ef8 +#define AFE_VUL8_LCH_MON 0x4efc +#define AFE_VUL8_CON0 0x4f00 +#define AFE_VUL8_MON0 0x4f04 +#define AFE_VUL9_BASE_MSB 0x4f10 +#define AFE_VUL9_BASE 0x4f14 +#define AFE_VUL9_CUR_MSB 0x4f18 +#define AFE_VUL9_CUR 0x4f1c +#define AFE_VUL9_END_MSB 0x4f20 +#define AFE_VUL9_END 0x4f24 +#define AFE_VUL9_RCH_MON 0x4f28 +#define AFE_VUL9_LCH_MON 0x4f2c +#define AFE_VUL9_CON0 0x4f30 +#define AFE_VUL9_MON0 0x4f34 +#define AFE_VUL10_BASE_MSB 0x4f40 +#define AFE_VUL10_BASE 0x4f44 +#define AFE_VUL10_CUR_MSB 0x4f48 +#define AFE_VUL10_CUR 0x4f4c +#define AFE_VUL10_END_MSB 0x4f50 +#define AFE_VUL10_END 0x4f54 +#define AFE_VUL10_RCH_MON 0x4f58 +#define AFE_VUL10_LCH_MON 0x4f5c +#define AFE_VUL10_CON0 0x4f60 +#define AFE_VUL10_MON0 0x4f64 +#define AFE_VUL24_BASE_MSB 0x4fa0 +#define AFE_VUL24_BASE 0x4fa4 +#define AFE_VUL24_CUR_MSB 0x4fa8 +#define AFE_VUL24_CUR 0x4fac +#define AFE_VUL24_END_MSB 0x4fb0 +#define AFE_VUL24_END 0x4fb4 +#define AFE_VUL24_CON0 0x4fb8 +#define AFE_VUL24_MON0 0x4fbc +#define AFE_VUL25_BASE_MSB 0x4fc0 +#define AFE_VUL25_BASE 0x4fc4 +#define AFE_VUL25_CUR_MSB 0x4fc8 +#define AFE_VUL25_CUR 0x4fcc +#define AFE_VUL25_END_MSB 0x4fd0 +#define AFE_VUL25_END 0x4fd4 +#define AFE_VUL25_CON0 0x4fd8 +#define AFE_VUL25_MON0 0x4fdc +#define AFE_VUL26_BASE_MSB 0x4fe0 +#define AFE_VUL26_BASE 0x4fe4 +#define AFE_VUL26_CUR_MSB 
0x4fe8 +#define AFE_VUL26_CUR 0x4fec +#define AFE_VUL26_END_MSB 0x4ff0 +#define AFE_VUL26_END 0x4ff4 +#define AFE_VUL26_CON0 0x4ff8 +#define AFE_VUL26_MON0 0x4ffc +#define AFE_VUL_CM0_BASE_MSB 0x51c0 +#define AFE_VUL_CM0_BASE 0x51c4 +#define AFE_VUL_CM0_CUR_MSB 0x51c8 +#define AFE_VUL_CM0_CUR 0x51cc +#define AFE_VUL_CM0_END_MSB 0x51d0 +#define AFE_VUL_CM0_END 0x51d4 +#define AFE_VUL_CM0_CON0 0x51d8 +#define AFE_VUL_CM1_BASE_MSB 0x51e0 +#define AFE_VUL_CM1_BASE 0x51e4 +#define AFE_VUL_CM1_CUR_MSB 0x51e8 +#define AFE_VUL_CM1_CUR 0x51ec +#define AFE_VUL_CM1_END_MSB 0x51f0 +#define AFE_VUL_CM1_END 0x51f4 +#define AFE_VUL_CM1_CON0 0x51f8 +#define AFE_VUL_CM2_BASE_MSB 0x5200 +#define AFE_VUL_CM2_BASE 0x5204 +#define AFE_VUL_CM2_CUR_MSB 0x5208 +#define AFE_VUL_CM2_CUR 0x520c +#define AFE_VUL_CM2_END_MSB 0x5210 +#define AFE_VUL_CM2_END 0x5214 +#define AFE_VUL_CM2_CON0 0x5218 +#define AFE_ETDM_IN0_BASE_MSB 0x5220 +#define AFE_ETDM_IN0_BASE 0x5224 +#define AFE_ETDM_IN0_CUR_MSB 0x5228 +#define AFE_ETDM_IN0_CUR 0x522c +#define AFE_ETDM_IN0_END_MSB 0x5230 +#define AFE_ETDM_IN0_END 0x5234 +#define AFE_ETDM_IN0_CON0 0x5238 +#define AFE_ETDM_IN1_BASE_MSB 0x5240 +#define AFE_ETDM_IN1_BASE 0x5244 +#define AFE_ETDM_IN1_CUR_MSB 0x5248 +#define AFE_ETDM_IN1_CUR 0x524c +#define AFE_ETDM_IN1_END_MSB 0x5250 +#define AFE_ETDM_IN1_END 0x5254 +#define AFE_ETDM_IN1_CON0 0x5258 +#define AFE_ETDM_IN2_BASE_MSB 0x5260 +#define AFE_ETDM_IN2_BASE 0x5264 +#define AFE_ETDM_IN2_CUR_MSB 0x5268 +#define AFE_ETDM_IN2_CUR 0x526c +#define AFE_ETDM_IN2_END_MSB 0x5270 +#define AFE_ETDM_IN2_END 0x5274 +#define AFE_ETDM_IN2_CON0 0x5278 +#define AFE_ETDM_IN3_BASE_MSB 0x5280 +#define AFE_ETDM_IN3_BASE 0x5284 +#define AFE_ETDM_IN3_CUR_MSB 0x5288 +#define AFE_ETDM_IN3_CUR 0x528c +#define AFE_ETDM_IN3_END_MSB 0x5290 +#define AFE_ETDM_IN3_END 0x5294 +#define AFE_ETDM_IN3_CON0 0x5298 +#define AFE_ETDM_IN4_BASE_MSB 0x52a0 +#define AFE_ETDM_IN4_BASE 0x52a4 +#define AFE_ETDM_IN4_CUR_MSB 0x52a8 +#define AFE_ETDM_IN4_CUR 
0x52ac +#define AFE_ETDM_IN4_END_MSB 0x52b0 +#define AFE_ETDM_IN4_END 0x52b4 +#define AFE_ETDM_IN4_CON0 0x52b8 +#define AFE_ETDM_IN5_BASE_MSB 0x52c0 +#define AFE_ETDM_IN5_BASE 0x52c4 +#define AFE_ETDM_IN5_CUR_MSB 0x52c8 +#define AFE_ETDM_IN5_CUR 0x52cc +#define AFE_ETDM_IN5_END_MSB 0x52d0 +#define AFE_ETDM_IN5_END 0x52d4 +#define AFE_ETDM_IN5_CON0 0x52d8 +#define AFE_ETDM_IN6_BASE_MSB 0x52e0 +#define AFE_ETDM_IN6_BASE 0x52e4 +#define AFE_ETDM_IN6_CUR_MSB 0x52e8 +#define AFE_ETDM_IN6_CUR 0x52ec +#define AFE_ETDM_IN6_END_MSB 0x52f0 +#define AFE_ETDM_IN6_END 0x52f4 +#define AFE_ETDM_IN6_CON0 0x52f8 +#define AFE_HDMI_OUT_BASE_MSB 0x5360 +#define AFE_HDMI_OUT_BASE 0x5364 +#define AFE_HDMI_OUT_CUR_MSB 0x5368 +#define AFE_HDMI_OUT_CUR 0x536c +#define AFE_HDMI_OUT_END_MSB 0x5370 +#define AFE_HDMI_OUT_END 0x5374 +#define AFE_HDMI_OUT_CON0 0x5378 +#define AFE_VUL24_RCH_MON 0x53e0 +#define AFE_VUL24_LCH_MON 0x53e4 +#define AFE_VUL25_RCH_MON 0x53e8 +#define AFE_VUL25_LCH_MON 0x53ec +#define AFE_VUL26_RCH_MON 0x53f0 +#define AFE_VUL26_LCH_MON 0x53f4 +#define AFE_VUL_CM0_RCH_MON 0x5458 +#define AFE_VUL_CM0_LCH_MON 0x545c +#define AFE_VUL_CM1_RCH_MON 0x5460 +#define AFE_VUL_CM1_LCH_MON 0x5464 +#define AFE_VUL_CM2_RCH_MON 0x5468 +#define AFE_VUL_CM2_LCH_MON 0x546c +#define AFE_DL_4CH_CH0_MON 0x54f4 +#define AFE_DL_4CH_CH1_MON 0x54f8 +#define AFE_DL_4CH_CH2_MON 0x54fc +#define AFE_DL_4CH_CH3_MON 0x5500 +#define AFE_DL_24CH_CH0_MON 0x5504 +#define AFE_DL_24CH_CH1_MON 0x5508 +#define AFE_DL_24CH_CH2_MON 0x550c +#define AFE_DL_24CH_CH3_MON 0x5510 +#define AFE_DL_24CH_CH4_MON 0x5514 +#define AFE_DL_24CH_CH5_MON 0x5518 +#define AFE_DL_24CH_CH6_MON 0x551c +#define AFE_DL_24CH_CH7_MON 0x5520 +#define AFE_DL_24CH_CH8_MON 0x5524 +#define AFE_DL_24CH_CH9_MON 0x5528 +#define AFE_DL_24CH_CH10_MON 0x552c +#define AFE_DL_24CH_CH11_MON 0x5530 +#define AFE_DL_24CH_CH12_MON 0x5534 +#define AFE_DL_24CH_CH13_MON 0x5538 +#define AFE_DL_24CH_CH14_MON 0x553c +#define AFE_DL_24CH_CH15_MON 0x5540 +#define 
AFE_SRAM_BOUND 0x5620 +#define AFE_SECURE_CON0 0x5624 +#define AFE_SECURE_CON1 0x5628 +#define AFE_SE_SECURE_CON0 0x5630 +#define AFE_SE_SECURE_CON1 0x5634 +#define AFE_SE_SECURE_CON2 0x5638 +#define AFE_SE_SECURE_CON3 0x563c +#define AFE_SE_PROT_SIDEBAND0 0x5640 +#define AFE_SE_PROT_SIDEBAND1 0x5644 +#define AFE_SE_PROT_SIDEBAND2 0x5648 +#define AFE_SE_PROT_SIDEBAND3 0x564c +#define AFE_SE_DOMAIN_SIDEBAND0 0x5650 +#define AFE_SE_DOMAIN_SIDEBAND1 0x5654 +#define AFE_SE_DOMAIN_SIDEBAND2 0x5658 +#define AFE_SE_DOMAIN_SIDEBAND3 0x565c +#define AFE_SE_DOMAIN_SIDEBAND4 0x5660 +#define AFE_SE_DOMAIN_SIDEBAND5 0x5664 +#define AFE_SE_DOMAIN_SIDEBAND6 0x5668 +#define AFE_SE_DOMAIN_SIDEBAND7 0x566c +#define AFE_SE_DOMAIN_SIDEBAND8 0x5670 +#define AFE_SE_DOMAIN_SIDEBAND9 0x5674 +#define AFE_PROT_SIDEBAND0_MON 0x5678 +#define AFE_PROT_SIDEBAND1_MON 0x567c +#define AFE_PROT_SIDEBAND2_MON 0x5680 +#define AFE_PROT_SIDEBAND3_MON 0x5684 +#define AFE_DOMAIN_SIDEBAND0_MON 0x5688 +#define AFE_DOMAIN_SIDEBAND1_MON 0x568c +#define AFE_DOMAIN_SIDEBAND2_MON 0x5690 +#define AFE_DOMAIN_SIDEBAND3_MON 0x5694 +#define AFE_DOMAIN_SIDEBAND4_MON 0x5698 +#define AFE_DOMAIN_SIDEBAND5_MON 0x569c +#define AFE_DOMAIN_SIDEBAND6_MON 0x56a0 +#define AFE_DOMAIN_SIDEBAND7_MON 0x56a4 +#define AFE_DOMAIN_SIDEBAND8_MON 0x56a8 +#define AFE_DOMAIN_SIDEBAND9_MON 0x56ac +#define AFE_SECURE_CONN0 0x56b0 +#define AFE_SECURE_CONN_ETDM0 0x56b4 +#define AFE_SECURE_CONN_ETDM1 0x56b8 +#define AFE_SECURE_CONN_ETDM2 0x56bc +#define AFE_SECURE_SRAM_CON0 0x56c0 +#define AFE_SECURE_SRAM_CON1 0x56c4 +#define AFE_SE_CONN_INPUT_MASK0 0x56d0 +#define AFE_SE_CONN_INPUT_MASK1 0x56d4 +#define AFE_SE_CONN_INPUT_MASK2 0x56d8 +#define AFE_SE_CONN_INPUT_MASK3 0x56dc +#define AFE_SE_CONN_INPUT_MASK4 0x56e0 +#define AFE_SE_CONN_INPUT_MASK5 0x56e4 +#define AFE_SE_CONN_INPUT_MASK6 0x56e8 +#define AFE_SE_CONN_INPUT_MASK7 0x56ec +#define AFE_NON_SE_CONN_INPUT_MASK0 0x56f0 +#define AFE_NON_SE_CONN_INPUT_MASK1 0x56f4 +#define 
AFE_NON_SE_CONN_INPUT_MASK2 0x56f8 +#define AFE_NON_SE_CONN_INPUT_MASK3 0x56fc +#define AFE_NON_SE_CONN_INPUT_MASK4 0x5700 +#define AFE_NON_SE_CONN_INPUT_MASK5 0x5704 +#define AFE_NON_SE_CONN_INPUT_MASK6 0x5708 +#define AFE_NON_SE_CONN_INPUT_MASK7 0x570c +#define AFE_SE_CONN_OUTPUT_SEL0 0x5710 +#define AFE_SE_CONN_OUTPUT_SEL1 0x5714 +#define AFE_SE_CONN_OUTPUT_SEL2 0x5718 +#define AFE_SE_CONN_OUTPUT_SEL3 0x571c +#define AFE_SE_CONN_OUTPUT_SEL4 0x5720 +#define AFE_SE_CONN_OUTPUT_SEL5 0x5724 +#define AFE_SE_CONN_OUTPUT_SEL6 0x5728 +#define AFE_SE_CONN_OUTPUT_SEL7 0x572c +#define AFE_PCM0_INTF_CON1_MASK_MON 0x5730 +#define AFE_PCM0_INTF_CON0_MASK_MON 0x5734 +#define AFE_CONNSYS_I2S_CON_MASK_MON 0x5738 +#define AFE_TDM_CON2_MASK_MON 0x5744 +#define AFE_MTKAIF0_CFG0_MASK_MON 0x574c +#define AFE_MTKAIF1_CFG0_MASK_MON 0x5750 +#define AFE_ADDA_UL0_SRC_CON0_MASK_MON 0x5754 +#define AFE_ADDA_UL1_SRC_CON0_MASK_MON 0x5758 +#define AFE_ADDA_UL2_SRC_CON0_MASK_MON 0x575c +#define AFE_ASRC_NEW_CON0 0x7800 +#define AFE_ASRC_NEW_CON1 0x7804 +#define AFE_ASRC_NEW_CON2 0x7808 +#define AFE_ASRC_NEW_CON3 0x780c +#define AFE_ASRC_NEW_CON4 0x7810 +#define AFE_ASRC_NEW_CON5 0x7814 +#define AFE_ASRC_NEW_CON6 0x7818 +#define AFE_ASRC_NEW_CON7 0x781c +#define AFE_ASRC_NEW_CON8 0x7820 +#define AFE_ASRC_NEW_CON9 0x7824 +#define AFE_ASRC_NEW_CON10 0x7828 +#define AFE_ASRC_NEW_CON11 0x782c +#define AFE_ASRC_NEW_CON12 0x7830 +#define AFE_ASRC_NEW_CON13 0x7834 +#define AFE_ASRC_NEW_CON14 0x7838 +#define AFE_ASRC_NEW_IP_VERSION 0x783c +#define AFE_GASRC0_NEW_CON0 0x7840 +#define AFE_GASRC0_NEW_CON1 0x7844 +#define AFE_GASRC0_NEW_CON2 0x7848 +#define AFE_GASRC0_NEW_CON3 0x784c +#define AFE_GASRC0_NEW_CON4 0x7850 +#define AFE_GASRC0_NEW_CON5 0x7854 +#define AFE_GASRC0_NEW_CON6 0x7858 +#define AFE_GASRC0_NEW_CON7 0x785c +#define AFE_GASRC0_NEW_CON8 0x7860 +#define AFE_GASRC0_NEW_CON9 0x7864 +#define AFE_GASRC0_NEW_CON10 0x7868 +#define AFE_GASRC0_NEW_CON11 0x786c +#define AFE_GASRC0_NEW_CON12 0x7870 
+#define AFE_GASRC0_NEW_CON13 0x7874 +#define AFE_GASRC0_NEW_CON14 0x7878 +#define AFE_GASRC0_NEW_IP_VERSION 0x787c +#define AFE_GASRC1_NEW_CON0 0x7880 +#define AFE_GASRC1_NEW_CON1 0x7884 +#define AFE_GASRC1_NEW_CON2 0x7888 +#define AFE_GASRC1_NEW_CON3 0x788c +#define AFE_GASRC1_NEW_CON4 0x7890 +#define AFE_GASRC1_NEW_CON5 0x7894 +#define AFE_GASRC1_NEW_CON6 0x7898 +#define AFE_GASRC1_NEW_CON7 0x789c +#define AFE_GASRC1_NEW_CON8 0x78a0 +#define AFE_GASRC1_NEW_CON9 0x78a4 +#define AFE_GASRC1_NEW_CON10 0x78a8 +#define AFE_GASRC1_NEW_CON11 0x78ac +#define AFE_GASRC1_NEW_CON12 0x78b0 +#define AFE_GASRC1_NEW_CON13 0x78b4 +#define AFE_GASRC1_NEW_CON14 0x78b8 +#define AFE_GASRC1_NEW_IP_VERSION 0x78bc +#define AFE_GASRC2_NEW_CON0 0x78c0 +#define AFE_GASRC2_NEW_CON1 0x78c4 +#define AFE_GASRC2_NEW_CON2 0x78c8 +#define AFE_GASRC2_NEW_CON3 0x78cc +#define AFE_GASRC2_NEW_CON4 0x78d0 +#define AFE_GASRC2_NEW_CON5 0x78d4 +#define AFE_GASRC2_NEW_CON6 0x78d8 +#define AFE_GASRC2_NEW_CON7 0x78dc +#define AFE_GASRC2_NEW_CON8 0x78e0 +#define AFE_GASRC2_NEW_CON9 0x78e4 +#define AFE_GASRC2_NEW_CON10 0x78e8 +#define AFE_GASRC2_NEW_CON11 0x78ec +#define AFE_GASRC2_NEW_CON12 0x78f0 +#define AFE_GASRC2_NEW_CON13 0x78f4 +#define AFE_GASRC2_NEW_CON14 0x78f8 +#define AFE_GASRC2_NEW_IP_VERSION 0x78fc +#define AFE_GASRC3_NEW_CON0 0x7900 +#define AFE_GASRC3_NEW_CON1 0x7904 +#define AFE_GASRC3_NEW_CON2 0x7908 +#define AFE_GASRC3_NEW_CON3 0x790c +#define AFE_GASRC3_NEW_CON4 0x7910 +#define AFE_GASRC3_NEW_CON5 0x7914 +#define AFE_GASRC3_NEW_CON6 0x7918 +#define AFE_GASRC3_NEW_CON7 0x791c +#define AFE_GASRC3_NEW_CON8 0x7920 +#define AFE_GASRC3_NEW_CON9 0x7924 +#define AFE_GASRC3_NEW_CON10 0x7928 +#define AFE_GASRC3_NEW_CON11 0x792c +#define AFE_GASRC3_NEW_CON12 0x7930 +#define AFE_GASRC3_NEW_CON13 0x7934 +#define AFE_GASRC3_NEW_CON14 0x7938 +#define AFE_GASRC3_NEW_IP_VERSION 0x793c +#define AFE_GASRC4_NEW_CON0 0x7940 +#define AFE_GASRC4_NEW_CON1 0x7944 +#define AFE_GASRC4_NEW_CON2 0x7948 +#define 
AFE_GASRC4_NEW_CON3 0x794c +#define AFE_GASRC4_NEW_CON4 0x7950 +#define AFE_GASRC4_NEW_CON5 0x7954 +#define AFE_GASRC4_NEW_CON6 0x7958 +#define AFE_GASRC4_NEW_CON7 0x795c +#define AFE_GASRC4_NEW_CON8 0x7960 +#define AFE_GASRC4_NEW_CON9 0x7964 +#define AFE_GASRC4_NEW_CON10 0x7968 +#define AFE_GASRC4_NEW_CON11 0x796c +#define AFE_GASRC4_NEW_CON12 0x7970 +#define AFE_GASRC4_NEW_CON13 0x7974 +#define AFE_GASRC4_NEW_CON14 0x7978 +#define AFE_GASRC4_NEW_IP_VERSION 0x797c +#define AFE_GASRC5_NEW_CON0 0x7980 +#define AFE_GASRC5_NEW_CON1 0x7984 +#define AFE_GASRC5_NEW_CON2 0x7988 +#define AFE_GASRC5_NEW_CON3 0x798c +#define AFE_GASRC5_NEW_CON4 0x7990 +#define AFE_GASRC5_NEW_CON5 0x7994 +#define AFE_GASRC5_NEW_CON6 0x7998 +#define AFE_GASRC5_NEW_CON7 0x799c +#define AFE_GASRC5_NEW_CON8 0x79a0 +#define AFE_GASRC5_NEW_CON9 0x79a4 +#define AFE_GASRC5_NEW_CON10 0x79a8 +#define AFE_GASRC5_NEW_CON11 0x79ac +#define AFE_GASRC5_NEW_CON12 0x79b0 +#define AFE_GASRC5_NEW_CON13 0x79b4 +#define AFE_GASRC5_NEW_CON14 0x79b8 +#define AFE_GASRC5_NEW_IP_VERSION 0x79bc +#define AFE_GASRC6_NEW_CON0 0x79c0 +#define AFE_GASRC6_NEW_CON1 0x79c4 +#define AFE_GASRC6_NEW_CON2 0x79c8 +#define AFE_GASRC6_NEW_CON3 0x79cc +#define AFE_GASRC6_NEW_CON4 0x79d0 +#define AFE_GASRC6_NEW_CON5 0x79d4 +#define AFE_GASRC6_NEW_CON6 0x79d8 +#define AFE_GASRC6_NEW_CON7 0x79dc +#define AFE_GASRC6_NEW_CON8 0x79e0 +#define AFE_GASRC6_NEW_CON9 0x79e4 +#define AFE_GASRC6_NEW_CON10 0x79e8 +#define AFE_GASRC6_NEW_CON11 0x79ec +#define AFE_GASRC6_NEW_CON12 0x79f0 +#define AFE_GASRC6_NEW_CON13 0x79f4 +#define AFE_GASRC6_NEW_CON14 0x79f8 +#define AFE_GASRC6_NEW_IP_VERSION 0x79fc +#define AFE_GASRC7_NEW_CON0 0x7a00 +#define AFE_GASRC7_NEW_CON1 0x7a04 +#define AFE_GASRC7_NEW_CON2 0x7a08 +#define AFE_GASRC7_NEW_CON3 0x7a0c +#define AFE_GASRC7_NEW_CON4 0x7a10 +#define AFE_GASRC7_NEW_CON5 0x7a14 +#define AFE_GASRC7_NEW_CON6 0x7a18 +#define AFE_GASRC7_NEW_CON7 0x7a1c +#define AFE_GASRC7_NEW_CON8 0x7a20 +#define AFE_GASRC7_NEW_CON9 
0x7a24 +#define AFE_GASRC7_NEW_CON10 0x7a28 +#define AFE_GASRC7_NEW_CON11 0x7a2c +#define AFE_GASRC7_NEW_CON12 0x7a30 +#define AFE_GASRC7_NEW_CON13 0x7a34 +#define AFE_GASRC7_NEW_CON14 0x7a38 +#define AFE_GASRC7_NEW_IP_VERSION 0x7a3c +#define AFE_GASRC8_NEW_CON0 0x7a40 +#define AFE_GASRC8_NEW_CON1 0x7a44 +#define AFE_GASRC8_NEW_CON2 0x7a48 +#define AFE_GASRC8_NEW_CON3 0x7a4c +#define AFE_GASRC8_NEW_CON4 0x7a50 +#define AFE_GASRC8_NEW_CON5 0x7a54 +#define AFE_GASRC8_NEW_CON6 0x7a58 +#define AFE_GASRC8_NEW_CON7 0x7a5c +#define AFE_GASRC8_NEW_CON8 0x7a60 +#define AFE_GASRC8_NEW_CON9 0x7a64 +#define AFE_GASRC8_NEW_CON10 0x7a68 +#define AFE_GASRC8_NEW_CON11 0x7a6c +#define AFE_GASRC8_NEW_CON12 0x7a70 +#define AFE_GASRC8_NEW_CON13 0x7a74 +#define AFE_GASRC8_NEW_CON14 0x7a78 +#define AFE_GASRC8_NEW_IP_VERSION 0x7a7c +#define AFE_GASRC9_NEW_CON0 0x7a80 +#define AFE_GASRC9_NEW_CON1 0x7a84 +#define AFE_GASRC9_NEW_CON2 0x7a88 +#define AFE_GASRC9_NEW_CON3 0x7a8c +#define AFE_GASRC9_NEW_CON4 0x7a90 +#define AFE_GASRC9_NEW_CON5 0x7a94 +#define AFE_GASRC9_NEW_CON6 0x7a98 +#define AFE_GASRC9_NEW_CON7 0x7a9c +#define AFE_GASRC9_NEW_CON8 0x7aa0 +#define AFE_GASRC9_NEW_CON9 0x7aa4 +#define AFE_GASRC9_NEW_CON10 0x7aa8 +#define AFE_GASRC9_NEW_CON11 0x7aac +#define AFE_GASRC9_NEW_CON12 0x7ab0 +#define AFE_GASRC9_NEW_CON13 0x7ab4 +#define AFE_GASRC9_NEW_CON14 0x7ab8 +#define AFE_GASRC9_NEW_IP_VERSION 0x7abc +#define AFE_GASRC10_NEW_CON0 0x7ac0 +#define AFE_GASRC10_NEW_CON1 0x7ac4 +#define AFE_GASRC10_NEW_CON2 0x7ac8 +#define AFE_GASRC10_NEW_CON3 0x7acc +#define AFE_GASRC10_NEW_CON4 0x7ad0 +#define AFE_GASRC10_NEW_CON5 0x7ad4 +#define AFE_GASRC10_NEW_CON6 0x7ad8 +#define AFE_GASRC10_NEW_CON7 0x7adc +#define AFE_GASRC10_NEW_CON8 0x7ae0 +#define AFE_GASRC10_NEW_CON9 0x7ae4 +#define AFE_GASRC10_NEW_CON10 0x7ae8 +#define AFE_GASRC10_NEW_CON11 0x7aec +#define AFE_GASRC10_NEW_CON12 0x7af0 +#define AFE_GASRC10_NEW_CON13 0x7af4 +#define AFE_GASRC10_NEW_CON14 0x7af8 +#define 
AFE_GASRC10_NEW_IP_VERSION 0x7afc +#define AFE_GASRC11_NEW_CON0 0x7b00 +#define AFE_GASRC11_NEW_CON1 0x7b04 +#define AFE_GASRC11_NEW_CON2 0x7b08 +#define AFE_GASRC11_NEW_CON3 0x7b0c +#define AFE_GASRC11_NEW_CON4 0x7b10 +#define AFE_GASRC11_NEW_CON5 0x7b14 +#define AFE_GASRC11_NEW_CON6 0x7b18 +#define AFE_GASRC11_NEW_CON7 0x7b1c +#define AFE_GASRC11_NEW_CON8 0x7b20 +#define AFE_GASRC11_NEW_CON9 0x7b24 +#define AFE_GASRC11_NEW_CON10 0x7b28 +#define AFE_GASRC11_NEW_CON11 0x7b2c +#define AFE_GASRC11_NEW_CON12 0x7b30 +#define AFE_GASRC11_NEW_CON13 0x7b34 +#define AFE_GASRC11_NEW_CON14 0x7b38 +#define AFE_GASRC11_NEW_IP_VERSION 0x7b3c +#define AFE_GASRC12_NEW_CON0 0x7b40 +#define AFE_GASRC12_NEW_CON1 0x7b44 +#define AFE_GASRC12_NEW_CON2 0x7b48 +#define AFE_GASRC12_NEW_CON3 0x7b4c +#define AFE_GASRC12_NEW_CON4 0x7b50 +#define AFE_GASRC12_NEW_CON5 0x7b54 +#define AFE_GASRC12_NEW_CON6 0x7b58 +#define AFE_GASRC12_NEW_CON7 0x7b5c +#define AFE_GASRC12_NEW_CON8 0x7b60 +#define AFE_GASRC12_NEW_CON9 0x7b64 +#define AFE_GASRC12_NEW_CON10 0x7b68 +#define AFE_GASRC12_NEW_CON11 0x7b6c +#define AFE_GASRC12_NEW_CON12 0x7b70 +#define AFE_GASRC12_NEW_CON13 0x7b74 +#define AFE_GASRC12_NEW_CON14 0x7b78 +#define AFE_GASRC12_NEW_IP_VERSION 0x7b7c +#define AFE_GASRC13_NEW_CON0 0x7b80 +#define AFE_GASRC13_NEW_CON1 0x7b84 +#define AFE_GASRC13_NEW_CON2 0x7b88 +#define AFE_GASRC13_NEW_CON3 0x7b8c +#define AFE_GASRC13_NEW_CON4 0x7b90 +#define AFE_GASRC13_NEW_CON5 0x7b94 +#define AFE_GASRC13_NEW_CON6 0x7b98 +#define AFE_GASRC13_NEW_CON7 0x7b9c +#define AFE_GASRC13_NEW_CON8 0x7ba0 +#define AFE_GASRC13_NEW_CON9 0x7ba4 +#define AFE_GASRC13_NEW_CON10 0x7ba8 +#define AFE_GASRC13_NEW_CON11 0x7bac +#define AFE_GASRC13_NEW_CON12 0x7bb0 +#define AFE_GASRC13_NEW_CON13 0x7bb4 +#define AFE_GASRC13_NEW_CON14 0x7bb8 +#define AFE_GASRC13_NEW_IP_VERSION 0x7bbc +#define AFE_GASRC14_NEW_CON0 0x7bc0 +#define AFE_GASRC14_NEW_CON1 0x7bc4 +#define AFE_GASRC14_NEW_CON2 0x7bc8 +#define AFE_GASRC14_NEW_CON3 0x7bcc +#define 
AFE_GASRC14_NEW_CON4 0x7bd0 +#define AFE_GASRC14_NEW_CON5 0x7bd4 +#define AFE_GASRC14_NEW_CON6 0x7bd8 +#define AFE_GASRC14_NEW_CON7 0x7bdc +#define AFE_GASRC14_NEW_CON8 0x7be0 +#define AFE_GASRC14_NEW_CON9 0x7be4 +#define AFE_GASRC14_NEW_CON10 0x7be8 +#define AFE_GASRC14_NEW_CON11 0x7bec +#define AFE_GASRC14_NEW_CON12 0x7bf0 +#define AFE_GASRC14_NEW_CON13 0x7bf4 +#define AFE_GASRC14_NEW_CON14 0x7bf8 +#define AFE_GASRC14_NEW_IP_VERSION 0x7bfc +#define AFE_GASRC15_NEW_CON0 0x7c00 +#define AFE_GASRC15_NEW_CON1 0x7c04 +#define AFE_GASRC15_NEW_CON2 0x7c08 +#define AFE_GASRC15_NEW_CON3 0x7c0c +#define AFE_GASRC15_NEW_CON4 0x7c10 +#define AFE_GASRC15_NEW_CON5 0x7c14 +#define AFE_GASRC15_NEW_CON6 0x7c18 +#define AFE_GASRC15_NEW_CON7 0x7c1c +#define AFE_GASRC15_NEW_CON8 0x7c20 +#define AFE_GASRC15_NEW_CON9 0x7c24 +#define AFE_GASRC15_NEW_CON10 0x7c28 +#define AFE_GASRC15_NEW_CON11 0x7c2c +#define AFE_GASRC15_NEW_CON12 0x7c30 +#define AFE_GASRC15_NEW_CON13 0x7c34 +#define AFE_GASRC15_NEW_CON14 0x7c38 +#define AFE_GASRC15_NEW_IP_VERSION 0x7c3c + +#define AFE_MAX_REGISTER AFE_GASRC15_NEW_IP_VERSION + +#define AFE_IRQ_STATUS_BITS 0x87FFFFFF +#define AFE_IRQ_CNT_SHIFT 0 +#define AFE_IRQ_CNT_MASK 0xffffff +#endif + diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig index 4bd14ae330d582..2788514be6b13d 100644 --- a/sound/soc/pxa/Kconfig +++ b/sound/soc/pxa/Kconfig @@ -54,4 +54,10 @@ config SND_PXA910_SOC Say Y if you want to add support for SoC audio on the Marvell PXA910 reference platform. 
+config SND_PXA2XX_LIB + tristate + select SND_DMAENGINE_PCM + +config SND_PXA2XX_LIB_AC97 + bool endmenu diff --git a/sound/soc/pxa/Makefile b/sound/soc/pxa/Makefile index 93b4e57eaa5c5b..bf504a85657c36 100644 --- a/sound/soc/pxa/Makefile +++ b/sound/soc/pxa/Makefile @@ -1,8 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 # PXA Platform Support snd-soc-pxa2xx-y := pxa2xx-pcm.o -snd-soc-pxa2xx-ac97-y := pxa2xx-ac97.o +snd-soc-pxa2xx-ac97-y := pxa2xx-ac97.o pxa2xx-ac97-lib.o snd-soc-pxa2xx-i2s-y := pxa2xx-i2s.o +snd-soc-pxa2xx-lib-y := pxa2xx-pcm-lib.o snd-soc-pxa-ssp-y := pxa-ssp.o snd-soc-mmp-sspa-y := mmp-sspa.o @@ -11,6 +12,7 @@ obj-$(CONFIG_SND_PXA2XX_SOC_AC97) += snd-soc-pxa2xx-ac97.o obj-$(CONFIG_SND_PXA2XX_SOC_I2S) += snd-soc-pxa2xx-i2s.o obj-$(CONFIG_SND_PXA_SOC_SSP) += snd-soc-pxa-ssp.o obj-$(CONFIG_SND_MMP_SOC_SSPA) += snd-soc-mmp-sspa.o +obj-$(CONFIG_SND_PXA2XX_LIB) += snd-soc-pxa2xx-lib.o # PXA Machine Support snd-soc-spitz-y := spitz.o diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c index 73f36c9dd35c5d..fbbce81680cf8d 100644 --- a/sound/soc/pxa/mmp-sspa.c +++ b/sound/soc/pxa/mmp-sspa.c @@ -20,8 +20,8 @@ #include #include #include -#include #include +#include "pxa2xx-lib.h" #include "mmp-sspa.h" /* diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index 37bd8dbd541f72..f8054c1c59fae7 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c @@ -27,9 +27,9 @@ #include #include #include -#include #include +#include "pxa2xx-lib.h" #include "pxa-ssp.h" /* diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/soc/pxa/pxa2xx-ac97-lib.c similarity index 91% rename from sound/arm/pxa2xx-ac97-lib.c rename to sound/soc/pxa/pxa2xx-ac97-lib.c index 79eb557d4942f1..d9c3935636da5f 100644 --- a/sound/arm/pxa2xx-ac97-lib.c +++ b/sound/soc/pxa/pxa2xx-ac97-lib.c @@ -23,13 +23,13 @@ #include #include "pxa2xx-ac97-regs.h" +#include "pxa2xx-lib.h" static DEFINE_MUTEX(car_mutex); static DECLARE_WAIT_QUEUE_HEAD(gsr_wq); static volatile long 
gsr_bits; static struct clk *ac97_clk; static struct clk *ac97conf_clk; -static int reset_gpio; struct gpio_desc *rst_gpio; static void __iomem *ac97_reg_base; @@ -83,7 +83,6 @@ int pxa2xx_ac97_read(int slot, unsigned short reg) wait_event_timeout(gsr_wq, (readl(ac97_reg_base + GSR) | gsr_bits) & GSR_SDONE, 1); return val; } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_read); int pxa2xx_ac97_write(int slot, unsigned short reg, unsigned short val) { @@ -113,7 +112,6 @@ int pxa2xx_ac97_write(int slot, unsigned short reg, unsigned short val) return ret; } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_write); #ifdef CONFIG_PXA25x static inline void pxa_ac97_warm_pxa25x(void) @@ -140,10 +138,10 @@ static inline void pxa_ac97_warm_pxa27x(void) gsr_bits = 0; /* warm reset broken on Bulverde, so manually keep AC97 reset high */ - pxa27x_configure_ac97reset(reset_gpio, true); + pxa27x_configure_ac97reset(rst_gpio, true); udelay(10); writel(readl(ac97_reg_base + GCR) | (GCR_WARM_RST), ac97_reg_base + GCR); - pxa27x_configure_ac97reset(reset_gpio, false); + pxa27x_configure_ac97reset(rst_gpio, false); udelay(500); } @@ -226,7 +224,6 @@ bool pxa2xx_ac97_try_warm_reset(void) return true; } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_try_warm_reset); bool pxa2xx_ac97_try_cold_reset(void) { @@ -263,7 +260,6 @@ bool pxa2xx_ac97_try_cold_reset(void) return true; } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_try_cold_reset); void pxa2xx_ac97_finish_reset(void) @@ -273,7 +269,6 @@ void pxa2xx_ac97_finish_reset(void) gcr |= GCR_SDONE_IE|GCR_CDONE_IE; writel(gcr, ac97_reg_base + GCR); } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_finish_reset); static irqreturn_t pxa2xx_ac97_irq(int irq, void *dev_id) { @@ -307,14 +302,12 @@ int pxa2xx_ac97_hw_suspend(void) clk_disable_unprepare(ac97_clk); return 0; } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_suspend); int pxa2xx_ac97_hw_resume(void) { clk_prepare_enable(ac97_clk); return 0; } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_resume); #endif int pxa2xx_ac97_hw_probe(struct platform_device *dev) @@ -328,24 +321,14 @@ int 
pxa2xx_ac97_hw_probe(struct platform_device *dev) return PTR_ERR(ac97_reg_base); } - if (dev->dev.of_node) { + if (cpu_is_pxa27x()) { /* Assert reset using GPIOD_OUT_HIGH, because reset is GPIO_ACTIVE_LOW */ - rst_gpio = devm_gpiod_get(&dev->dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(rst_gpio)) { - ret = PTR_ERR(rst_gpio); - if (ret == -ENOENT) - reset_gpio = -1; - else if (ret) - return ret; - } else { - reset_gpio = desc_to_gpio(rst_gpio); - } - } else { - if (cpu_is_pxa27x()) - reset_gpio = 113; - } + rst_gpio = devm_gpiod_get_optional(&dev->dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(rst_gpio)) + return dev_err_probe(&dev->dev, PTR_ERR(rst_gpio), + "reset gpio failed\n"); - if (cpu_is_pxa27x()) { /* * This gpio is needed for a work-around to a bug in the ac97 * controller during warm reset. The direction and level is set @@ -353,7 +336,7 @@ int pxa2xx_ac97_hw_probe(struct platform_device *dev) * AC97_nRESET alt function to generic gpio. */ gpiod_set_consumer_name(rst_gpio, "pxa27x ac97 reset"); - pxa27x_configure_ac97reset(reset_gpio, false); + pxa27x_configure_ac97reset(rst_gpio, false); ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK"); if (IS_ERR(ac97conf_clk)) { @@ -399,7 +382,6 @@ int pxa2xx_ac97_hw_probe(struct platform_device *dev) err_conf: return ret; } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe); void pxa2xx_ac97_hw_remove(struct platform_device *dev) { @@ -413,7 +395,6 @@ void pxa2xx_ac97_hw_remove(struct platform_device *dev) clk_put(ac97_clk); ac97_clk = NULL; } -EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_remove); u32 pxa2xx_ac97_read_modr(void) { diff --git a/sound/arm/pxa2xx-ac97-regs.h b/sound/soc/pxa/pxa2xx-ac97-regs.h similarity index 100% rename from sound/arm/pxa2xx-ac97-regs.h rename to sound/soc/pxa/pxa2xx-ac97-regs.h diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c index a0c67260291867..7b9036947dfc8a 100644 --- a/sound/soc/pxa/pxa2xx-ac97.c +++ b/sound/soc/pxa/pxa2xx-ac97.c @@ -18,11 +18,12 @@ #include #include #include 
-#include #include #include +#include "pxa2xx-lib.h" + #define PCDR 0x0040 /* PCM FIFO Data Register */ #define MODR 0x0140 /* Modem FIFO Data Register */ #define MCDR 0x0060 /* Mic-in FIFO Data Register */ @@ -246,8 +247,7 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev) } ctrl = snd_ac97_controller_register(&pxa2xx_ac97_ops, &pdev->dev, - AC97_SLOTS_AVAILABLE_ALL, - NULL); + AC97_SLOTS_AVAILABLE_ALL); if (IS_ERR(ctrl)) return PTR_ERR(ctrl); diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c index f6ada6cffc8898..fe1df78926f598 100644 --- a/sound/soc/pxa/pxa2xx-i2s.c +++ b/sound/soc/pxa/pxa2xx-i2s.c @@ -18,11 +18,11 @@ #include #include #include -#include #include #include +#include "pxa2xx-lib.h" #include "pxa2xx-i2s.h" /* diff --git a/sound/soc/pxa/pxa2xx-lib.h b/sound/soc/pxa/pxa2xx-lib.h new file mode 100644 index 00000000000000..3a9d6ac8d367ca --- /dev/null +++ b/sound/soc/pxa/pxa2xx-lib.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PXA2XX_LIB_H +#define __PXA2XX_LIB_H + +#include +#include + +/* PCM */ +struct snd_pcm_substream; +struct snd_pcm_hw_params; +struct snd_soc_pcm_runtime; +struct snd_pcm; +struct snd_soc_component; + +int pxa2xx_soc_pcm_new(struct snd_soc_component *component, + struct snd_soc_pcm_runtime *rtd); +int pxa2xx_soc_pcm_open(struct snd_soc_component *component, + struct snd_pcm_substream *substream); +int pxa2xx_soc_pcm_close(struct snd_soc_component *component, + struct snd_pcm_substream *substream); +int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component, + struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params); +int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component, + struct snd_pcm_substream *substream); +int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component, + struct snd_pcm_substream *substream, int cmd); +snd_pcm_uframes_t pxa2xx_soc_pcm_pointer(struct snd_soc_component *component, + struct snd_pcm_substream *substream); + 
+/* AC97 */ +int pxa2xx_ac97_read(int slot, unsigned short reg); +int pxa2xx_ac97_write(int slot, unsigned short reg, unsigned short val); + +bool pxa2xx_ac97_try_warm_reset(void); +bool pxa2xx_ac97_try_cold_reset(void); +void pxa2xx_ac97_finish_reset(void); + +int pxa2xx_ac97_hw_suspend(void); +int pxa2xx_ac97_hw_resume(void); + +int pxa2xx_ac97_hw_probe(struct platform_device *dev); +void pxa2xx_ac97_hw_remove(struct platform_device *dev); + +#endif diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/soc/pxa/pxa2xx-pcm-lib.c similarity index 86% rename from sound/arm/pxa2xx-pcm-lib.c rename to sound/soc/pxa/pxa2xx-pcm-lib.c index 571e9d909cdf0b..88a9d322630212 100644 --- a/sound/arm/pxa2xx-pcm-lib.c +++ b/sound/soc/pxa/pxa2xx-pcm-lib.c @@ -9,9 +9,10 @@ #include #include #include -#include #include +#include "pxa2xx-lib.h" + static const struct snd_pcm_hardware pxa2xx_pcm_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | @@ -29,8 +30,8 @@ static const struct snd_pcm_hardware pxa2xx_pcm_hardware = { .fifo_size = 32, }; -int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params) +static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) { struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream); struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); @@ -56,28 +57,24 @@ int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream, return 0; } -EXPORT_SYMBOL(pxa2xx_pcm_hw_params); -int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +static int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { return snd_dmaengine_pcm_trigger(substream, cmd); } -EXPORT_SYMBOL(pxa2xx_pcm_trigger); -snd_pcm_uframes_t +static snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream) { return snd_dmaengine_pcm_pointer(substream); } -EXPORT_SYMBOL(pxa2xx_pcm_pointer); -int pxa2xx_pcm_prepare(struct snd_pcm_substream 
*substream) +static int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream) { return 0; } -EXPORT_SYMBOL(pxa2xx_pcm_prepare); -int pxa2xx_pcm_open(struct snd_pcm_substream *substream) +static int pxa2xx_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_pcm_runtime *runtime = substream->runtime; @@ -114,22 +111,19 @@ int pxa2xx_pcm_open(struct snd_pcm_substream *substream) substream, dma_request_slave_channel(snd_soc_rtd_to_cpu(rtd, 0)->dev, dma_params->chan_name)); } -EXPORT_SYMBOL(pxa2xx_pcm_open); -int pxa2xx_pcm_close(struct snd_pcm_substream *substream) +static int pxa2xx_pcm_close(struct snd_pcm_substream *substream) { return snd_dmaengine_pcm_close_release_chan(substream); } -EXPORT_SYMBOL(pxa2xx_pcm_close); -int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm) +static int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm) { size_t size = pxa2xx_pcm_hardware.buffer_bytes_max; return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_WC, pcm->card->dev, size); } -EXPORT_SYMBOL(pxa2xx_pcm_preallocate_dma_buffer); int pxa2xx_soc_pcm_new(struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd) diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c index ff0fbb61dccd98..71b7bd948b5ee6 100644 --- a/sound/soc/pxa/pxa2xx-pcm.c +++ b/sound/soc/pxa/pxa2xx-pcm.c @@ -14,9 +14,10 @@ #include #include -#include #include +#include "pxa2xx-lib.h" + static const struct snd_soc_component_driver pxa2xx_soc_platform = { .pcm_new = pxa2xx_soc_pcm_new, .open = pxa2xx_soc_pcm_open, diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c index c8137e8883c40b..b80acb221d244b 100644 --- a/sound/soc/rockchip/rk3399_gru_sound.c +++ b/sound/soc/rockchip/rk3399_gru_sound.c @@ -606,6 +606,7 @@ static const struct of_device_id rockchip_sound_of_match[] = { { .compatible = "rockchip,rk3399-gru-sound", }, {}, }; +MODULE_DEVICE_TABLE(of, 
rockchip_sound_of_match); static struct platform_driver rockchip_sound_driver = { .probe = rockchip_sound_probe, @@ -624,4 +625,3 @@ MODULE_AUTHOR("Xing Zheng "); MODULE_DESCRIPTION("Rockchip ASoC Machine Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, rockchip_sound_of_match); diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index 0a0a95b4f52047..49ff86b35ef187 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -662,6 +662,7 @@ static const struct of_device_id rockchip_i2s_match[] __maybe_unused = { { .compatible = "rockchip,rv1126-i2s", }, {}, }; +MODULE_DEVICE_TABLE(of, rockchip_i2s_match); static int rockchip_i2s_init_dai(struct rk_i2s_dev *i2s, struct resource *res, struct snd_soc_dai_driver **dp) @@ -878,4 +879,3 @@ MODULE_DESCRIPTION("ROCKCHIP IIS ASoC Interface"); MODULE_AUTHOR("jianqun "); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, rockchip_i2s_match); diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c index fc52149ed6ae3e..28fa253a51877e 100644 --- a/sound/soc/rockchip/rockchip_i2s_tdm.c +++ b/sound/soc/rockchip/rockchip_i2s_tdm.c @@ -1040,6 +1040,7 @@ static const struct of_device_id rockchip_i2s_tdm_match[] = { { .compatible = "rockchip,rv1126-i2s-tdm", .data = &rv1126_i2s_soc_data }, {}, }; +MODULE_DEVICE_TABLE(of, rockchip_i2s_tdm_match); static const struct snd_soc_dai_driver i2s_tdm_dai = { .ops = &rockchip_i2s_tdm_dai_ops, @@ -1442,4 +1443,3 @@ MODULE_DESCRIPTION("ROCKCHIP I2S/TDM ASoC Interface"); MODULE_AUTHOR("Sugar Zhang "); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, rockchip_i2s_tdm_match); diff --git a/sound/soc/sdca/sdca_asoc.c b/sound/soc/sdca/sdca_asoc.c index 2bfc8e5aee31d9..e76afa396b0ab3 100644 --- a/sound/soc/sdca/sdca_asoc.c +++ b/sound/soc/sdca/sdca_asoc.c @@ -359,15 +359,77 @@ static int 
entity_parse_ot(struct device *dev, return 0; } +/** + * sdca_asoc_pde_poll_actual_ps - Verify PDE power state reached target state + * @dev: Pointer to the device for error logging. + * @regmap: Register map for reading ACTUAL_PS register. + * @function_id: SDCA function identifier. + * @entity_id: SDCA entity identifier for the power domain. + * @from_ps: Source power state (SDCA_PDE_PSn value). + * @to_ps: Target power state (SDCA_PDE_PSn value). + * @pde_delays: Pointer to array of PDE delay specifications for this device, + * or NULL to use default polling interval. + * @num_delays: Number of entries in pde_delays array. + * + * This function polls the ACTUAL_PS register to verify that a PDE power state + * transition has completed. Per SDCA specification, after writing REQUESTED_PS, + * the caller must poll ACTUAL_PS until it reflects the requested state. + * + * This function implements the polling logic but does NOT modify the power state. + * The caller is responsible for writing REQUESTED_PS before invoking this function. + * + * If a delay table is provided, appropriate polling intervals are extracted based + * on the from_ps and to_ps transition. If no table is provided or no matching entry + * is found, a default polling interval is used. + * + * Return: Returns zero when ACTUAL_PS reaches the target state, -ETIMEDOUT if the + * polling times out before reaching the target state, or a negative error code if + * a register read fails. 
+ */ +int sdca_asoc_pde_poll_actual_ps(struct device *dev, struct regmap *regmap, + int function_id, int entity_id, + int from_ps, int to_ps, + const struct sdca_pde_delay *pde_delays, + int num_delays) +{ + static const int polls = 100; + static const int default_poll_us = 1000; + unsigned int reg, val; + int i, poll_us = default_poll_us; + int ret; + + if (pde_delays && num_delays > 0) { + for (i = 0; i < num_delays; i++) { + if (pde_delays[i].from_ps == from_ps && pde_delays[i].to_ps == to_ps) { + poll_us = pde_delays[i].us / polls; + break; + } + } + } + + reg = SDW_SDCA_CTL(function_id, entity_id, SDCA_CTL_PDE_ACTUAL_PS, 0); + + for (i = 0; i < polls; i++) { + if (i) + fsleep(poll_us); + + ret = regmap_read(regmap, reg, &val); + if (ret) + return ret; + else if (val == to_ps) + return 0; + } + + return -ETIMEDOUT; +} +EXPORT_SYMBOL_NS(sdca_asoc_pde_poll_actual_ps, "SND_SOC_SDCA"); + static int entity_pde_event(struct snd_soc_dapm_widget *widget, struct snd_kcontrol *kctl, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(widget->dapm); struct sdca_entity *entity = widget->priv; - static const int polls = 100; - unsigned int reg, val; - int from, to, i; - int poll_us; + int from, to; int ret; if (!component) @@ -386,33 +448,17 @@ static int entity_pde_event(struct snd_soc_dapm_widget *widget, return 0; } - for (i = 0; i < entity->pde.num_max_delay; i++) { - struct sdca_pde_delay *delay = &entity->pde.max_delay[i]; - - if (delay->from_ps == from && delay->to_ps == to) { - poll_us = delay->us / polls; - break; - } - } - - reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(widget->reg), - SDW_SDCA_CTL_ENT(widget->reg), - SDCA_CTL_PDE_ACTUAL_PS, 0); - - for (i = 0; i < polls; i++) { - if (i) - fsleep(poll_us); - - ret = regmap_read(component->regmap, reg, &val); - if (ret) - return ret; - else if (val == to) - return 0; - } + ret = sdca_asoc_pde_poll_actual_ps(component->dev, component->regmap, + SDW_SDCA_CTL_FUNC(widget->reg), + 
SDW_SDCA_CTL_ENT(widget->reg), + from, to, + entity->pde.max_delay, + entity->pde.num_max_delay); + if (ret) + dev_err(component->dev, "%s: PDE transition %x -> %x failed, err=%d\n", + entity->label, from, to, ret); - dev_err(component->dev, "%s: power transition failed: %x\n", - entity->label, val); - return -ETIMEDOUT; + return ret; } static int entity_parse_pde(struct device *dev, diff --git a/sound/soc/sdca/sdca_class.c b/sound/soc/sdca/sdca_class.c index 6e9b66f7180191..a6a3da8de4371a 100644 --- a/sound/soc/sdca/sdca_class.c +++ b/sound/soc/sdca/sdca_class.c @@ -183,7 +183,6 @@ static void class_boot_work(struct work_struct *work) static int class_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *id) { struct device *dev = &sdw->dev; - struct sdca_device_data *data = &sdw->sdca_data; struct regmap_config *dev_config; struct sdca_class_drv *drv; int ret; @@ -199,12 +198,6 @@ static int class_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *id if (!dev_config) return -ENOMEM; - drv->functions = devm_kcalloc(dev, data->num_functions, - sizeof(*drv->functions), - GFP_KERNEL); - if (!drv->functions) - return -ENOMEM; - drv->dev = dev; drv->sdw = sdw; mutex_init(&drv->regmap_lock); diff --git a/sound/soc/sdca/sdca_class.h b/sound/soc/sdca/sdca_class.h index 6f24ea2bbd3817..8b63e62485e647 100644 --- a/sound/soc/sdca/sdca_class.h +++ b/sound/soc/sdca/sdca_class.h @@ -24,7 +24,6 @@ struct sdca_class_drv { struct regmap *dev_regmap; struct sdw_slave *sdw; - struct sdca_function_data *functions; struct sdca_interrupt_info *irq_info; struct mutex regmap_lock; diff --git a/sound/soc/sdca/sdca_class_function.c b/sound/soc/sdca/sdca_class_function.c index 31fc08d5130772..1496a15f7d2ac7 100644 --- a/sound/soc/sdca/sdca_class_function.c +++ b/sound/soc/sdca/sdca_class_function.c @@ -27,6 +27,7 @@ #include #include #include "sdca_class.h" +#include "sdca_function_device.h" struct class_function_drv { struct device *dev; @@ -294,8 +295,7 @@ static int 
class_function_probe(struct auxiliary_device *auxdev, { struct device *dev = &auxdev->dev; struct sdca_class_drv *core = dev_get_drvdata(dev->parent); - struct sdca_device_data *data = &core->sdw->sdca_data; - struct sdca_function_desc *desc; + struct sdca_dev *sdev = auxiliary_dev_to_sdca_dev(auxdev); struct snd_soc_component_driver *cmp_drv; struct snd_soc_dai_driver *dais; struct class_function_drv *drv; @@ -305,7 +305,6 @@ static int class_function_probe(struct auxiliary_device *auxdev, int ndefaults; int num_dais; int ret; - int i; drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); if (!drv) @@ -328,21 +327,9 @@ static int class_function_probe(struct auxiliary_device *auxdev, drv->dev = dev; drv->core = core; + drv->function = &sdev->function; - for (i = 0; i < data->num_functions; i++) { - desc = &data->function[i]; - - if (desc->type == aux_dev_id->driver_data) - break; - } - if (i == core->sdw->sdca_data.num_functions) { - dev_err(dev, "failed to locate function\n"); - return -EINVAL; - } - - drv->function = &core->functions[i]; - - ret = sdca_parse_function(dev, core->sdw, desc, drv->function); + ret = sdca_parse_function(dev, core->sdw, drv->function); if (ret) return ret; @@ -377,7 +364,7 @@ static int class_function_probe(struct auxiliary_device *auxdev, return dev_err_probe(dev, PTR_ERR(drv->regmap), "failed to create regmap"); - switch (desc->type) { + switch (drv->function->desc->type) { case SDCA_FUNCTION_TYPE_UAJ: case SDCA_FUNCTION_TYPE_RJ: cmp_drv->set_jack = class_function_set_jack; diff --git a/sound/soc/sdca/sdca_functions.c b/sound/soc/sdca/sdca_functions.c index 196bade11ab5dd..77940bd6b33c95 100644 --- a/sound/soc/sdca/sdca_functions.c +++ b/sound/soc/sdca/sdca_functions.c @@ -98,7 +98,7 @@ static int find_sdca_function(struct acpi_device *adev, void *data) u32 function_type; int function_index; u64 addr; - int ret; + int i, ret; if (sdca_data->num_functions >= SDCA_MAX_FUNCTION_COUNT) { dev_err(dev, "maximum number of functions 
exceeded\n"); @@ -159,6 +159,14 @@ static int find_sdca_function(struct acpi_device *adev, void *data) /* store results */ function_index = sdca_data->num_functions; + + for (i = 0; i < function_index; i++) { + if (sdca_data->function[i].type == function_type) { + sdca_data->function[function_index].duplicate = true; + break; + } + } + sdca_data->function[function_index].adr = addr; sdca_data->function[function_index].type = function_type; sdca_data->function[function_index].name = function_name; @@ -1466,6 +1474,7 @@ static int find_sdca_entity_xu(struct device *dev, } static int find_sdca_entity(struct device *dev, struct sdw_slave *sdw, + struct sdca_function_data *function, struct fwnode_handle *function_node, struct fwnode_handle *entity_node, struct sdca_entity *entity) @@ -1481,6 +1490,13 @@ static int find_sdca_entity(struct device *dev, struct sdw_slave *sdw, return ret; } + if (function->desc->duplicate) { + entity->label = devm_kasprintf(dev, GFP_KERNEL, "%d %s", + function->desc->adr, entity->label); + if (!entity->label) + return -ENOMEM; + } + ret = fwnode_property_read_u32(entity_node, "mipi-sdca-entity-type", &tmp); if (ret) { dev_err(dev, "%s: type missing: %d\n", entity->label, ret); @@ -1578,7 +1594,7 @@ static int find_sdca_entities(struct device *dev, struct sdw_slave *sdw, return -EINVAL; } - ret = find_sdca_entity(dev, sdw, function_node, + ret = find_sdca_entity(dev, sdw, function, function_node, entity_node, &entities[i]); fwnode_handle_put(entity_node); if (ret) @@ -1605,8 +1621,14 @@ static struct sdca_entity *find_sdca_entity_by_label(struct sdca_function_data * const char *entity_label) { struct sdca_entity *entity = NULL; + char tmp[64]; int i; + if (function->desc->duplicate) { + snprintf(tmp, sizeof(tmp), "%d %s", function->desc->adr, entity_label); + entity_label = tmp; + } + for (i = 0; i < function->num_entities; i++) { entity = &function->entities[i]; @@ -2158,27 +2180,22 @@ static int find_sdca_filesets(struct device *dev, 
struct sdw_slave *sdw, * sdca_parse_function - parse ACPI DisCo for a Function * @dev: Pointer to device against which function data will be allocated. * @sdw: SoundWire slave device to be processed. - * @function_desc: Pointer to the Function short descriptor. * @function: Pointer to the Function information, to be populated. * * Return: Returns 0 for success. */ int sdca_parse_function(struct device *dev, struct sdw_slave *sdw, - struct sdca_function_desc *function_desc, struct sdca_function_data *function) { + struct fwnode_handle *node = function->desc->node; u32 tmp; int ret; - function->desc = function_desc; - - ret = fwnode_property_read_u32(function_desc->node, - "mipi-sdca-function-busy-max-delay", &tmp); + ret = fwnode_property_read_u32(node, "mipi-sdca-function-busy-max-delay", &tmp); if (!ret) function->busy_max_delay = tmp; - ret = fwnode_property_read_u32(function_desc->node, - "mipi-sdca-function-reset-max-delay", &tmp); + ret = fwnode_property_read_u32(node, "mipi-sdca-function-reset-max-delay", &tmp); if (ret || tmp == 0) { dev_dbg(dev, "reset delay missing, defaulting to 100mS\n"); function->reset_max_delay = 100000; @@ -2187,26 +2204,26 @@ int sdca_parse_function(struct device *dev, struct sdw_slave *sdw, } dev_dbg(dev, "%pfwP: name %s busy delay %dus reset delay %dus\n", - function->desc->node, function->desc->name, - function->busy_max_delay, function->reset_max_delay); + node, function->desc->name, function->busy_max_delay, + function->reset_max_delay); - ret = find_sdca_init_table(dev, function_desc->node, function); + ret = find_sdca_init_table(dev, node, function); if (ret) return ret; - ret = find_sdca_entities(dev, sdw, function_desc->node, function); + ret = find_sdca_entities(dev, sdw, node, function); if (ret) return ret; - ret = find_sdca_connections(dev, function_desc->node, function); + ret = find_sdca_connections(dev, node, function); if (ret) return ret; - ret = find_sdca_clusters(dev, function_desc->node, function); + ret = 
find_sdca_clusters(dev, node, function); if (ret < 0) return ret; - ret = find_sdca_filesets(dev, sdw, function_desc->node, function); + ret = find_sdca_filesets(dev, sdw, node, function); if (ret) return ret; diff --git a/sound/soc/sdca/sdca_interrupts.c b/sound/soc/sdca/sdca_interrupts.c index 6e10b4e660d96b..4539a52a8e32ba 100644 --- a/sound/soc/sdca/sdca_interrupts.c +++ b/sound/soc/sdca/sdca_interrupts.c @@ -375,8 +375,7 @@ int sdca_irq_data_populate(struct device *dev, struct regmap *regmap, if (!dev) return -ENODEV; - name = kasprintf(GFP_KERNEL, "%s %s %s", function->desc->name, - entity->label, control->label); + name = kasprintf(GFP_KERNEL, "%s %s", entity->label, control->label); if (!name) return -ENOMEM; diff --git a/sound/soc/sdca/sdca_jack.c b/sound/soc/sdca/sdca_jack.c index 49d317d3b8c85f..ae9636622a8402 100644 --- a/sound/soc/sdca/sdca_jack.c +++ b/sound/soc/sdca/sdca_jack.c @@ -145,6 +145,32 @@ int sdca_jack_alloc_state(struct sdca_interrupt *interrupt) } EXPORT_SYMBOL_NS_GPL(sdca_jack_alloc_state, "SND_SOC_SDCA"); +static int type_get_mask(enum sdca_terminal_type type) +{ + switch (type) { + case SDCA_TERM_TYPE_LINEIN_STEREO: + case SDCA_TERM_TYPE_LINEIN_FRONT_LR: + case SDCA_TERM_TYPE_LINEIN_CENTER_LFE: + case SDCA_TERM_TYPE_LINEIN_SURROUND_LR: + case SDCA_TERM_TYPE_LINEIN_REAR_LR: + return SND_JACK_LINEIN; + case SDCA_TERM_TYPE_LINEOUT_STEREO: + case SDCA_TERM_TYPE_LINEOUT_FRONT_LR: + case SDCA_TERM_TYPE_LINEOUT_CENTER_LFE: + case SDCA_TERM_TYPE_LINEOUT_SURROUND_LR: + case SDCA_TERM_TYPE_LINEOUT_REAR_LR: + return SND_JACK_LINEOUT; + case SDCA_TERM_TYPE_MIC_JACK: + return SND_JACK_MICROPHONE; + case SDCA_TERM_TYPE_HEADPHONE_JACK: + return SND_JACK_HEADPHONE; + case SDCA_TERM_TYPE_HEADSET_JACK: + return SND_JACK_HEADSET; + default: + return 0; + } +} + /** * sdca_jack_set_jack - attach an ASoC jack to SDCA * @info: SDCA interrupt information. 
@@ -154,7 +180,8 @@ EXPORT_SYMBOL_NS_GPL(sdca_jack_alloc_state, "SND_SOC_SDCA"); */ int sdca_jack_set_jack(struct sdca_interrupt_info *info, struct snd_soc_jack *jack) { - int i, ret; + int i, j; + int ret; guard(mutex)(&info->irq_lock); @@ -162,6 +189,7 @@ int sdca_jack_set_jack(struct sdca_interrupt_info *info, struct snd_soc_jack *ja struct sdca_interrupt *interrupt = &info->irqs[i]; struct sdca_control *control = interrupt->control; struct sdca_entity *entity = interrupt->entity; + struct sdca_control_range *range; struct jack_state *jack_state; if (!interrupt->irq) @@ -169,7 +197,22 @@ int sdca_jack_set_jack(struct sdca_interrupt_info *info, struct snd_soc_jack *ja switch (SDCA_CTL_TYPE(entity->type, control->sel)) { case SDCA_CTL_TYPE_S(GE, DETECTED_MODE): + range = sdca_selector_find_range(interrupt->dev, entity, + SDCA_CTL_GE_SELECTED_MODE, + SDCA_SELECTED_MODE_NCOLS, 0); + if (!range) + return -EINVAL; + jack_state = interrupt->priv; + + for (j = 0; j < range->rows; j++) { + enum sdca_terminal_type type; + + type = sdca_range(range, SDCA_SELECTED_MODE_TERM_TYPE, j); + + jack_state->mask |= type_get_mask(type); + } + jack_state->jack = jack; /* Report initial state in case IRQ was already handled */ @@ -191,7 +234,6 @@ int sdca_jack_report(struct sdca_interrupt *interrupt) struct jack_state *jack_state = interrupt->priv; struct sdca_control_range *range; enum sdca_terminal_type type; - unsigned int report = 0; unsigned int reg, val; int ret; @@ -213,35 +255,7 @@ int sdca_jack_report(struct sdca_interrupt *interrupt) type = sdca_range_search(range, SDCA_SELECTED_MODE_INDEX, val, SDCA_SELECTED_MODE_TERM_TYPE); - switch (type) { - case SDCA_TERM_TYPE_LINEIN_STEREO: - case SDCA_TERM_TYPE_LINEIN_FRONT_LR: - case SDCA_TERM_TYPE_LINEIN_CENTER_LFE: - case SDCA_TERM_TYPE_LINEIN_SURROUND_LR: - case SDCA_TERM_TYPE_LINEIN_REAR_LR: - report = SND_JACK_LINEIN; - break; - case SDCA_TERM_TYPE_LINEOUT_STEREO: - case SDCA_TERM_TYPE_LINEOUT_FRONT_LR: - case 
SDCA_TERM_TYPE_LINEOUT_CENTER_LFE: - case SDCA_TERM_TYPE_LINEOUT_SURROUND_LR: - case SDCA_TERM_TYPE_LINEOUT_REAR_LR: - report = SND_JACK_LINEOUT; - break; - case SDCA_TERM_TYPE_MIC_JACK: - report = SND_JACK_MICROPHONE; - break; - case SDCA_TERM_TYPE_HEADPHONE_JACK: - report = SND_JACK_HEADPHONE; - break; - case SDCA_TERM_TYPE_HEADSET_JACK: - report = SND_JACK_HEADSET; - break; - default: - break; - } - - snd_soc_jack_report(jack_state->jack, report, 0xFFFF); + snd_soc_jack_report(jack_state->jack, type_get_mask(type), jack_state->mask); return 0; } diff --git a/sound/soc/sdw_utils/soc_sdw_ti_amp.c b/sound/soc/sdw_utils/soc_sdw_ti_amp.c index 488ef2ef45d4f1..f156116fbeb652 100644 --- a/sound/soc/sdw_utils/soc_sdw_ti_amp.c +++ b/sound/soc/sdw_utils/soc_sdw_ti_amp.c @@ -7,12 +7,15 @@ #include #include -#include +#include +#include #include #include +#include #include #define TIAMP_SPK_VOLUME_0DB 200 +#define TAC5XX2_WIDGET_NAME_MAX 32 int asoc_sdw_ti_amp_initial_settings(struct snd_soc_card *card, const char *name_prefix) @@ -95,3 +98,138 @@ int asoc_sdw_ti_amp_init(struct snd_soc_card *card, return 0; } EXPORT_SYMBOL_NS(asoc_sdw_ti_amp_init, "SND_SOC_SDW_UTILS"); + +static int asoc_sdw_ti_add_tac5xx2_routes(struct snd_soc_dapm_context *dapm, + const char *name_prefix) +{ + struct snd_soc_dapm_route routes[2]; + char left_widget[TAC5XX2_WIDGET_NAME_MAX]; + char right_widget[TAC5XX2_WIDGET_NAME_MAX]; + int ret; + + if (strlen(name_prefix) > (TAC5XX2_WIDGET_NAME_MAX - 7)) + return -ENAMETOOLONG; + + ret = scnprintf(left_widget, sizeof(left_widget), "%s SPK_L", name_prefix); + if (ret <= 0) + return -EINVAL; + + ret = scnprintf(right_widget, sizeof(right_widget), "%s SPK_R", name_prefix); + if (ret <= 0) + return -EINVAL; + + routes[0] = (struct snd_soc_dapm_route){"Left Spk", NULL, left_widget}; + routes[1] = (struct snd_soc_dapm_route){"Right Spk", NULL, right_widget}; + + return snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes)); +} + +int 
asoc_sdw_ti_tac5xx2_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, + struct snd_soc_dai *dai) +{ + struct snd_soc_card *card = rtd->card; + struct snd_soc_dapm_context *dapm = snd_soc_card_to_dapm(card); + int ret, i; + struct snd_soc_dai *codec_dai; + const char *prefix; + + for_each_rtd_codec_dais(rtd, i, codec_dai) { + if (!strstr(codec_dai->name, "tac5") && + !strstr(codec_dai->name, "tas2883")) + continue; + + prefix = codec_dai->component->name_prefix; + if (!prefix) { + dev_warn(card->dev, + "No name prefix found for codec DAI: %s\n", + codec_dai->name); + continue; + } + ret = asoc_sdw_ti_add_tac5xx2_routes(dapm, prefix); + if (ret) { + dev_err(card->dev, "Failed to add routes for %s: %d\n", + prefix, ret); + return ret; + } + } + + dev_dbg(card->dev, "Added TAC5XX2 speaker routes\n"); + + return 0; +} +EXPORT_SYMBOL_NS(asoc_sdw_ti_tac5xx2_spk_rtd_init, "SND_SOC_SDW_UTILS"); + +int asoc_sdw_ti_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai) +{ + struct snd_soc_card *card = rtd->card; + struct snd_soc_component *component; + + component = dai->component; + + card->components = devm_kasprintf(card->dev, GFP_KERNEL, + "%s mic:%s", card->components, + component->name_prefix); + if (!card->components) + return -ENOMEM; + + dev_dbg(card->dev, "card->components: %s\n", card->components); + + return 0; +} +EXPORT_SYMBOL_NS(asoc_sdw_ti_dmic_rtd_init, "SND_SOC_SDW_UTILS"); + +static struct snd_soc_jack_pin ti_sdca_jack_pins[] = { + { + .pin = "Headphone", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Headset Mic", + .mask = SND_JACK_MICROPHONE, + }, +}; + +int asoc_sdw_ti_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai) +{ + struct snd_soc_card *card = rtd->card; + struct asoc_sdw_mc_private *ctx = snd_soc_card_get_drvdata(card); + struct snd_soc_component *component; + struct snd_soc_jack *jack; + int ret; + + component = dai->component; + + card->components = devm_kasprintf(card->dev, GFP_KERNEL, + "%s hs:%s", 
card->components, + component->name_prefix); + if (!card->components) + return -ENOMEM; + + ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack", + SND_JACK_HEADSET | SND_JACK_BTN_0 | + SND_JACK_BTN_1 | SND_JACK_BTN_2 | + SND_JACK_BTN_3 | SND_JACK_BTN_4, + &ctx->sdw_headset, + ti_sdca_jack_pins, + ARRAY_SIZE(ti_sdca_jack_pins)); + if (ret) { + dev_err(rtd->card->dev, "Jack create failed%d\n", ret); + return ret; + } + + jack = &ctx->sdw_headset; + + snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); + snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); + snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); + snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); + snd_jack_set_key(jack->jack, SND_JACK_BTN_4, KEY_NEXTSONG); + + ret = snd_soc_component_set_jack(component, jack, NULL); + if (ret) + dev_err(rtd->card->dev, "Headset Jack call-back failed: %d\n", + ret); + + return ret; +} +EXPORT_SYMBOL_NS(asoc_sdw_ti_sdca_jack_rtd_init, "SND_SOC_SDW_UTILS"); diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c index baf4c1249a66e6..d8378d1b435e0f 100644 --- a/sound/soc/sdw_utils/soc_sdw_utils.c +++ b/sound/soc/sdw_utils/soc_sdw_utils.c @@ -72,6 +72,157 @@ static const struct snd_kcontrol_new rt700_controls[] = { }; struct asoc_sdw_codec_info codec_info_list[] = { + { + .vendor_id = 0x0102, + .part_id = 0x5572, + .name_prefix = "tac5572", + .dais = { + { + /* speaker */ + .direction = {true, false}, + .dai_name = "tac5xx2-aif1", + .dai_type = SOC_SDW_DAI_TYPE_AMP, + .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_UNUSED_DAI_ID}, + .init = asoc_sdw_ti_amp_init, + .rtd_init = asoc_sdw_ti_tac5xx2_spk_rtd_init, + .controls = lr_spk_controls, + .num_controls = ARRAY_SIZE(lr_spk_controls), + .widgets = lr_spk_widgets, + .num_widgets = ARRAY_SIZE(lr_spk_widgets), + }, + { + /* mic */ + .direction = {false, true}, + .dai_name = "tac5xx2-aif2", + .dai_type = SOC_SDW_DAI_TYPE_MIC, + .dailink = 
{SOC_SDW_UNUSED_DAI_ID, SOC_SDW_DMIC_DAI_ID}, + .rtd_init = asoc_sdw_ti_dmic_rtd_init, + }, + { + /* UAJ */ + .direction = {true, true}, + .dai_name = "tac5xx2-aif3", + .dai_type = SOC_SDW_DAI_TYPE_JACK, + .dailink = {SOC_SDW_JACK_OUT_DAI_ID, SOC_SDW_JACK_IN_DAI_ID}, + .controls = generic_jack_controls, + .num_controls = ARRAY_SIZE(generic_jack_controls), + .widgets = generic_jack_widgets, + .num_widgets = ARRAY_SIZE(generic_jack_widgets), + .rtd_init = asoc_sdw_ti_sdca_jack_rtd_init, + }, + }, + .dai_num = 3, + }, + { + .vendor_id = 0x0102, + .part_id = 0x5672, + .name_prefix = "tac5672", + .dais = { + { + /* speaker with IV sense feedback */ + .direction = {true, true}, + .dai_name = "tac5xx2-aif1", + .dai_type = SOC_SDW_DAI_TYPE_AMP, + .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_AMP_IN_DAI_ID}, + .init = asoc_sdw_ti_amp_init, + .rtd_init = asoc_sdw_ti_tac5xx2_spk_rtd_init, + .controls = lr_spk_controls, + .num_controls = ARRAY_SIZE(lr_spk_controls), + .widgets = lr_spk_widgets, + .num_widgets = ARRAY_SIZE(lr_spk_widgets), + }, + { + /* mic */ + .direction = {false, true}, + .dai_name = "tac5xx2-aif2", + .dai_type = SOC_SDW_DAI_TYPE_MIC, + .dailink = {SOC_SDW_UNUSED_DAI_ID, SOC_SDW_DMIC_DAI_ID}, + .rtd_init = asoc_sdw_ti_dmic_rtd_init, + }, + { + /* UAJ */ + .direction = {true, true}, + .dai_name = "tac5xx2-aif3", + .dai_type = SOC_SDW_DAI_TYPE_JACK, + .dailink = {SOC_SDW_JACK_OUT_DAI_ID, SOC_SDW_JACK_IN_DAI_ID}, + .controls = generic_jack_controls, + .num_controls = ARRAY_SIZE(generic_jack_controls), + .widgets = generic_jack_widgets, + .num_widgets = ARRAY_SIZE(generic_jack_widgets), + .rtd_init = asoc_sdw_ti_sdca_jack_rtd_init, + }, + }, + .dai_num = 3, + }, + { + .vendor_id = 0x0102, + .part_id = 0x5682, + .name_prefix = "tac5682", + .dais = { + { + /* speaker with echo reference feedback */ + .direction = {true, true}, + .dai_name = "tac5xx2-aif1", + .dai_type = SOC_SDW_DAI_TYPE_AMP, + .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_AMP_IN_DAI_ID}, + .init = 
asoc_sdw_ti_amp_init, + .rtd_init = asoc_sdw_ti_tac5xx2_spk_rtd_init, + .controls = lr_spk_controls, + .num_controls = ARRAY_SIZE(lr_spk_controls), + .widgets = lr_spk_widgets, + .num_widgets = ARRAY_SIZE(lr_spk_widgets), + }, + { + /* mic */ + .direction = {false, true}, + .dai_name = "tac5xx2-aif2", + .dai_type = SOC_SDW_DAI_TYPE_MIC, + .dailink = {SOC_SDW_UNUSED_DAI_ID, SOC_SDW_DMIC_DAI_ID}, + .rtd_init = asoc_sdw_ti_dmic_rtd_init, + }, + { + /* UAJ */ + .direction = {true, true}, + .dai_name = "tac5xx2-aif3", + .dai_type = SOC_SDW_DAI_TYPE_JACK, + .dailink = {SOC_SDW_JACK_OUT_DAI_ID, SOC_SDW_JACK_IN_DAI_ID}, + .controls = generic_jack_controls, + .num_controls = ARRAY_SIZE(generic_jack_controls), + .widgets = generic_jack_widgets, + .num_widgets = ARRAY_SIZE(generic_jack_widgets), + .rtd_init = asoc_sdw_ti_sdca_jack_rtd_init, + }, + }, + .dai_num = 3, + }, + { + .vendor_id = 0x0102, + .part_id = 0x2883, + .name_prefix = "tas2883", + .dais = { + { + .direction = {true, false}, + .dai_name = "tac5xx2-aif1", + .dai_type = SOC_SDW_DAI_TYPE_AMP, + .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_UNUSED_DAI_ID}, + .init = asoc_sdw_ti_amp_init, + .rtd_init = asoc_sdw_ti_tac5xx2_spk_rtd_init, + .controls = lr_spk_controls, + .num_controls = ARRAY_SIZE(lr_spk_controls), + .widgets = lr_spk_widgets, + .num_widgets = ARRAY_SIZE(lr_spk_widgets), + }, + { + /* mic */ + .direction = {false, true}, + .dai_name = "tac5xx2-aif2", + .dai_type = SOC_SDW_DAI_TYPE_MIC, + .dailink = {SOC_SDW_UNUSED_DAI_ID, SOC_SDW_DMIC_DAI_ID}, + .rtd_init = asoc_sdw_ti_dmic_rtd_init, + }, + }, + .dai_num = 2, + }, { .vendor_id = 0x0102, .part_id = 0x0000, /* TAS2783A */ diff --git a/sound/soc/spacemit/k1_i2s.c b/sound/soc/spacemit/k1_i2s.c index 1cb99f1abc7cde..5420ca2aefbd87 100644 --- a/sound/soc/spacemit/k1_i2s.c +++ b/sound/soc/spacemit/k1_i2s.c @@ -93,8 +93,8 @@ static void spacemit_i2s_init(struct spacemit_i2s_dev *i2s) u32 sscr_val, sspsp_val, ssfcr_val, ssrwt_val; sscr_val = SSCR_TRAIL | 
SSCR_FRF_PSP; - ssfcr_val = FIELD_PREP(SSFCR_FIELD_TFT, 5) | - FIELD_PREP(SSFCR_FIELD_RFT, 5) | + ssfcr_val = FIELD_PREP(SSFCR_FIELD_TFT, 0xF) | + FIELD_PREP(SSFCR_FIELD_RFT, 0xF) | SSFCR_RSRE | SSFCR_TSRE; ssrwt_val = SSRWT_RWOT; sspsp_val = SSPSP_SFRMP; @@ -106,6 +106,37 @@ static void spacemit_i2s_init(struct spacemit_i2s_dev *i2s) writel(0, i2s->base + SSINTEN); } +static int spacemit_i2s_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct spacemit_i2s_dev *i2s = snd_soc_dai_get_drvdata(dai); + + switch (i2s->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + snd_pcm_hw_constraint_minmax(substream->runtime, + SNDRV_PCM_HW_PARAM_CHANNELS, + 2, 2); + snd_pcm_hw_constraint_mask64(substream->runtime, + SNDRV_PCM_HW_PARAM_FORMAT, + SNDRV_PCM_FMTBIT_S16_LE); + break; + case SND_SOC_DAIFMT_DSP_A: + case SND_SOC_DAIFMT_DSP_B: + snd_pcm_hw_constraint_minmax(substream->runtime, + SNDRV_PCM_HW_PARAM_CHANNELS, + 1, 1); + snd_pcm_hw_constraint_mask64(substream->runtime, + SNDRV_PCM_HW_PARAM_FORMAT, + SNDRV_PCM_FMTBIT_S32_LE); + break; + default: + dev_dbg(i2s->dev, "unexpected format type"); + return -EINVAL; + } + + return 0; +} + static int spacemit_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) @@ -117,10 +148,6 @@ static int spacemit_i2s_hw_params(struct snd_pcm_substream *substream, u32 val; int ret; - val = readl(i2s->base + SSCR); - if (val & SSCR_SSE) - return 0; - dma_data = &i2s->playback_dma_data; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) @@ -157,22 +184,9 @@ static int spacemit_i2s_hw_params(struct snd_pcm_substream *substream, dma_data->maxburst = 32; dma_data->addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; } - - snd_pcm_hw_constraint_minmax(substream->runtime, - SNDRV_PCM_HW_PARAM_CHANNELS, - 1, 2); - snd_pcm_hw_constraint_mask64(substream->runtime, - SNDRV_PCM_HW_PARAM_FORMAT, - SNDRV_PCM_FMTBIT_S16_LE); break; case SND_SOC_DAIFMT_DSP_A: case 
SND_SOC_DAIFMT_DSP_B: - snd_pcm_hw_constraint_minmax(substream->runtime, - SNDRV_PCM_HW_PARAM_CHANNELS, - 1, 1); - snd_pcm_hw_constraint_mask64(substream->runtime, - SNDRV_PCM_HW_PARAM_FORMAT, - SNDRV_PCM_FMTBIT_S32_LE); break; default: dev_dbg(i2s->dev, "unexpected format type"); @@ -181,6 +195,9 @@ static int spacemit_i2s_hw_params(struct snd_pcm_substream *substream, } val = readl(i2s->base + SSCR); + if (val & SSCR_SSE) + return 0; + val &= ~SSCR_DW_32BYTE; val |= data_width; writel(val, i2s->base + SSCR); @@ -303,6 +320,7 @@ static int spacemit_i2s_dai_remove(struct snd_soc_dai *dai) static const struct snd_soc_dai_ops spacemit_i2s_dai_ops = { .probe = spacemit_i2s_dai_probe, .remove = spacemit_i2s_dai_remove, + .startup = spacemit_i2s_startup, .hw_params = spacemit_i2s_hw_params, .set_sysclk = spacemit_i2s_set_sysclk, .set_fmt = spacemit_i2s_set_fmt, diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c index 08c58e8f3c222d..0df1fc64f532f0 100644 --- a/sound/soc/tegra/tegra20_ac97.c +++ b/sound/soc/tegra/tegra20_ac97.c @@ -441,6 +441,7 @@ static const struct of_device_id tegra20_ac97_of_match[] = { { .compatible = "nvidia,tegra20-ac97", }, {}, }; +MODULE_DEVICE_TABLE(of, tegra20_ac97_of_match); static struct platform_driver tegra20_ac97_driver = { .driver = { @@ -456,4 +457,3 @@ MODULE_AUTHOR("Lucas Stach"); MODULE_DESCRIPTION("Tegra20 AC97 ASoC driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, tegra20_ac97_of_match); diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c index c620ab0c601fae..b48cc4a6967b82 100644 --- a/sound/soc/tegra/tegra20_das.c +++ b/sound/soc/tegra/tegra20_das.c @@ -188,6 +188,7 @@ static const struct of_device_id tegra20_das_of_match[] = { { .compatible = "nvidia,tegra20-das", }, {}, }; +MODULE_DEVICE_TABLE(of, tegra20_das_of_match); static struct platform_driver tegra20_das_driver = { .probe = tegra20_das_probe, @@ -202,4 +203,3 @@ 
MODULE_AUTHOR("Stephen Warren "); MODULE_DESCRIPTION("Tegra20 DAS driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, tegra20_das_of_match); diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c index 51df0835ce3eca..d9ab210ad69a9c 100644 --- a/sound/soc/tegra/tegra20_i2s.c +++ b/sound/soc/tegra/tegra20_i2s.c @@ -485,6 +485,7 @@ static const struct of_device_id tegra20_i2s_of_match[] = { { .compatible = "nvidia,tegra20-i2s", }, {}, }; +MODULE_DEVICE_TABLE(of, tegra20_i2s_of_match); static const struct dev_pm_ops tegra20_i2s_pm_ops = { RUNTIME_PM_OPS(tegra20_i2s_runtime_suspend, @@ -507,4 +508,3 @@ MODULE_AUTHOR("Stephen Warren "); MODULE_DESCRIPTION("Tegra20 I2S ASoC driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, tegra20_i2s_of_match); diff --git a/sound/soc/tegra/tegra210_admaif.c b/sound/soc/tegra/tegra210_admaif.c index a1c2757a39320c..7299c6bfcf1581 100644 --- a/sound/soc/tegra/tegra210_admaif.c +++ b/sound/soc/tegra/tegra210_admaif.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include "tegra_isomgr_bw.h" @@ -912,35 +913,26 @@ MODULE_DEVICE_TABLE(of, tegra_admaif_of_match); static int tegra_admaif_probe(struct platform_device *pdev) { + const struct tegra_admaif_soc_data *soc_data; struct tegra_admaif *admaif; void __iomem *regs; struct resource *res; + size_t alloc_size; int err, i; - admaif = devm_kzalloc(&pdev->dev, sizeof(*admaif), GFP_KERNEL); + soc_data = of_device_get_match_data(&pdev->dev); + + alloc_size = struct_size(admaif, capture_dma_data, soc_data->num_ch); + alloc_size += sizeof(*admaif->playback_dma_data) * soc_data->num_ch; + admaif = devm_kzalloc(&pdev->dev, alloc_size, GFP_KERNEL); if (!admaif) return -ENOMEM; - admaif->soc_data = of_device_get_match_data(&pdev->dev); + admaif->playback_dma_data = admaif->capture_dma_data + soc_data->num_ch; + admaif->soc_data = soc_data; dev_set_drvdata(&pdev->dev, 
admaif); - admaif->capture_dma_data = - devm_kcalloc(&pdev->dev, - admaif->soc_data->num_ch, - sizeof(struct snd_dmaengine_dai_dma_data), - GFP_KERNEL); - if (!admaif->capture_dma_data) - return -ENOMEM; - - admaif->playback_dma_data = - devm_kcalloc(&pdev->dev, - admaif->soc_data->num_ch, - sizeof(struct snd_dmaengine_dai_dma_data), - GFP_KERNEL); - if (!admaif->playback_dma_data) - return -ENOMEM; - for (i = 0; i < ADMAIF_PATHS; i++) { admaif->mono_to_stereo[i] = devm_kcalloc(&pdev->dev, admaif->soc_data->num_ch, diff --git a/sound/soc/tegra/tegra210_admaif.h b/sound/soc/tegra/tegra210_admaif.h index 304d45c76a9af0..fd9877aa95d386 100644 --- a/sound/soc/tegra/tegra210_admaif.h +++ b/sound/soc/tegra/tegra210_admaif.h @@ -229,13 +229,13 @@ struct tegra_admaif_soc_data { }; struct tegra_admaif { - struct snd_dmaengine_dai_dma_data *capture_dma_data; struct snd_dmaengine_dai_dma_data *playback_dma_data; const struct tegra_admaif_soc_data *soc_data; unsigned int *mono_to_stereo[ADMAIF_PATHS]; unsigned int *stereo_to_mono[ADMAIF_PATHS]; struct regmap *regmap; struct tegra_adma_isomgr *adma_isomgr; + struct snd_dmaengine_dai_dma_data capture_dma_data[]; }; #endif diff --git a/sound/soc/tegra/tegra210_mixer.c b/sound/soc/tegra/tegra210_mixer.c index ce44117a0b9cf1..f05617b5f43356 100644 --- a/sound/soc/tegra/tegra210_mixer.c +++ b/sound/soc/tegra/tegra210_mixer.c @@ -151,10 +151,17 @@ static int tegra210_mixer_configure_gain(struct snd_soc_component *cmpnt, for (i = 0; i < NUM_DURATION_PARMS; i++) { int val; - if (instant_gain) + if (instant_gain) { val = 1; - else - val = gain_params.duration[i]; + } else { + if (i == DURATION_N3_ID) + val = mixer->duration[id]; + else if (i == DURATION_INV_N3_ID) + val = (u32)(BIT_ULL(31 + TEGRA210_MIXER_PRESCALAR) / + mixer->duration[id]); + else + val = gain_params.duration[i]; + } err = tegra210_mixer_write_ram(mixer, REG_DURATION_PARAM(reg, i), @@ -173,6 +180,204 @@ static int tegra210_mixer_configure_gain(struct snd_soc_component 
*cmpnt, return err; } +static int tegra210_mixer_get_fade_duration(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + struct snd_soc_component *cmpnt = snd_kcontrol_chip(kcontrol); + struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt); + + ucontrol->value.integer.value[0] = mixer->duration[mc->reg]; + + return 0; +} + +static int tegra210_mixer_put_fade_duration(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + struct snd_soc_component *cmpnt = snd_kcontrol_chip(kcontrol); + struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt); + unsigned int id = mc->reg; + u32 duration = ucontrol->value.integer.value[0]; + + if (duration == 0 || duration > TEGRA210_MIXER_FADE_DURATION_MAX) + return -EINVAL; + + if (mixer->duration[id] == duration) + return 0; + + mixer->duration[id] = duration; + mixer->fade_pending[id] = true; + + return 1; +} + +static int tegra210_mixer_get_fade_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + struct snd_soc_component *cmpnt = snd_kcontrol_chip(kcontrol); + struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt); + + ucontrol->value.integer.value[0] = mixer->fade_gain[mc->reg]; + + return 0; +} + +static int tegra210_mixer_put_fade_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + struct snd_soc_component *cmpnt = snd_kcontrol_chip(kcontrol); + struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt); + unsigned int id = mc->reg; + + if (ucontrol->value.integer.value[0] < 0 || + ucontrol->value.integer.value[0] > TEGRA210_MIXER_GAIN_MAX) 
+ return -EINVAL; + + if (mixer->fade_gain[id] == ucontrol->value.integer.value[0]) + return 0; + + mixer->fade_gain[id] = ucontrol->value.integer.value[0]; + mixer->fade_pending[id] = true; + + return 1; +} + +static int tegra210_mixer_get_fade_switch(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + ucontrol->value.integer.value[0] = 0; + + return 0; +} + +static int tegra210_mixer_put_fade_switch(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_kcontrol_chip(kcontrol); + struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt); + int id, err, changed = 0; + + err = pm_runtime_resume_and_get(cmpnt->dev); + if (err < 0) + return err; + + /* Switch off: disable sample count for all active fades */ + if (!ucontrol->value.integer.value[0]) { + for (id = 0; id < TEGRA210_MIXER_RX_MAX; id++) { + if (!mixer->in_fade[id]) + continue; + + regmap_update_bits(mixer->regmap, + MIXER_REG(TEGRA210_MIXER_RX1_CTRL, + id), + TEGRA210_MIXER_SAMPLE_COUNT_ENABLE, + 0); + mixer->in_fade[id] = false; + changed = 1; + } + + pm_runtime_put(cmpnt->dev); + return changed; + } + + /* Stop active fades on pending streams before reconfiguring */ + for (id = 0; id < TEGRA210_MIXER_RX_MAX; id++) { + if (!mixer->fade_pending[id]) + continue; + + if (mixer->in_fade[id]) { + regmap_update_bits(mixer->regmap, + MIXER_REG(TEGRA210_MIXER_RX1_CTRL, + id), + TEGRA210_MIXER_SAMPLE_COUNT_ENABLE, + 0); + mixer->in_fade[id] = false; + } + + mixer->gain_value[id] = mixer->fade_gain[id]; + err = tegra210_mixer_configure_gain(cmpnt, id, false); + if (err) { + dev_err(cmpnt->dev, + "Failed to configure fade for RX%d\n", id + 1); + pm_runtime_put(cmpnt->dev); + return err; + } + + changed = 1; + } + + if (!changed) { + pm_runtime_put(cmpnt->dev); + return 0; + } + + /* Enable sample count for all pending streams */ + for (id = 0; id < TEGRA210_MIXER_RX_MAX; id++) { + if (!mixer->fade_pending[id]) + continue; + + 
err = regmap_update_bits(mixer->regmap, + MIXER_REG(TEGRA210_MIXER_RX1_CTRL, id), + TEGRA210_MIXER_SAMPLE_COUNT_ENABLE, + TEGRA210_MIXER_SAMPLE_COUNT_ENABLE); + if (err) { + dev_err(cmpnt->dev, + "Failed to enable sample count for RX%d\n", + id + 1); + pm_runtime_put(cmpnt->dev); + return err; + } + + mixer->in_fade[id] = true; + mixer->fade_pending[id] = false; + } + + pm_runtime_put(cmpnt->dev); + + return 1; +} + +static int tegra210_mixer_get_fade_status(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_kcontrol_chip(kcontrol); + struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt); + u32 count; + int id, err; + + err = pm_runtime_resume_and_get(cmpnt->dev); + if (err < 0) + return err; + + for (id = 0; id < TEGRA210_MIXER_RX_MAX; id++) { + if (!mixer->in_fade[id]) { + ucontrol->value.integer.value[id] = TEGRA210_MIXER_FADE_IDLE; + continue; + } + + regmap_read(mixer->regmap, + MIXER_REG(TEGRA210_MIXER_RX1_SAMPLE_COUNT, id), + &count); + + if (count >= mixer->duration[id]) + ucontrol->value.integer.value[id] = TEGRA210_MIXER_FADE_IDLE; + else + ucontrol->value.integer.value[id] = TEGRA210_MIXER_FADE_ACTIVE; + } + + pm_runtime_put(cmpnt->dev); + + return 0; +} + static int tegra210_mixer_get_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -396,14 +601,37 @@ ADDER_CTRL_DECL(adder3, TEGRA210_MIXER_TX3_ADDER_CONFIG); ADDER_CTRL_DECL(adder4, TEGRA210_MIXER_TX4_ADDER_CONFIG); ADDER_CTRL_DECL(adder5, TEGRA210_MIXER_TX5_ADDER_CONFIG); +static int tegra210_mixer_fade_status_info(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ + uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; + uinfo->count = TEGRA210_MIXER_RX_MAX; + uinfo->value.integer.min = TEGRA210_MIXER_FADE_IDLE; + uinfo->value.integer.max = TEGRA210_MIXER_FADE_ACTIVE; + + return 0; +} + +#define FADE_CTRL(id) \ + SOC_SINGLE_EXT("RX" #id " Fade Duration", (id) - 1, 0, \ + 
TEGRA210_MIXER_FADE_DURATION_MAX, 0, \ + tegra210_mixer_get_fade_duration, \ + tegra210_mixer_put_fade_duration), \ + SOC_SINGLE_EXT("RX" #id " Fade Gain", (id) - 1, 0, \ + TEGRA210_MIXER_GAIN_MAX, 0, \ + tegra210_mixer_get_fade_gain, \ + tegra210_mixer_put_fade_gain), + #define GAIN_CTRL(id) \ SOC_SINGLE_EXT("RX" #id " Gain Volume", \ MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0, \ - 0x20000, 0, tegra210_mixer_get_gain, \ + TEGRA210_MIXER_GAIN_MAX, 0, \ + tegra210_mixer_get_gain, \ tegra210_mixer_put_gain), \ SOC_SINGLE_EXT("RX" #id " Instant Gain Volume", \ MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0, \ - 0x20000, 0, tegra210_mixer_get_gain, \ + TEGRA210_MIXER_GAIN_MAX, 0, \ + tegra210_mixer_get_gain, \ tegra210_mixer_put_instant_gain), /* Volume controls for all MIXER inputs */ @@ -418,6 +646,28 @@ static const struct snd_kcontrol_new tegra210_mixer_gain_ctls[] = { GAIN_CTRL(8) GAIN_CTRL(9) GAIN_CTRL(10) + + FADE_CTRL(1) + FADE_CTRL(2) + FADE_CTRL(3) + FADE_CTRL(4) + FADE_CTRL(5) + FADE_CTRL(6) + FADE_CTRL(7) + FADE_CTRL(8) + FADE_CTRL(9) + FADE_CTRL(10) + SOC_SINGLE_EXT("Fade Switch", SND_SOC_NOPM, 0, 1, 0, + tegra210_mixer_get_fade_switch, + tegra210_mixer_put_fade_switch), + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "Fade Status", + .info = tegra210_mixer_fade_status_info, + .access = SNDRV_CTL_ELEM_ACCESS_READ | + SNDRV_CTL_ELEM_ACCESS_VOLATILE, + .get = tegra210_mixer_get_fade_status, + }, }; static const struct snd_soc_dapm_widget tegra210_mixer_widgets[] = { @@ -579,6 +829,7 @@ static bool tegra210_mixer_volatile_reg(struct device *dev, case TEGRA210_MIXER_GAIN_CFG_RAM_DATA: case TEGRA210_MIXER_PEAKM_RAM_CTRL: case TEGRA210_MIXER_PEAKM_RAM_DATA: + case TEGRA210_MIXER_RX1_SAMPLE_COUNT: return true; default: return false; @@ -632,8 +883,11 @@ static int tegra210_mixer_platform_probe(struct platform_device *pdev) dev_set_drvdata(dev, mixer); /* Use default gain value for all MIXER inputs */ - for (i = 0; i < TEGRA210_MIXER_RX_MAX; i++) + for (i = 0; i < 
TEGRA210_MIXER_RX_MAX; i++) { mixer->gain_value[i] = gain_params.gain_value; + mixer->fade_gain[i] = gain_params.gain_value; + mixer->duration[i] = gain_params.duration[DURATION_N3_ID]; + } regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) diff --git a/sound/soc/tegra/tegra210_mixer.h b/sound/soc/tegra/tegra210_mixer.h index a330530fbc613c..bcbad08cbb9d32 100644 --- a/sound/soc/tegra/tegra210_mixer.h +++ b/sound/soc/tegra/tegra210_mixer.h @@ -79,12 +79,25 @@ #define TEGRA210_MIXER_RX_LIMIT (TEGRA210_MIXER_RX_MAX * TEGRA210_MIXER_REG_STRIDE) #define TEGRA210_MIXER_TX_MAX 5 #define TEGRA210_MIXER_TX_LIMIT (TEGRA210_MIXER_RX_LIMIT + (TEGRA210_MIXER_TX_MAX * TEGRA210_MIXER_REG_STRIDE)) +#define TEGRA210_MIXER_SAMPLE_COUNT_SHIFT 24 +#define TEGRA210_MIXER_SAMPLE_COUNT_ENABLE BIT(TEGRA210_MIXER_SAMPLE_COUNT_SHIFT) #define REG_CFG_DONE_TRIGGER 0xf #define VAL_CFG_DONE_TRIGGER 0x1 #define NUM_GAIN_POLY_COEFFS 9 -#define NUM_DURATION_PARMS 4 +#define TEGRA210_MIXER_GAIN_MAX 0x20000 +#define TEGRA210_MIXER_FADE_DURATION_MAX 0x7fffffff + +#define TEGRA210_MIXER_PRESCALAR 6 +#define TEGRA210_MIXER_FADE_IDLE 0 +#define TEGRA210_MIXER_FADE_ACTIVE 1 + +enum { + DURATION_N3_ID = 2, + DURATION_INV_N3_ID, + NUM_DURATION_PARMS, +}; struct tegra210_mixer_gain_params { int poly_coeff[NUM_GAIN_POLY_COEFFS]; @@ -94,6 +107,10 @@ struct tegra210_mixer_gain_params { struct tegra210_mixer { int gain_value[TEGRA210_MIXER_RX_MAX]; + int fade_gain[TEGRA210_MIXER_RX_MAX]; + u32 duration[TEGRA210_MIXER_RX_MAX]; + bool in_fade[TEGRA210_MIXER_RX_MAX]; + bool fade_pending[TEGRA210_MIXER_RX_MAX]; struct regmap *regmap; }; diff --git a/sound/soc/tegra/tegra210_mvc.c b/sound/soc/tegra/tegra210_mvc.c index 2c299704ef4f2f..b55f8142c4a47b 100644 --- a/sound/soc/tegra/tegra210_mvc.c +++ b/sound/soc/tegra/tegra210_mvc.c @@ -438,6 +438,9 @@ static int tegra210_mvc_set_audio_cif(struct tegra210_mvc *mvc, channels = params_channels(params); switch (params_format(params)) { + case 
SNDRV_PCM_FORMAT_S8: + audio_bits = TEGRA_ACIF_BITS_8; + break; case SNDRV_PCM_FORMAT_S16_LE: audio_bits = TEGRA_ACIF_BITS_16; break; diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 51e5ab6c276b36..ee68f28852c426 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c @@ -509,6 +509,7 @@ static const struct of_device_id tegra30_ahub_of_match[] = { { .compatible = "nvidia,tegra30-ahub", .data = &soc_data_tegra30 }, {}, }; +MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match); static int tegra30_ahub_probe(struct platform_device *pdev) { @@ -684,4 +685,3 @@ MODULE_AUTHOR("Stephen Warren "); MODULE_DESCRIPTION("Tegra30 AHUB driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match); diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index b121af9ef8eda2..1d7b3aed51fd91 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c @@ -402,6 +402,7 @@ static const struct of_device_id tegra30_i2s_of_match[] = { { .compatible = "nvidia,tegra30-i2s", .data = &tegra30_i2s_config }, {}, }; +MODULE_DEVICE_TABLE(of, tegra30_i2s_of_match); static int tegra30_i2s_platform_probe(struct platform_device *pdev) { @@ -567,4 +568,3 @@ MODULE_AUTHOR("Stephen Warren "); MODULE_DESCRIPTION("Tegra30 I2S ASoC driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_DEVICE_TABLE(of, tegra30_i2s_of_match); diff --git a/sound/soc/tegra/tegra_isomgr_bw.c b/sound/soc/tegra/tegra_isomgr_bw.c index fa979960bc0968..1c007f870759a7 100644 --- a/sound/soc/tegra/tegra_isomgr_bw.c +++ b/sound/soc/tegra/tegra_isomgr_bw.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include "tegra_isomgr_bw.h" @@ -55,19 +56,18 @@ int tegra_isomgr_adma_setbw(struct snd_pcm_substream *substream, sample_bytes; } - mutex_lock(&adma_isomgr->mutex); + scoped_guard(mutex, &adma_isomgr->mutex) { + if (is_running) { + if (bandwidth + 
adma_isomgr->current_bandwidth > adma_isomgr->max_bw) + bandwidth = adma_isomgr->max_bw - adma_isomgr->current_bandwidth; - if (is_running) { - if (bandwidth + adma_isomgr->current_bandwidth > adma_isomgr->max_bw) - bandwidth = adma_isomgr->max_bw - adma_isomgr->current_bandwidth; - - adma_isomgr->current_bandwidth += bandwidth; - } else { - adma_isomgr->current_bandwidth -= adma_isomgr->bw_per_dev[type][pcm->device]; + adma_isomgr->current_bandwidth += bandwidth; + } else { + adma_isomgr->current_bandwidth -= + adma_isomgr->bw_per_dev[type][pcm->device]; + } } - mutex_unlock(&adma_isomgr->mutex); - adma_isomgr->bw_per_dev[type][pcm->device] = bandwidth; dev_dbg(dev, "Setting up bandwidth to %d KBps\n", adma_isomgr->current_bandwidth); diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c index faa62c1a9b8e12..be9c363df361bc 100644 --- a/sound/soc/ti/j721e-evm.c +++ b/sound/soc/ti/j721e-evm.c @@ -74,7 +74,6 @@ struct j721e_audio_domain { struct j721e_priv { struct device *dev; struct snd_soc_card card; - struct snd_soc_dai_link *dai_links; struct snd_soc_codec_conf codec_conf[J721E_CODEC_CONF_COUNT]; struct snd_interval rate_range; const struct j721e_audio_match_data *match_data; @@ -84,6 +83,7 @@ struct j721e_priv { struct j721e_audio_domain audio_domains[J721E_AUDIO_DOMAIN_LAST]; struct mutex mutex; + struct snd_soc_dai_link dai_links[]; }; static const struct snd_soc_dapm_widget j721e_cpb_dapm_widgets[] = { @@ -844,33 +844,23 @@ static int j721e_soc_probe_ivi(struct j721e_priv *priv, int *link_idx, static int j721e_soc_probe(struct platform_device *pdev) { - struct device_node *node = pdev->dev.of_node; + const struct j721e_audio_match_data *match; struct snd_soc_card *card; - const struct of_device_id *match; struct j721e_priv *priv; int link_cnt, conf_cnt, ret, i; - if (!node) { - dev_err(&pdev->dev, "of node is missing.\n"); - return -ENODEV; - } - - match = of_match_node(j721e_audio_of_match, node); + match = of_device_get_match_data(&pdev->dev); 
if (!match) { dev_err(&pdev->dev, "No compatible match found\n"); return -ENODEV; } - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(&pdev->dev, + struct_size(priv, dai_links, match->num_links), GFP_KERNEL); if (!priv) return -ENOMEM; - priv->match_data = match->data; - - priv->dai_links = devm_kcalloc(&pdev->dev, priv->match_data->num_links, - sizeof(*priv->dai_links), GFP_KERNEL); - if (!priv->dai_links) - return -ENOMEM; + priv->match_data = match; for (i = 0; i < J721E_AUDIO_DOMAIN_LAST; i++) priv->audio_domains[i].parent_clk_id = -1; diff --git a/sound/soc/uniphier/aio-compress.c b/sound/soc/uniphier/aio-compress.c index b18af98a552b75..57247a03b5c9ec 100644 --- a/sound/soc/uniphier/aio-compress.c +++ b/sound/soc/uniphier/aio-compress.c @@ -183,18 +183,16 @@ static int uniphier_aio_compr_prepare(struct snd_soc_component *component, struct uniphier_aio *aio = uniphier_priv(snd_soc_rtd_to_cpu(rtd, 0)); struct uniphier_aio_sub *sub = &aio->sub[cstream->direction]; int bytes = runtime->fragment_size; - unsigned long flags; int ret; ret = aiodma_ch_set_param(sub); if (ret) return ret; - spin_lock_irqsave(&sub->lock, flags); - ret = aiodma_rb_set_buffer(sub, sub->compr_addr, - sub->compr_addr + sub->compr_bytes, - bytes); - spin_unlock_irqrestore(&sub->lock, flags); + scoped_guard(spinlock_irqsave, &sub->lock) + ret = aiodma_rb_set_buffer(sub, sub->compr_addr, + sub->compr_addr + sub->compr_bytes, + bytes); if (ret) return ret; @@ -223,9 +221,8 @@ static int uniphier_aio_compr_trigger(struct snd_soc_component *component, struct uniphier_aio_sub *sub = &aio->sub[cstream->direction]; struct device *dev = &aio->chip->pdev->dev; int bytes = runtime->fragment_size, ret = 0; - unsigned long flags; - spin_lock_irqsave(&sub->lock, flags); + guard(spinlock_irqsave)(&sub->lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes); @@ -242,7 +239,6 @@ static int 
uniphier_aio_compr_trigger(struct snd_soc_component *component, dev_warn(dev, "Unknown trigger(%d)\n", cmd); ret = -EINVAL; } - spin_unlock_irqrestore(&sub->lock, flags); return ret; } @@ -256,10 +252,9 @@ static int uniphier_aio_compr_pointer(struct snd_soc_component *component, struct uniphier_aio *aio = uniphier_priv(snd_soc_rtd_to_cpu(rtd, 0)); struct uniphier_aio_sub *sub = &aio->sub[cstream->direction]; int bytes = runtime->fragment_size; - unsigned long flags; u32 pos; - spin_lock_irqsave(&sub->lock, flags); + guard(spinlock_irqsave)(&sub->lock); aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes); @@ -273,8 +268,6 @@ static int uniphier_aio_compr_pointer(struct snd_soc_component *component, } tstamp->byte_offset = pos; - spin_unlock_irqrestore(&sub->lock, flags); - return 0; } @@ -332,7 +325,6 @@ static int uniphier_aio_compr_copy(struct snd_soc_component *component, struct uniphier_aio_sub *sub = &aio->sub[cstream->direction]; size_t cnt = min_t(size_t, count, aio_rb_space_to_end(sub) / 2); int bytes = runtime->fragment_size; - unsigned long flags; size_t s; int ret; @@ -360,7 +352,7 @@ static int uniphier_aio_compr_copy(struct snd_soc_component *component, if (ret) return -EFAULT; - spin_lock_irqsave(&sub->lock, flags); + guard(spinlock_irqsave)(&sub->lock); sub->threshold = 2 * bytes; aiodma_rb_set_threshold(sub, sub->compr_bytes, 2 * bytes); @@ -376,8 +368,6 @@ static int uniphier_aio_compr_copy(struct snd_soc_component *component, } aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes); - spin_unlock_irqrestore(&sub->lock, flags); - return cnt; } diff --git a/sound/soc/uniphier/aio-dma.c b/sound/soc/uniphier/aio-dma.c index c1ca5599710372..c01eae55d4fc43 100644 --- a/sound/soc/uniphier/aio-dma.c +++ b/sound/soc/uniphier/aio-dma.c @@ -32,15 +32,15 @@ static void aiodma_pcm_irq(struct uniphier_aio_sub *sub) runtime->channels * samples_to_bytes(runtime, 1); int ret; - spin_lock(&sub->lock); - ret = aiodma_rb_set_threshold(sub, 
runtime->dma_bytes, - sub->threshold + bytes); - if (!ret) - sub->threshold += bytes; - - aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes); - aiodma_rb_clear_irq(sub); - spin_unlock(&sub->lock); + scoped_guard(spinlock, &sub->lock) { + ret = aiodma_rb_set_threshold(sub, runtime->dma_bytes, + sub->threshold + bytes); + if (!ret) + sub->threshold += bytes; + + aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes); + aiodma_rb_clear_irq(sub); + } snd_pcm_period_elapsed(sub->substream); } @@ -51,15 +51,15 @@ static void aiodma_compr_irq(struct uniphier_aio_sub *sub) int bytes = runtime->fragment_size; int ret; - spin_lock(&sub->lock); - ret = aiodma_rb_set_threshold(sub, sub->compr_bytes, - sub->threshold + bytes); - if (!ret) - sub->threshold += bytes; + scoped_guard(spinlock, &sub->lock) { + ret = aiodma_rb_set_threshold(sub, sub->compr_bytes, + sub->threshold + bytes); + if (!ret) + sub->threshold += bytes; - aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes); - aiodma_rb_clear_irq(sub); - spin_unlock(&sub->lock); + aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes); + aiodma_rb_clear_irq(sub); + } snd_compr_fragment_elapsed(sub->cstream); } @@ -113,18 +113,16 @@ static int uniphier_aiodma_prepare(struct snd_soc_component *component, struct uniphier_aio_sub *sub = &aio->sub[substream->stream]; int bytes = runtime->period_size * runtime->channels * samples_to_bytes(runtime, 1); - unsigned long flags; int ret; ret = aiodma_ch_set_param(sub); if (ret) return ret; - spin_lock_irqsave(&sub->lock, flags); - ret = aiodma_rb_set_buffer(sub, runtime->dma_addr, - runtime->dma_addr + runtime->dma_bytes, - bytes); - spin_unlock_irqrestore(&sub->lock, flags); + scoped_guard(spinlock_irqsave, &sub->lock) + ret = aiodma_rb_set_buffer(sub, runtime->dma_addr, + runtime->dma_addr + runtime->dma_bytes, + bytes); if (ret) return ret; @@ -141,9 +139,8 @@ static int uniphier_aiodma_trigger(struct snd_soc_component *component, struct 
device *dev = &aio->chip->pdev->dev; int bytes = runtime->period_size * runtime->channels * samples_to_bytes(runtime, 1); - unsigned long flags; - spin_lock_irqsave(&sub->lock, flags); + guard(spinlock_irqsave)(&sub->lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, @@ -161,7 +158,6 @@ static int uniphier_aiodma_trigger(struct snd_soc_component *component, dev_warn(dev, "Unknown trigger(%d) ignored\n", cmd); break; } - spin_unlock_irqrestore(&sub->lock, flags); return 0; } @@ -176,17 +172,15 @@ static snd_pcm_uframes_t uniphier_aiodma_pointer( struct uniphier_aio_sub *sub = &aio->sub[substream->stream]; int bytes = runtime->period_size * runtime->channels * samples_to_bytes(runtime, 1); - unsigned long flags; snd_pcm_uframes_t pos; - spin_lock_irqsave(&sub->lock, flags); + guard(spinlock_irqsave)(&sub->lock); aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes); if (sub->swm->dir == PORT_DIR_OUTPUT) pos = bytes_to_frames(runtime, sub->rd_offs); else pos = bytes_to_frames(runtime, sub->wr_offs); - spin_unlock_irqrestore(&sub->lock, flags); return pos; } diff --git a/sound/soc/ux500/mop500_ab8500.c b/sound/soc/ux500/mop500_ab8500.c index 2e6ed19a18cdd4..2a459267f0f91d 100644 --- a/sound/soc/ux500/mop500_ab8500.c +++ b/sound/soc/ux500/mop500_ab8500.c @@ -234,19 +234,18 @@ static int mop500_ab8500_hw_params(struct snd_pcm_substream *substream, substream->number); /* Ensure configuration consistency between DAIs */ - mutex_lock(&mop500_ab8500_params_lock); - if (mop500_ab8500_usage) { - if (mop500_ab8500_rate != params_rate(params) || - mop500_ab8500_channels != params_channels(params)) { - mutex_unlock(&mop500_ab8500_params_lock); - return -EBUSY; + scoped_guard(mutex, &mop500_ab8500_params_lock) { + if (mop500_ab8500_usage) { + if (mop500_ab8500_rate != params_rate(params) || + mop500_ab8500_channels != params_channels(params)) { + return -EBUSY; + } + } else { + mop500_ab8500_rate = 
params_rate(params); + mop500_ab8500_channels = params_channels(params); } - } else { - mop500_ab8500_rate = params_rate(params); - mop500_ab8500_channels = params_channels(params); + __set_bit(cpu_dai->id, &mop500_ab8500_usage); } - __set_bit(cpu_dai->id, &mop500_ab8500_usage); - mutex_unlock(&mop500_ab8500_params_lock); channels = params_channels(params); @@ -339,9 +338,8 @@ static int mop500_ab8500_hw_free(struct snd_pcm_substream *substream) struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); - mutex_lock(&mop500_ab8500_params_lock); + guard(mutex)(&mop500_ab8500_params_lock); __clear_bit(cpu_dai->id, &mop500_ab8500_usage); - mutex_unlock(&mop500_ab8500_params_lock); return 0; } diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c index 8af0c04041ee3e..b20aae0caf60a4 100644 --- a/sound/usb/caiaq/device.c +++ b/sound/usb/caiaq/device.c @@ -366,7 +366,7 @@ static int setup_card(struct snd_usb_caiaqdev *cdev) #ifdef CONFIG_SND_USB_CAIAQ_INPUT ret = snd_usb_caiaq_input_init(cdev); - if (ret < 0) { + if (ret < 0 && ret != -ENODEV) { dev_err(dev, "Unable to set up input system (ret=%d)\n", ret); return ret; } @@ -423,6 +423,7 @@ static int create_card(struct usb_device *usb_dev, cdev = caiaqdev(card); cdev->chip.dev = usb_get_dev(usb_dev); + card->private_free = card_free; cdev->chip.card = card; cdev->chip.usb_id = USB_ID(le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct)); @@ -511,10 +512,9 @@ static int init_card(struct snd_usb_caiaqdev *cdev) scnprintf(card->longname, sizeof(card->longname), "%s %s (%s)", cdev->vendor_name, cdev->product_name, usbpath); - card->private_free = card_free; err = setup_card(cdev); if (err < 0) - return err; + goto err_kill_urb; return 0; diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c index a9130891bb696d..5c70fdf61cc139 100644 --- a/sound/usb/caiaq/input.c +++ b/sound/usb/caiaq/input.c @@ -804,7 
+804,7 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev) default: /* no input methods supported on this device */ - ret = -EINVAL; + ret = -ENODEV; goto exit_free_idev; } diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c index 49b3dd8d827dae..d129b42eb979d0 100644 --- a/sound/usb/misc/ua101.c +++ b/sound/usb/misc/ua101.c @@ -974,6 +974,13 @@ static int detect_usb_format(struct ua101 *ua) ua->capture.channels = fmt_capture->bNrChannels; ua->playback.channels = fmt_playback->bNrChannels; + if (!ua->capture.channels || !ua->playback.channels) { + dev_err(&ua->dev->dev, + "invalid channel count: capture %u, playback %u\n", + ua->capture.channels, ua->playback.channels); + return -EINVAL; + } + ua->capture.frame_bytes = fmt_capture->bSubframeSize * ua->capture.channels; ua->playback.frame_bytes = diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 9d9ed68166c88c..d61bde65421973 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -665,17 +665,13 @@ static int get_term_name(struct snd_usb_audio *chip, struct usb_audio_term *iter return 0; switch (iterm->type >> 16) { case UAC3_SELECTOR_UNIT: - strscpy(name, "Selector", maxlen); - return 8; + return strscpy(name, "Selector", maxlen); case UAC3_PROCESSING_UNIT: - strscpy(name, "Process Unit", maxlen); - return 12; + return strscpy(name, "Process Unit", maxlen); case UAC3_EXTENSION_UNIT: - strscpy(name, "Ext Unit", maxlen); - return 8; + return strscpy(name, "Ext Unit", maxlen); case UAC3_MIXER_UNIT: - strscpy(name, "Mixer", maxlen); - return 5; + return strscpy(name, "Mixer", maxlen); default: return scnprintf(name, maxlen, "Unit %d", iterm->id); } @@ -683,25 +679,18 @@ static int get_term_name(struct snd_usb_audio *chip, struct usb_audio_term *iter switch (iterm->type & 0xff00) { case 0x0100: - strscpy(name, "PCM", maxlen); - return 3; + return strscpy(name, "PCM", maxlen); case 0x0200: - strscpy(name, "Mic", maxlen); - return 3; + return strscpy(name, "Mic", maxlen); case 0x0400: - 
strscpy(name, "Headset", maxlen); - return 7; + return strscpy(name, "Headset", maxlen); case 0x0500: - strscpy(name, "Phone", maxlen); - return 5; + return strscpy(name, "Phone", maxlen); } - for (names = iterm_names; names->type; names++) { - if (names->type == iterm->type) { - strscpy(name, names->name, maxlen); - return strlen(names->name); - } - } + for (names = iterm_names; names->type; names++) + if (names->type == iterm->type) + return strscpy(name, names->name, maxlen); return 0; } @@ -1190,6 +1179,16 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, cval->res = 1; } break; + + case USB_ID(0x0e6f, 0x024a): /* PDP Riffmaster for PS4 */ + case USB_ID(0x0e6f, 0x0249): /* PDP Riffmaster for PS5 */ + if (!strcmp(kctl->id.name, "PCM Playback Volume")) { + usb_audio_info(chip, + "set volume quirk for PDP Riffmaster for PS4/PS5\n"); + cval->min = -2560; /* Mute under it */ + } + break; + case USB_ID(0x3302, 0x12db): /* MOONDROP Quark2 */ if (!strcmp(kctl->id.name, "PCM Playback Volume")) { usb_audio_info(chip, @@ -1983,7 +1982,9 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer, int name_len = get_term_name(mixer->chip, term, name, name_size, 0); if (name_len == 0) - strscpy(name, "Unknown", name_size); + name_len = strscpy(name, "Unknown", name_size); + if (name_len < 0) + return; /* * sound/core/ctljack.c has a convention of naming jack controls @@ -1991,9 +1992,9 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer, * indicating Input or Output after the terminal name. 
*/ if (is_input) - strlcat(name, " - Input Jack", name_size); + strscpy(name + name_len, " - Input Jack", name_size - name_len); else - strlcat(name, " - Output Jack", name_size); + strscpy(name + name_len, " - Output Jack", name_size - name_len); } /* get connector value to "wake up" the USB audio */ diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 1bdaa46d4fe182..99975c3240a551 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -333,6 +333,7 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, int index = kcontrol->private_value & 0xff; unsigned int value = ucontrol->value.integer.value[0]; int old_value = kcontrol->private_value >> 8; + unsigned long old_pval = kcontrol->private_value; int err; if (value > 1) @@ -341,7 +342,11 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, return 0; kcontrol->private_value = (value << 8) | index; err = snd_audigy2nx_led_update(mixer, value, index); - return err < 0 ? err : 1; + if (err < 0) { + kcontrol->private_value = old_pval; + return err; + } + return 1; } static int snd_audigy2nx_led_resume(struct usb_mixer_elem_list *list) @@ -487,6 +492,7 @@ static int snd_emu0204_ch_switch_put(struct snd_kcontrol *kcontrol, struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol); struct usb_mixer_interface *mixer = list->mixer; unsigned int value = ucontrol->value.enumerated.item[0]; + unsigned long old_pval = kcontrol->private_value; int err; if (value > 1) @@ -497,7 +503,11 @@ static int snd_emu0204_ch_switch_put(struct snd_kcontrol *kcontrol, kcontrol->private_value = value; err = snd_emu0204_ch_switch_update(mixer, value); - return err < 0 ? 
err : 1; + if (err < 0) { + kcontrol->private_value = old_pval; + return err; + } + return 1; } static int snd_emu0204_ch_switch_resume(struct usb_mixer_elem_list *list) @@ -821,7 +831,11 @@ static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol, kcontrol->private_value = new_status; err = snd_xonar_u1_switch_update(list->mixer, new_status); - return err < 0 ? err : 1; + if (err < 0) { + kcontrol->private_value = old_status; + return err; + } + return 1; } static int snd_xonar_u1_switch_resume(struct usb_mixer_elem_list *list) @@ -1159,7 +1173,8 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol); - u8 oldval = (kcontrol->private_value >> 24) & 0xff; + unsigned long old_pval = kcontrol->private_value; + u8 oldval = (old_pval >> 24) & 0xff; u8 newval = ucontrol->value.integer.value[0]; int err; @@ -1169,7 +1184,11 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol, kcontrol->private_value &= ~(0xff << 24); kcontrol->private_value |= (unsigned int)newval << 24; err = snd_ni_update_cur_val(list); - return err < 0 ? err : 1; + if (err < 0) { + kcontrol->private_value = old_pval; + return err; + } + return 1; } static const struct snd_kcontrol_new snd_nativeinstruments_ta6_mixers[] = { @@ -1324,7 +1343,8 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl); - unsigned int pval = list->kctl->private_value; + unsigned long old_pval = list->kctl->private_value; + unsigned int pval = old_pval; int cur_val, err, new_val; cur_val = pval >> 24; @@ -1335,7 +1355,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl, kctl->private_value &= ~(0xff << 24); kctl->private_value |= new_val << 24; err = snd_ftu_eff_switch_update(list); - return err < 0 ? 
err : 1; + if (err < 0) { + kctl->private_value = old_pval; + return err; + } + return 1; } static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, @@ -2114,13 +2138,18 @@ static int snd_soundblaster_e1_switch_put(struct snd_kcontrol *kcontrol, { struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol); unsigned char value = !!ucontrol->value.integer.value[0]; + unsigned long old_pval = kcontrol->private_value; int err; if (kcontrol->private_value == value) return 0; kcontrol->private_value = value; err = snd_soundblaster_e1_switch_update(list->mixer, value); - return err < 0 ? err : 1; + if (err < 0) { + kcontrol->private_value = old_pval; + return err; + } + return 1; } static int snd_soundblaster_e1_switch_resume(struct usb_mixer_elem_list *list) @@ -2998,12 +3027,14 @@ static int snd_bbfpro_ctl_put(struct snd_kcontrol *kcontrol, if (val == old_value) return 0; + err = snd_bbfpro_ctl_update(mixer, reg, idx, val); + if (err < 0) + return err; + kcontrol->private_value = reg | ((idx & SND_BBFPRO_CTL_IDX_MASK) << SND_BBFPRO_CTL_IDX_SHIFT) | ((val & SND_BBFPRO_CTL_VAL_MASK) << SND_BBFPRO_CTL_VAL_SHIFT); - - err = snd_bbfpro_ctl_update(mixer, reg, idx, val); - return err < 0 ? err : 1; + return 1; } static int snd_bbfpro_ctl_resume(struct usb_mixer_elem_list *list) @@ -3188,11 +3219,13 @@ static int snd_bbfpro_vol_put(struct snd_kcontrol *kcontrol, new_val = uvalue & SND_BBFPRO_MIXER_VAL_MASK; + err = snd_bbfpro_vol_update(mixer, idx, new_val); + if (err < 0) + return err; + kcontrol->private_value = idx | (new_val << SND_BBFPRO_MIXER_VAL_SHIFT); - - err = snd_bbfpro_vol_update(mixer, idx, new_val); - return err < 0 ? 
err : 1; + return 1; } static int snd_bbfpro_vol_resume(struct usb_mixer_elem_list *list) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 803e03d4d77b88..4e9cfff4047fc0 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2652,6 +2652,9 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, + +#endif /* disabled */ + { /* * The AudioBox USB advertises S24_3LE as the only supported format @@ -2700,7 +2703,6 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, -#endif /* disabled */ { /* diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 7b803ad58487f9..0b4ecc2c6bcc40 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -125,7 +125,7 @@ static int add_audio_stream_from_fixed_fmt(struct snd_usb_audio *chip, snd_usb_audioformat_set_sync_ep(chip, fp); - err = snd_usb_add_audio_stream(chip, stream, fp); + err = snd_usb_add_audio_stream(chip, stream, fp, NULL); if (err < 0) return err; diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 2532bf97e05e00..b2c5c8198281ad 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -79,7 +79,7 @@ static void snd_usb_audio_pcm_free(struct snd_pcm *pcm) static void snd_usb_init_substream(struct snd_usb_stream *as, int stream, struct audioformat *fp, - struct snd_usb_power_domain *pd) + struct snd_usb_power_domain **pdptr) { struct snd_usb_substream *subs = &as->substream[stream]; @@ -105,10 +105,11 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, if (fp->channels > subs->channels_max) subs->channels_max = fp->channels; - if (pd) { - subs->str_pd = pd; + if (pdptr && *pdptr) { + subs->str_pd = *pdptr; + *pdptr = NULL; /* assigned */ /* Initialize Power Domain to idle status D1 */ - snd_usb_power_domain_set(subs->stream->chip, pd, + snd_usb_power_domain_set(subs->stream->chip, subs->str_pd, UAC3_PD_STATE_D1); } @@ -352,6 +353,8 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor if (len < sizeof(*cs_desc)) break; cs_len = le16_to_cpu(cs_desc->wLength); 
+ if (cs_len < sizeof(*cs_desc)) + break; if (len < cs_len) break; cs_type = cs_desc->bSegmentType; @@ -492,11 +495,14 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor * if not, create a new pcm stream. note, fp is added to the substream * fmt_list and will be freed on the chip instance release. do not free * fp or do remove it from the substream fmt_list to avoid double-free. + * + * pdptr is optional and can be NULL. When it's non-NULL and the PD gets + * assigned to the stream, *pdptr is cleared to NULL upon return. */ -static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, - int stream, - struct audioformat *fp, - struct snd_usb_power_domain *pd) +int snd_usb_add_audio_stream(struct snd_usb_audio *chip, + int stream, + struct audioformat *fp, + struct snd_usb_power_domain **pdptr) { struct snd_usb_stream *as; @@ -529,7 +535,7 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, err = snd_pcm_new_stream(as->pcm, stream, 1); if (err < 0) return err; - snd_usb_init_substream(as, stream, fp, pd); + snd_usb_init_substream(as, stream, fp, pdptr); return add_chmap(as->pcm, stream, subs); } @@ -558,7 +564,7 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, else strscpy(pcm->name, "USB Audio"); - snd_usb_init_substream(as, stream, fp, pd); + snd_usb_init_substream(as, stream, fp, pdptr); /* * Keep using head insertion for M-Audio Audiophile USB (tm) which has a @@ -576,21 +582,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, return add_chmap(pcm, stream, &as->substream[stream]); } -int snd_usb_add_audio_stream(struct snd_usb_audio *chip, - int stream, - struct audioformat *fp) -{ - return __snd_usb_add_audio_stream(chip, stream, fp, NULL); -} - -static int snd_usb_add_audio_stream_v3(struct snd_usb_audio *chip, - int stream, - struct audioformat *fp, - struct snd_usb_power_domain *pd) -{ - return __snd_usb_add_audio_stream(chip, stream, fp, pd); -} - static int 
parse_uac_endpoint_attributes(struct snd_usb_audio *chip, struct usb_host_interface *alts, int protocol, int iface_no) @@ -1003,7 +994,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, * and request Cluster Descriptor */ wLength = le16_to_cpu(hc_header.wLength); - if (wLength < sizeof(cluster)) + if (wLength < sizeof(*cluster)) return NULL; cluster = kzalloc(wLength, GFP_KERNEL); if (!cluster) @@ -1113,8 +1104,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, } } - if (pd) - *pd_out = pd; + *pd_out = pd; return fp; } @@ -1129,7 +1119,6 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip, struct usb_interface_descriptor *altsd; int i, altno, err, stream; struct audioformat *fp = NULL; - struct snd_usb_power_domain *pd = NULL; bool set_iface_first; int num, protocol; @@ -1171,6 +1160,12 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip, if (snd_usb_apply_interface_quirk(chip, iface_no, altno)) continue; + /* pd may be allocated at snd_usb_get_audioformat_uac3() and + * assigned at snd_usb_add_audio_stream(); otherwise it'll be + * freed automatically by cleanup at each loop. + */ + struct snd_usb_power_domain *pd __free(kfree) = NULL; + /* * Roland audio streaming interfaces are marked with protocols * 0/1/2, but are UAC 1 compatible. 
@@ -1226,23 +1221,16 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip, *has_non_pcm = true; if ((fp->fmt_type == UAC_FORMAT_TYPE_I) == non_pcm) { audioformat_free(fp); - kfree(pd); fp = NULL; - pd = NULL; continue; } snd_usb_audioformat_set_sync_ep(chip, fp); dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint); - if (protocol == UAC_VERSION_3) - err = snd_usb_add_audio_stream_v3(chip, stream, fp, pd); - else - err = snd_usb_add_audio_stream(chip, stream, fp); - + err = snd_usb_add_audio_stream(chip, stream, fp, &pd); if (err < 0) { audioformat_free(fp); - kfree(pd); return err; } diff --git a/sound/usb/stream.h b/sound/usb/stream.h index d92e18d5818fee..61b9a133da018a 100644 --- a/sound/usb/stream.h +++ b/sound/usb/stream.h @@ -7,7 +7,8 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int snd_usb_add_audio_stream(struct snd_usb_audio *chip, int stream, - struct audioformat *fp); + struct audioformat *fp, + struct snd_usb_power_domain **pdptr); #endif /* __USBAUDIO_STREAM_H */ diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c index b68abb9e760b89..aad698fe294bf4 100644 --- a/tools/sched_ext/scx_qmap.bpf.c +++ b/tools/sched_ext/scx_qmap.bpf.c @@ -159,13 +159,7 @@ static s32 pick_direct_dispatch_cpu(struct task_struct *p, s32 prev_cpu) static struct task_ctx *lookup_task_ctx(struct task_struct *p) { - struct task_ctx *tctx; - - if (!(tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0))) { - scx_bpf_error("task_ctx lookup failed"); - return NULL; - } - return tctx; + return bpf_task_storage_get(&task_ctx_stor, p, 0, 0); } s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p, @@ -175,7 +169,7 @@ s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p, s32 cpu; if (!(tctx = lookup_task_ctx(p))) - return -ESRCH; + return prev_cpu; if (p->scx.weight < 2 && !(p->flags & PF_KTHREAD)) return prev_cpu; @@ -540,13 +534,9 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, 
struct task_struct *prev) */ if (prev) { tctx = bpf_task_storage_get(&task_ctx_stor, prev, 0, 0); - if (!tctx) { - scx_bpf_error("task_ctx lookup failed"); - return; - } - - tctx->core_sched_seq = - core_sched_tail_seqs[weight_to_idx(prev->scx.weight)]++; + if (tctx) + tctx->core_sched_seq = + core_sched_tail_seqs[weight_to_idx(prev->scx.weight)]++; } } @@ -584,10 +574,8 @@ static s64 task_qdist(struct task_struct *p) s64 qdist; tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); - if (!tctx) { - scx_bpf_error("task_ctx lookup failed"); + if (!tctx) return 0; - } qdist = tctx->core_sched_seq - core_sched_head_seqs[idx]; diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index feedd5ab7058fa..0607913a3022d5 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -2,7 +2,7 @@ /* * maple_tree.c: Userspace testing for maple tree test-suite * Copyright (c) 2018-2022 Oracle Corporation - * Author: Liam R. Howlett + * Author: Liam R. Howlett * * Any tests that require internal knowledge of the tree or threads and other * difficult to handle in kernel tests. 
diff --git a/tools/testing/selftests/arm64/gcs/gcs-util.h b/tools/testing/selftests/arm64/gcs/gcs-util.h index c99a6b39ac147b..7a81bb07ed4b8b 100644 --- a/tools/testing/selftests/arm64/gcs/gcs-util.h +++ b/tools/testing/selftests/arm64/gcs/gcs-util.h @@ -18,12 +18,6 @@ #ifndef NT_ARM_GCS #define NT_ARM_GCS 0x410 - -struct user_gcs { - __u64 features_enabled; - __u64 features_locked; - __u64 gcspr_el0; -}; #endif /* Shadow Stack/Guarded Control Stack interface */ diff --git a/tools/testing/selftests/arm64/gcs/libc-gcs.c b/tools/testing/selftests/arm64/gcs/libc-gcs.c index 17b2fabfec386e..72e82bfbecc99e 100644 --- a/tools/testing/selftests/arm64/gcs/libc-gcs.c +++ b/tools/testing/selftests/arm64/gcs/libc-gcs.c @@ -16,6 +16,7 @@ #include #include +#include #include diff --git a/tools/testing/selftests/drivers/net/README.rst b/tools/testing/selftests/drivers/net/README.rst index c8588436c2243f..c6bed9a985bc74 100644 --- a/tools/testing/selftests/drivers/net/README.rst +++ b/tools/testing/selftests/drivers/net/README.rst @@ -211,8 +211,8 @@ Avoid libraries and frameworks Test files should be relatively self contained. The libraries should only include very core or non-trivial code. -It may be tempting to "factor out" the common code, but fight that urge. -Library code increases the barrier of entry, and complexity in general. +It may be tempting to "factor out" the common code to lib/py/, but fight that +urge. Library code increases the barrier of entry, and complexity in general. Avoid mixing test code and boilerplate ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -290,6 +290,12 @@ or:: def test(cfg, mode, protocol): pass +Linters +~~~~~~~ + +We expect clean ``ruff check`` and ``pylint --disable=R``. +The code should be clean, avoid disabling pylint warnings explicitly! 
+ Running tests CI-style ====================== diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c index b058f27b214114..e5bbdb5bbdc38a 100644 --- a/tools/testing/selftests/kvm/access_tracking_perf_test.c +++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c @@ -101,15 +101,15 @@ struct test_params { enum vm_mem_backing_src_type backing_src; /* The amount of memory to allocate for each vCPU. */ - uint64_t vcpu_memory_bytes; + u64 vcpu_memory_bytes; /* The number of vCPUs to create in the VM. */ int nr_vcpus; }; -static uint64_t pread_uint64(int fd, const char *filename, uint64_t index) +static u64 pread_u64(int fd, const char *filename, u64 index) { - uint64_t value; + u64 value; off_t offset = index * sizeof(value); TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value), @@ -123,13 +123,13 @@ static uint64_t pread_uint64(int fd, const char *filename, uint64_t index) #define PAGEMAP_PRESENT (1ULL << 63) #define PAGEMAP_PFN_MASK ((1ULL << 55) - 1) -static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) +static u64 lookup_pfn(int pagemap_fd, struct kvm_vm *vm, gva_t gva) { - uint64_t hva = (uint64_t) addr_gva2hva(vm, gva); - uint64_t entry; - uint64_t pfn; + u64 hva = (u64)addr_gva2hva(vm, gva); + u64 entry; + u64 pfn; - entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize()); + entry = pread_u64(pagemap_fd, "pagemap", hva / getpagesize()); if (!(entry & PAGEMAP_PRESENT)) return 0; @@ -139,16 +139,16 @@ static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) return pfn; } -static bool is_page_idle(int page_idle_fd, uint64_t pfn) +static bool is_page_idle(int page_idle_fd, u64 pfn) { - uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64); + u64 bits = pread_u64(page_idle_fd, "page_idle", pfn / 64); return !!((bits >> (pfn % 64)) & 1); } -static void mark_page_idle(int page_idle_fd, uint64_t pfn) +static void 
mark_page_idle(int page_idle_fd, u64 pfn) { - uint64_t bits = 1ULL << (pfn % 64); + u64 bits = 1ULL << (pfn % 64); TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8, "Set page_idle bits for PFN 0x%" PRIx64, pfn); @@ -174,11 +174,11 @@ static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm, struct memstress_vcpu_args *vcpu_args) { int vcpu_idx = vcpu_args->vcpu_idx; - uint64_t base_gva = vcpu_args->gva; - uint64_t pages = vcpu_args->pages; - uint64_t page; - uint64_t still_idle = 0; - uint64_t no_pfn = 0; + gva_t base_gva = vcpu_args->gva; + u64 pages = vcpu_args->pages; + u64 page; + u64 still_idle = 0; + u64 no_pfn = 0; int page_idle_fd; int pagemap_fd; @@ -193,8 +193,8 @@ static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm, TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap."); for (page = 0; page < pages; page++) { - uint64_t gva = base_gva + page * memstress_args.guest_page_size; - uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva); + gva_t gva = base_gva + page * memstress_args.guest_page_size; + u64 pfn = lookup_pfn(pagemap_fd, vm, gva); if (!pfn) { no_pfn++; @@ -297,10 +297,10 @@ static void lru_gen_mark_memory_idle(struct kvm_vm *vm) lru_gen_last_gen = new_gen; } -static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall) +static void assert_ucall(struct kvm_vcpu *vcpu, u64 expected_ucall) { struct ucall uc; - uint64_t actual_ucall = get_ucall(vcpu, &uc); + u64 actual_ucall = get_ucall(vcpu, &uc); TEST_ASSERT(expected_ucall == actual_ucall, "Guest exited unexpectedly (expected ucall %" PRIu64 @@ -417,7 +417,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) */ test_pages = params->nr_vcpus * params->vcpu_memory_bytes / max(memstress_args.guest_page_size, - (uint64_t)getpagesize()); + (u64)getpagesize()); memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main); diff --git a/tools/testing/selftests/kvm/arch_timer.c b/tools/testing/selftests/kvm/arch_timer.c index cf8fb67104f155..90c475a61b22b6 100644 --- 
a/tools/testing/selftests/kvm/arch_timer.c +++ b/tools/testing/selftests/kvm/arch_timer.c @@ -78,9 +78,9 @@ static void *test_vcpu_run(void *arg) return NULL; } -static uint32_t test_get_pcpu(void) +static u32 test_get_pcpu(void) { - uint32_t pcpu; + u32 pcpu; unsigned int nproc_conf; cpu_set_t online_cpuset; @@ -98,7 +98,7 @@ static uint32_t test_get_pcpu(void) static int test_migrate_vcpu(unsigned int vcpu_idx) { int ret; - uint32_t new_pcpu = test_get_pcpu(); + u32 new_pcpu = test_get_pcpu(); pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu); diff --git a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c index 713005b6f508e8..8a019cbaf4c413 100644 --- a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c @@ -66,7 +66,7 @@ static void test_guest_raz(struct kvm_vcpu *vcpu) } } -static uint64_t raz_wi_reg_ids[] = { +static u64 raz_wi_reg_ids[] = { KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1), KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1), KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1), @@ -94,8 +94,8 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu) int i; for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) { - uint64_t reg_id = raz_wi_reg_ids[i]; - uint64_t val; + u64 reg_id = raz_wi_reg_ids[i]; + u64 val; val = vcpu_get_reg(vcpu, reg_id); TEST_ASSERT_EQ(val, 0); @@ -111,7 +111,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu) } } -static uint64_t raz_invariant_reg_ids[] = { +static u64 raz_invariant_reg_ids[] = { KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1), KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)), KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1), @@ -123,8 +123,8 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu) int i, r; for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) { - uint64_t reg_id = raz_invariant_reg_ids[i]; - uint64_t val; + u64 reg_id = raz_invariant_reg_ids[i]; + u64 val; val = vcpu_get_reg(vcpu, reg_id); TEST_ASSERT_EQ(val, 0); @@ -142,7 +142,7 @@ 
static void test_user_raz_invariant(struct kvm_vcpu *vcpu) static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) { - uint64_t val, el0; + u64 val, el0; val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); diff --git a/tools/testing/selftests/kvm/arm64/arch_timer.c b/tools/testing/selftests/kvm/arm64/arch_timer.c index d592a4515399fc..5fa5c0ec2b3e77 100644 --- a/tools/testing/selftests/kvm/arm64/arch_timer.c +++ b/tools/testing/selftests/kvm/arm64/arch_timer.c @@ -56,7 +56,7 @@ static void guest_validate_irq(unsigned int intid, struct test_vcpu_shared_data *shared_data) { enum guest_stage stage = shared_data->guest_stage; - uint64_t xcnt = 0, xcnt_diff_us, cval = 0; + u64 xcnt = 0, xcnt_diff_us, cval = 0; unsigned long xctl = 0; unsigned int timer_irq = 0; unsigned int accessor; @@ -105,7 +105,7 @@ static void guest_validate_irq(unsigned int intid, static void guest_irq_handler(struct ex_regs *regs) { unsigned int intid = gic_get_and_ack_irq(); - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; guest_validate_irq(intid, shared_data); @@ -116,7 +116,7 @@ static void guest_irq_handler(struct ex_regs *regs) static void guest_run_stage(struct test_vcpu_shared_data *shared_data, enum guest_stage stage) { - uint32_t irq_iter, config_iter; + u32 irq_iter, config_iter; shared_data->guest_stage = stage; shared_data->nr_iter = 0; @@ -140,7 +140,7 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data, static void guest_code(void) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; local_irq_disable(); diff --git a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c index 993c9e38e729d5..f7625eb711d6b8 100644 --- a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c +++ 
b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c @@ -23,25 +23,25 @@ #include "vgic.h" /* Depends on counter width. */ -static uint64_t CVAL_MAX; +static u64 CVAL_MAX; /* tval is a signed 32-bit int. */ -static const int32_t TVAL_MAX = INT32_MAX; -static const int32_t TVAL_MIN = INT32_MIN; +static const s32 TVAL_MAX = INT32_MAX; +static const s32 TVAL_MIN = INT32_MIN; /* After how much time we say there is no IRQ. */ -static const uint32_t TIMEOUT_NO_IRQ_US = 50000; +static const u32 TIMEOUT_NO_IRQ_US = 50000; /* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */ -static uint64_t DEF_CNT; +static u64 DEF_CNT; /* Number of runs. */ -static const uint32_t NR_TEST_ITERS_DEF = 5; +static const u32 NR_TEST_ITERS_DEF = 5; /* Default wait test time in ms. */ -static const uint32_t WAIT_TEST_MS = 10; +static const u32 WAIT_TEST_MS = 10; /* Default "long" wait test time in ms. */ -static const uint32_t LONG_WAIT_TEST_MS = 100; +static const u32 LONG_WAIT_TEST_MS = 100; /* Shared with IRQ handler. */ struct test_vcpu_shared_data { @@ -53,9 +53,9 @@ struct test_args { /* Virtual or physical timer and counter tests. */ enum arch_timer timer; /* Delay used for most timer tests. */ - uint64_t wait_ms; + u64 wait_ms; /* Delay used in the test_long_timer_delays test. */ - uint64_t long_wait_ms; + u64 long_wait_ms; /* Number of iterations. */ int iterations; /* Whether to test the physical timer. 
*/ @@ -82,12 +82,12 @@ enum sync_cmd { NO_USERSPACE_CMD, }; -typedef void (*sleep_method_t)(enum arch_timer timer, uint64_t usec); +typedef void (*sleep_method_t)(enum arch_timer timer, u64 usec); -static void sleep_poll(enum arch_timer timer, uint64_t usec); -static void sleep_sched_poll(enum arch_timer timer, uint64_t usec); -static void sleep_in_userspace(enum arch_timer timer, uint64_t usec); -static void sleep_migrate(enum arch_timer timer, uint64_t usec); +static void sleep_poll(enum arch_timer timer, u64 usec); +static void sleep_sched_poll(enum arch_timer timer, u64 usec); +static void sleep_in_userspace(enum arch_timer timer, u64 usec); +static void sleep_migrate(enum arch_timer timer, u64 usec); sleep_method_t sleep_method[] = { sleep_poll, @@ -115,14 +115,14 @@ enum timer_view { TIMER_TVAL, }; -static void assert_irqs_handled(uint32_t n) +static void assert_irqs_handled(u32 n) { int h = atomic_read(&shared_data.handled); __GUEST_ASSERT(h == n, "Handled %d IRQS but expected %d", h, n); } -static void userspace_cmd(uint64_t cmd) +static void userspace_cmd(u64 cmd) { GUEST_SYNC_ARGS(cmd, 0, 0, 0, 0); } @@ -132,12 +132,12 @@ static void userspace_migrate_vcpu(void) userspace_cmd(USERSPACE_MIGRATE_SELF); } -static void userspace_sleep(uint64_t usecs) +static void userspace_sleep(u64 usecs) { GUEST_SYNC_ARGS(USERSPACE_USLEEP, usecs, 0, 0, 0); } -static void set_counter(enum arch_timer timer, uint64_t counter) +static void set_counter(enum arch_timer timer, u64 counter) { GUEST_SYNC_ARGS(SET_COUNTER_VALUE, counter, timer, 0, 0); } @@ -146,8 +146,8 @@ static void guest_irq_handler(struct ex_regs *regs) { unsigned int intid = gic_get_and_ack_irq(); enum arch_timer timer; - uint64_t cnt, cval; - uint32_t ctl; + u64 cnt, cval; + u32 ctl; bool timer_condition, istatus; if (intid == IAR_SPURIOUS) { @@ -178,8 +178,8 @@ static void guest_irq_handler(struct ex_regs *regs) gic_set_eoi(intid); } -static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles, - 
uint32_t ctl) +static void set_cval_irq(enum arch_timer timer, u64 cval_cycles, + u32 ctl) { atomic_set(&shared_data.handled, 0); atomic_set(&shared_data.spurious, 0); @@ -187,8 +187,8 @@ static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles, timer_set_ctl(timer, ctl); } -static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles, - uint32_t ctl) +static void set_tval_irq(enum arch_timer timer, u64 tval_cycles, + u32 ctl) { atomic_set(&shared_data.handled, 0); atomic_set(&shared_data.spurious, 0); @@ -196,7 +196,7 @@ static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles, timer_set_ctl(timer, ctl); } -static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl, +static void set_xval_irq(enum arch_timer timer, u64 xval, u32 ctl, enum timer_view tv) { switch (tv) { @@ -275,13 +275,13 @@ static void wait_migrate_poll_for_irq(void) * Sleep for usec microseconds by polling in the guest or in * userspace (e.g. userspace_cmd=USERSPACE_SCHEDULE). */ -static void guest_poll(enum arch_timer test_timer, uint64_t usec, +static void guest_poll(enum arch_timer test_timer, u64 usec, enum sync_cmd usp_cmd) { - uint64_t cycles = usec_to_cycles(usec); + u64 cycles = usec_to_cycles(usec); /* Whichever timer we are testing with, sleep with the other. 
*/ enum arch_timer sleep_timer = 1 - test_timer; - uint64_t start = timer_get_cntct(sleep_timer); + u64 start = timer_get_cntct(sleep_timer); while ((timer_get_cntct(sleep_timer) - start) < cycles) { if (usp_cmd == NO_USERSPACE_CMD) @@ -291,22 +291,22 @@ static void guest_poll(enum arch_timer test_timer, uint64_t usec, } } -static void sleep_poll(enum arch_timer timer, uint64_t usec) +static void sleep_poll(enum arch_timer timer, u64 usec) { guest_poll(timer, usec, NO_USERSPACE_CMD); } -static void sleep_sched_poll(enum arch_timer timer, uint64_t usec) +static void sleep_sched_poll(enum arch_timer timer, u64 usec) { guest_poll(timer, usec, USERSPACE_SCHED_YIELD); } -static void sleep_migrate(enum arch_timer timer, uint64_t usec) +static void sleep_migrate(enum arch_timer timer, u64 usec) { guest_poll(timer, usec, USERSPACE_MIGRATE_SELF); } -static void sleep_in_userspace(enum arch_timer timer, uint64_t usec) +static void sleep_in_userspace(enum arch_timer timer, u64 usec) { userspace_sleep(usec); } @@ -315,15 +315,15 @@ static void sleep_in_userspace(enum arch_timer timer, uint64_t usec) * Reset the timer state to some nice values like the counter not being close * to the edge, and the control register masked and disabled. */ -static void reset_timer_state(enum arch_timer timer, uint64_t cnt) +static void reset_timer_state(enum arch_timer timer, u64 cnt) { set_counter(timer, cnt); timer_set_ctl(timer, CTL_IMASK); } -static void test_timer_xval(enum arch_timer timer, uint64_t xval, +static void test_timer_xval(enum arch_timer timer, u64 xval, enum timer_view tv, irq_wait_method_t wm, bool reset_state, - uint64_t reset_cnt) + u64 reset_cnt) { local_irq_disable(); @@ -348,23 +348,23 @@ static void test_timer_xval(enum arch_timer timer, uint64_t xval, * the "runner", like: tools/testing/selftests/kselftest/runner.sh. 
*/ -static void test_timer_cval(enum arch_timer timer, uint64_t cval, +static void test_timer_cval(enum arch_timer timer, u64 cval, irq_wait_method_t wm, bool reset_state, - uint64_t reset_cnt) + u64 reset_cnt) { test_timer_xval(timer, cval, TIMER_CVAL, wm, reset_state, reset_cnt); } -static void test_timer_tval(enum arch_timer timer, int32_t tval, +static void test_timer_tval(enum arch_timer timer, s32 tval, irq_wait_method_t wm, bool reset_state, - uint64_t reset_cnt) + u64 reset_cnt) { - test_timer_xval(timer, (uint64_t) tval, TIMER_TVAL, wm, reset_state, + test_timer_xval(timer, (u64)tval, TIMER_TVAL, wm, reset_state, reset_cnt); } -static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval, - uint64_t usec, enum timer_view timer_view, +static void test_xval_check_no_irq(enum arch_timer timer, u64 xval, + u64 usec, enum timer_view timer_view, sleep_method_t guest_sleep) { local_irq_disable(); @@ -379,17 +379,17 @@ static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval, assert_irqs_handled(0); } -static void test_cval_no_irq(enum arch_timer timer, uint64_t cval, - uint64_t usec, sleep_method_t wm) +static void test_cval_no_irq(enum arch_timer timer, u64 cval, + u64 usec, sleep_method_t wm) { test_xval_check_no_irq(timer, cval, usec, TIMER_CVAL, wm); } -static void test_tval_no_irq(enum arch_timer timer, int32_t tval, uint64_t usec, +static void test_tval_no_irq(enum arch_timer timer, s32 tval, u64 usec, sleep_method_t wm) { - /* tval will be cast to an int32_t in test_xval_check_no_irq */ - test_xval_check_no_irq(timer, (uint64_t) tval, usec, TIMER_TVAL, wm); + /* tval will be cast to an s32 in test_xval_check_no_irq */ + test_xval_check_no_irq(timer, (u64)tval, usec, TIMER_TVAL, wm); } /* Test masking/unmasking a timer using the timer mask (not the IRQ mask). */ @@ -463,7 +463,7 @@ static void test_timers_fired_multiple_times(enum arch_timer timer) * timeout for the wait: we use the wfi instruction. 
*/ static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm, - int32_t delta_1_ms, int32_t delta_2_ms) + s32 delta_1_ms, s32 delta_2_ms) { local_irq_disable(); reset_timer_state(timer, DEF_CNT); @@ -488,7 +488,7 @@ static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm static void test_reprogram_timers(enum arch_timer timer) { int i; - uint64_t base_wait = test_args.wait_ms; + u64 base_wait = test_args.wait_ms; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { /* @@ -504,8 +504,8 @@ static void test_reprogram_timers(enum arch_timer timer) static void test_basic_functionality(enum arch_timer timer) { - int32_t tval = (int32_t) msec_to_cycles(test_args.wait_ms); - uint64_t cval = DEF_CNT + msec_to_cycles(test_args.wait_ms); + s32 tval = (s32)msec_to_cycles(test_args.wait_ms); + u64 cval = DEF_CNT + msec_to_cycles(test_args.wait_ms); int i; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { @@ -593,7 +593,7 @@ static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t reset_timer_state(timer, DEF_CNT); set_cval_irq(timer, - (uint64_t) TVAL_MAX + + (u64)TVAL_MAX + msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE); set_counter(timer, TVAL_MAX); @@ -608,7 +608,7 @@ static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t /* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */ static void test_timers_above_tval_max(enum arch_timer timer) { - uint64_t cval; + u64 cval; int i; /* @@ -638,8 +638,8 @@ static void test_timers_above_tval_max(enum arch_timer timer) * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and * then waits for an IRQ. 
*/ -static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1, - uint64_t xval, uint64_t cnt_2, +static void test_set_cnt_after_xval(enum arch_timer timer, u64 cnt_1, + u64 xval, u64 cnt_2, irq_wait_method_t wm, enum timer_view tv) { local_irq_disable(); @@ -662,8 +662,8 @@ static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1, * then waits for an IRQ. */ static void test_set_cnt_after_xval_no_irq(enum arch_timer timer, - uint64_t cnt_1, uint64_t xval, - uint64_t cnt_2, + u64 cnt_1, u64 xval, + u64 cnt_2, sleep_method_t guest_sleep, enum timer_view tv) { @@ -684,31 +684,31 @@ static void test_set_cnt_after_xval_no_irq(enum arch_timer timer, timer_set_ctl(timer, CTL_IMASK); } -static void test_set_cnt_after_tval(enum arch_timer timer, uint64_t cnt_1, - int32_t tval, uint64_t cnt_2, +static void test_set_cnt_after_tval(enum arch_timer timer, u64 cnt_1, + s32 tval, u64 cnt_2, irq_wait_method_t wm) { test_set_cnt_after_xval(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL); } -static void test_set_cnt_after_cval(enum arch_timer timer, uint64_t cnt_1, - uint64_t cval, uint64_t cnt_2, +static void test_set_cnt_after_cval(enum arch_timer timer, u64 cnt_1, + u64 cval, u64 cnt_2, irq_wait_method_t wm) { test_set_cnt_after_xval(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL); } static void test_set_cnt_after_tval_no_irq(enum arch_timer timer, - uint64_t cnt_1, int32_t tval, - uint64_t cnt_2, sleep_method_t wm) + u64 cnt_1, s32 tval, + u64 cnt_2, sleep_method_t wm) { test_set_cnt_after_xval_no_irq(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL); } static void test_set_cnt_after_cval_no_irq(enum arch_timer timer, - uint64_t cnt_1, uint64_t cval, - uint64_t cnt_2, sleep_method_t wm) + u64 cnt_1, u64 cval, + u64 cnt_2, sleep_method_t wm) { test_set_cnt_after_xval_no_irq(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL); @@ -718,7 +718,7 @@ static void test_set_cnt_after_cval_no_irq(enum arch_timer timer, static void test_move_counters_ahead_of_timers(enum arch_timer 
timer) { int i; - int32_t tval; + s32 tval; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { irq_wait_method_t wm = irq_wait_method[i]; @@ -730,8 +730,7 @@ static void test_move_counters_ahead_of_timers(enum arch_timer timer) test_set_cnt_after_tval(timer, 0, -1, DEF_CNT + 1, wm); test_set_cnt_after_tval(timer, 0, -1, TVAL_MAX, wm); tval = TVAL_MAX; - test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1, - wm); + test_set_cnt_after_tval(timer, 0, tval, (u64)tval + 1, wm); } } @@ -754,8 +753,8 @@ static void test_move_counters_behind_timers(enum arch_timer timer) static void test_timers_in_the_past(enum arch_timer timer) { - int32_t tval = -1 * (int32_t) msec_to_cycles(test_args.wait_ms); - uint64_t cval; + s32 tval = -1 * (s32)msec_to_cycles(test_args.wait_ms); + u64 cval; int i; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { @@ -790,8 +789,8 @@ static void test_timers_in_the_past(enum arch_timer timer) static void test_long_timer_delays(enum arch_timer timer) { - int32_t tval = (int32_t) msec_to_cycles(test_args.long_wait_ms); - uint64_t cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms); + s32 tval = (s32)msec_to_cycles(test_args.long_wait_ms); + u64 cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms); int i; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { @@ -846,11 +845,11 @@ static void guest_code(enum arch_timer timer) static cpu_set_t default_cpuset; -static uint32_t next_pcpu(void) +static u32 next_pcpu(void) { - uint32_t max = get_nprocs(); - uint32_t cur = sched_getcpu(); - uint32_t next = cur; + u32 max = get_nprocs(); + u32 cur = sched_getcpu(); + u32 next = cur; cpu_set_t cpuset = default_cpuset; TEST_ASSERT(max > 1, "Need at least two physical cpus"); @@ -862,7 +861,7 @@ static uint32_t next_pcpu(void) return next; } -static void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt, +static void kvm_set_cntxct(struct kvm_vcpu *vcpu, u64 cnt, enum arch_timer timer) { if (timer == PHYSICAL) @@ -874,7 +873,7 @@ static void 
kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt, static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc) { enum sync_cmd cmd = uc->args[1]; - uint64_t val = uc->args[2]; + u64 val = uc->args[2]; enum arch_timer timer = uc->args[3]; switch (cmd) { @@ -1018,8 +1017,8 @@ static bool parse_args(int argc, char *argv[]) static void set_counter_defaults(void) { - const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600; - uint64_t freq = read_sysreg(CNTFRQ_EL0); + const u64 MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600; + u64 freq = read_sysreg(CNTFRQ_EL0); int width = ilog2(MIN_ROLLOVER_SECS * freq); width = clamp(width, 56, 64); diff --git a/tools/testing/selftests/kvm/arm64/debug-exceptions.c b/tools/testing/selftests/kvm/arm64/debug-exceptions.c index 1d431de8729c58..3eb4b1b6682dca 100644 --- a/tools/testing/selftests/kvm/arm64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/arm64/debug-exceptions.c @@ -31,14 +31,14 @@ extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx; extern unsigned char iter_ss_begin, iter_ss_end; -static volatile uint64_t sw_bp_addr, hw_bp_addr; -static volatile uint64_t wp_addr, wp_data_addr; -static volatile uint64_t svc_addr; -static volatile uint64_t ss_addr[4], ss_idx; -#define PC(v) ((uint64_t)&(v)) +static volatile u64 sw_bp_addr, hw_bp_addr; +static volatile u64 wp_addr, wp_data_addr; +static volatile u64 svc_addr; +static volatile u64 ss_addr[4], ss_idx; +#define PC(v) ((u64)&(v)) #define GEN_DEBUG_WRITE_REG(reg_name) \ -static void write_##reg_name(int num, uint64_t val) \ +static void write_##reg_name(int num, u64 val) \ { \ switch (num) { \ case 0: \ @@ -102,8 +102,8 @@ GEN_DEBUG_WRITE_REG(dbgwvr) static void reset_debug_state(void) { - uint8_t brps, wrps, i; - uint64_t dfr0; + u8 brps, wrps, i; + u64 dfr0; asm volatile("msr daifset, #8"); @@ -140,7 +140,7 @@ static void enable_os_lock(void) static void enable_monitor_debug_exceptions(void) { - uint64_t mdscr; + u64 mdscr; 
asm volatile("msr daifclr, #8"); @@ -149,9 +149,9 @@ static void enable_monitor_debug_exceptions(void) isb(); } -static void install_wp(uint8_t wpn, uint64_t addr) +static void install_wp(u8 wpn, u64 addr) { - uint32_t wcr; + u32 wcr; wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E; write_dbgwcr(wpn, wcr); @@ -162,9 +162,9 @@ static void install_wp(uint8_t wpn, uint64_t addr) enable_monitor_debug_exceptions(); } -static void install_hw_bp(uint8_t bpn, uint64_t addr) +static void install_hw_bp(u8 bpn, u64 addr) { - uint32_t bcr; + u32 bcr; bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E; write_dbgbcr(bpn, bcr); @@ -174,11 +174,10 @@ static void install_hw_bp(uint8_t bpn, uint64_t addr) enable_monitor_debug_exceptions(); } -static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr, - uint64_t ctx) +static void install_wp_ctx(u8 addr_wp, u8 ctx_bp, u64 addr, u64 ctx) { - uint32_t wcr; - uint64_t ctx_bcr; + u32 wcr; + u64 ctx_bcr; /* Setup a context-aware breakpoint for Linked Context ID Match */ ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | @@ -188,7 +187,7 @@ static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr, /* Setup a linked watchpoint (linked to the context-aware breakpoint) */ wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E | - DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT); + DBGWCR_WT_LINK | ((u32)ctx_bp << DBGWCR_LBN_SHIFT); write_dbgwcr(addr_wp, wcr); write_dbgwvr(addr_wp, addr); isb(); @@ -196,10 +195,9 @@ static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr, enable_monitor_debug_exceptions(); } -void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr, - uint64_t ctx) +void install_hw_bp_ctx(u8 addr_bp, u8 ctx_bp, u64 addr, u64 ctx) { - uint32_t addr_bcr, ctx_bcr; + u32 addr_bcr, ctx_bcr; /* Setup a context-aware breakpoint for Linked Context ID Match */ ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | 
@@ -213,7 +211,7 @@ void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr, */ addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | DBGBCR_BT_ADDR_LINK_CTX | - ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT); + ((u32)ctx_bp << DBGBCR_LBN_SHIFT); write_dbgbcr(addr_bp, addr_bcr); write_dbgbvr(addr_bp, addr); isb(); @@ -223,7 +221,7 @@ void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr, static void install_ss(void) { - uint64_t mdscr; + u64 mdscr; asm volatile("msr daifclr, #8"); @@ -234,9 +232,9 @@ static void install_ss(void) static volatile char write_data; -static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) +static void guest_code(u8 bpn, u8 wpn, u8 ctx_bpn) { - uint64_t ctx = 0xabcdef; /* a random context number */ + u64 ctx = 0xabcdef; /* a random context number */ /* Software-breakpoint */ reset_debug_state(); @@ -377,8 +375,8 @@ static void guest_svc_handler(struct ex_regs *regs) static void guest_code_ss(int test_cnt) { - uint64_t i; - uint64_t bvr, wvr, w_bvr, w_wvr; + u64 i; + u64 bvr, wvr, w_bvr, w_wvr; for (i = 0; i < test_cnt; i++) { /* Bits [1:0] of dbg{b,w}vr are RES0 */ @@ -416,12 +414,12 @@ static void guest_code_ss(int test_cnt) GUEST_DONE(); } -static int debug_version(uint64_t id_aa64dfr0) +static int debug_version(u64 id_aa64dfr0) { return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0); } -static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) +static void test_guest_debug_exceptions(u8 bpn, u8 wpn, u8 ctx_bpn) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -468,8 +466,8 @@ void test_single_step_from_userspace(int test_cnt) struct kvm_vm *vm; struct ucall uc; struct kvm_run *run; - uint64_t pc, cmd; - uint64_t test_pc = 0; + u64 pc, cmd; + u64 test_pc = 0; bool ss_enable = false; struct kvm_guest_debug debug = {}; @@ -506,7 +504,7 @@ void test_single_step_from_userspace(int test_cnt) "Unexpected pc 0x%lx (expected 0x%lx)", pc, test_pc); - if ((pc + 4) == 
(uint64_t)&iter_ss_end) { + if ((pc + 4) == (u64)&iter_ss_end) { test_pc = 0; debug.control = KVM_GUESTDBG_ENABLE; ss_enable = false; @@ -519,8 +517,8 @@ void test_single_step_from_userspace(int test_cnt) * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should * be the current pc + 4. */ - if ((pc >= (uint64_t)&iter_ss_begin) && - (pc < (uint64_t)&iter_ss_end)) + if ((pc >= (u64)&iter_ss_begin) && + (pc < (u64)&iter_ss_end)) test_pc = pc + 4; else test_pc = 0; @@ -533,9 +531,9 @@ void test_single_step_from_userspace(int test_cnt) * Run debug testing using the various breakpoint#, watchpoint# and * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration. */ -void test_guest_debug_exceptions_all(uint64_t aa64dfr0) +void test_guest_debug_exceptions_all(u64 aa64dfr0) { - uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base; + u8 brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base; int b, w, c; /* Number of breakpoints */ @@ -580,7 +578,7 @@ int main(int argc, char *argv[]) struct kvm_vm *vm; int opt; int ss_iteration = 10000; - uint64_t aa64dfr0; + u64 aa64dfr0; vm = vm_create_with_one_vcpu(&vcpu, guest_code); aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1)); diff --git a/tools/testing/selftests/kvm/arm64/hypercalls.c b/tools/testing/selftests/kvm/arm64/hypercalls.c index bf038a0371f44a..5d96cdf382c47e 100644 --- a/tools/testing/selftests/kvm/arm64/hypercalls.c +++ b/tools/testing/selftests/kvm/arm64/hypercalls.c @@ -29,9 +29,9 @@ #define KVM_REG_ARM_VENDOR_HYP_BMAP_2_RESET_VAL 0 struct kvm_fw_reg_info { - uint64_t reg; /* Register definition */ - uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */ - uint64_t reset_val; /* Reset value for the register */ + u64 reg; /* Register definition */ + u64 max_feat_bit; /* Bit that represents the upper limit of the feature-map */ + u64 reset_val; /* Reset value for the register */ }; #define FW_REG_INFO(r) \ @@ -59,8 +59,8 @@ enum 
test_stage { static int stage = TEST_STAGE_REG_IFACE; struct test_hvc_info { - uint32_t func_id; - uint64_t arg1; + u32 func_id; + u64 arg1; }; #define TEST_HVC_INFO(f, a1) \ @@ -152,9 +152,9 @@ static void guest_code(void) } struct st_time { - uint32_t rev; - uint32_t attr; - uint64_t st_time; + u32 rev; + u32 attr; + u64 st_time; }; #define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63) @@ -162,7 +162,7 @@ struct st_time { static void steal_time_init(struct kvm_vcpu *vcpu) { - uint64_t st_ipa = (ulong)ST_GPA_BASE; + u64 st_ipa = (ulong)ST_GPA_BASE; unsigned int gpages; gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE); @@ -174,13 +174,13 @@ static void steal_time_init(struct kvm_vcpu *vcpu) static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu) { - uint64_t val; + u64 val; unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) { const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i]; - uint64_t set_val; + u64 set_val; /* First 'read' should be the reset value for the reg */ val = vcpu_get_reg(vcpu, reg_info->reg); @@ -229,7 +229,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu) static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu) { - uint64_t val; + u64 val; unsigned int i; int ret; diff --git a/tools/testing/selftests/kvm/arm64/idreg-idst.c b/tools/testing/selftests/kvm/arm64/idreg-idst.c index 9ca9f125abdb79..a3e84701d81469 100644 --- a/tools/testing/selftests/kvm/arm64/idreg-idst.c +++ b/tools/testing/selftests/kvm/arm64/idreg-idst.c @@ -13,7 +13,7 @@ static volatile bool sys64, undef; #define __check_sr_read(r) \ ({ \ - uint64_t val; \ + u64 val; \ \ sys64 = false; \ undef = false; \ @@ -101,7 +101,7 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t mmfr2; + u64 mmfr2; test_disable_default_vgic(); diff --git a/tools/testing/selftests/kvm/arm64/no-vgic.c b/tools/testing/selftests/kvm/arm64/no-vgic.c index b14686ef17d12a..25b2e3222f6855 
100644 --- a/tools/testing/selftests/kvm/arm64/no-vgic.c +++ b/tools/testing/selftests/kvm/arm64/no-vgic.c @@ -15,7 +15,7 @@ static volatile bool handled; #define __check_sr_read(r) \ ({ \ - uint64_t val; \ + u64 val; \ \ handled = false; \ dsb(sy); \ @@ -33,7 +33,7 @@ static volatile bool handled; #define __check_gicv5_gicr_op(r) \ ({ \ - uint64_t val; \ + u64 val; \ \ handled = false; \ dsb(sy); \ @@ -82,7 +82,7 @@ static volatile bool handled; static void guest_code_gicv3(void) { - uint64_t val; + u64 val; /* * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having @@ -262,7 +262,7 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; bool has_v3, has_v5; - uint64_t pfr; + u64 pfr; test_disable_default_vgic(); diff --git a/tools/testing/selftests/kvm/arm64/page_fault_test.c b/tools/testing/selftests/kvm/arm64/page_fault_test.c index 4ccbd389d13369..6bb3d82906b227 100644 --- a/tools/testing/selftests/kvm/arm64/page_fault_test.c +++ b/tools/testing/selftests/kvm/arm64/page_fault_test.c @@ -23,7 +23,7 @@ #define TEST_PTE_GVA 0xb0000000 #define TEST_DATA 0x0123456789ABCDEF -static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA; +static u64 *guest_test_memory = (u64 *)TEST_GVA; #define CMD_NONE (0) #define CMD_SKIP_TEST (1ULL << 1) @@ -48,7 +48,7 @@ static struct event_cnt { struct test_desc { const char *name; - uint64_t mem_mark_cmd; + u64 mem_mark_cmd; /* Skip the test if any prepare function returns false */ bool (*guest_prepare[PREPARE_FN_NR])(void); void (*guest_test)(void); @@ -59,8 +59,8 @@ struct test_desc { void (*iabt_handler)(struct ex_regs *regs); void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run); void (*fail_vcpu_run_handler)(int ret); - uint32_t pt_memslot_flags; - uint32_t data_memslot_flags; + u32 pt_memslot_flags; + u32 data_memslot_flags; bool skip; struct event_cnt expected_events; }; @@ -70,9 +70,9 @@ struct test_params { struct test_desc *test_desc; }; -static inline void flush_tlb_page(uint64_t 
vaddr) +static inline void flush_tlb_page(gva_t gva) { - uint64_t page = vaddr >> 12; + gva_t page = gva >> 12; dsb(ishst); asm volatile("tlbi vaae1is, %0" :: "r" (page)); @@ -82,7 +82,7 @@ static inline void flush_tlb_page(uint64_t vaddr) static void guest_write64(void) { - uint64_t val; + u64 val; WRITE_ONCE(*guest_test_memory, TEST_DATA); val = READ_ONCE(*guest_test_memory); @@ -92,8 +92,8 @@ static void guest_write64(void) /* Check the system for atomic instructions. */ static bool guest_check_lse(void) { - uint64_t isar0 = read_sysreg(id_aa64isar0_el1); - uint64_t atomic; + u64 isar0 = read_sysreg(id_aa64isar0_el1); + u64 atomic; atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0); return atomic >= 2; @@ -101,8 +101,8 @@ static bool guest_check_lse(void) static bool guest_check_dc_zva(void) { - uint64_t dczid = read_sysreg(dczid_el0); - uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid); + u64 dczid = read_sysreg(dczid_el0); + u64 dzp = FIELD_GET(DCZID_EL0_DZP, dczid); return dzp == 0; } @@ -110,7 +110,7 @@ static bool guest_check_dc_zva(void) /* Compare and swap instruction. 
*/ static void guest_cas(void) { - uint64_t val; + u64 val; GUEST_ASSERT(guest_check_lse()); asm volatile(".arch_extension lse\n" @@ -122,7 +122,7 @@ static void guest_cas(void) static void guest_read64(void) { - uint64_t val; + u64 val; val = READ_ONCE(*guest_test_memory); GUEST_ASSERT_EQ(val, 0); @@ -131,7 +131,7 @@ static void guest_read64(void) /* Address translation instruction */ static void guest_at(void) { - uint64_t par; + u64 par; asm volatile("at s1e1r, %0" :: "r" (guest_test_memory)); isb(); @@ -148,7 +148,7 @@ static void guest_at(void) */ static void guest_dc_zva(void) { - uint16_t val; + u16 val; asm volatile("dc zva, %0" :: "r" (guest_test_memory)); dsb(ish); @@ -164,8 +164,8 @@ static void guest_dc_zva(void) */ static void guest_ld_preidx(void) { - uint64_t val; - uint64_t addr = TEST_GVA - 8; + u64 val; + u64 addr = TEST_GVA - 8; /* * This ends up accessing "TEST_GVA + 8 - 8", where "TEST_GVA - 8" is @@ -179,8 +179,8 @@ static void guest_ld_preidx(void) static void guest_st_preidx(void) { - uint64_t val = TEST_DATA; - uint64_t addr = TEST_GVA - 8; + u64 val = TEST_DATA; + u64 addr = TEST_GVA - 8; asm volatile("str %0, [%1, #8]!" : "+r" (val), "+r" (addr)); @@ -191,8 +191,8 @@ static void guest_st_preidx(void) static bool guest_set_ha(void) { - uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1); - uint64_t hadbs, tcr; + u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1); + u64 hadbs, tcr; /* Skip if HA is not supported. 
*/ hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1); @@ -208,7 +208,7 @@ static bool guest_set_ha(void) static bool guest_clear_pte_af(void) { - *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF; + *((u64 *)TEST_PTE_GVA) &= ~PTE_AF; flush_tlb_page(TEST_GVA); return true; @@ -217,7 +217,7 @@ static bool guest_clear_pte_af(void) static void guest_check_pte_af(void) { dsb(ish); - GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF); + GUEST_ASSERT_EQ(*((u64 *)TEST_PTE_GVA) & PTE_AF, PTE_AF); } static void guest_check_write_in_dirty_log(void) @@ -302,26 +302,26 @@ static void no_iabt_handler(struct ex_regs *regs) static struct uffd_args { char *copy; void *hva; - uint64_t paging_size; + u64 paging_size; } pt_args, data_args; /* Returns true to continue the test, and false if it should be skipped. */ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg, struct uffd_args *args) { - uint64_t addr = msg->arg.pagefault.address; - uint64_t flags = msg->arg.pagefault.flags; + u64 addr = msg->arg.pagefault.address; + u64 flags = msg->arg.pagefault.flags; struct uffdio_copy copy; int ret; TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING, "The only expected UFFD mode is MISSING"); - TEST_ASSERT_EQ(addr, (uint64_t)args->hva); + TEST_ASSERT_EQ(addr, (u64)args->hva); pr_debug("uffd fault: addr=%p write=%d\n", (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE)); - copy.src = (uint64_t)args->copy; + copy.src = (u64)args->copy; copy.dst = addr; copy.len = args->paging_size; copy.mode = 0; @@ -407,7 +407,7 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm, struct userspace_mem_region *region) { void *hva = (void *)region->region.userspace_addr; - uint64_t paging_size = region->region.memory_size; + u64 paging_size = region->region.memory_size; int ret, fd = region->fd; if (fd != -1) { @@ -438,7 +438,7 @@ static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run) static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run 
*run) { - uint64_t data; + u64 data; memcpy(&data, run->mmio.data, sizeof(data)); pr_debug("addr=%lld len=%d w=%d data=%lx\n", @@ -449,11 +449,11 @@ static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run) static bool check_write_in_dirty_log(struct kvm_vm *vm, struct userspace_mem_region *region, - uint64_t host_pg_nr) + u64 host_pg_nr) { unsigned long *bmap; bool first_page_dirty; - uint64_t size = region->region.memory_size; + u64 size = region->region.memory_size; /* getpage_size() is not always equal to vm->page_size */ bmap = bitmap_zalloc(size / getpagesize()); @@ -468,7 +468,7 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd) { struct userspace_mem_region *data_region, *pt_region; bool continue_test = true; - uint64_t pte_gpa, pte_pg; + u64 pte_gpa, pte_pg; data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA); pt_region = vm_get_mem_region(vm, MEM_REGION_PT); @@ -510,7 +510,7 @@ void fail_vcpu_run_mmio_no_syndrome_handler(int ret) events.fail_vcpu_runs += 1; } -typedef uint32_t aarch64_insn_t; +typedef u32 aarch64_insn_t; extern aarch64_insn_t __exec_test[2]; noinline void __return_0x77(void) @@ -525,7 +525,7 @@ noinline void __return_0x77(void) */ static void load_exec_code_for_test(struct kvm_vm *vm) { - uint64_t *code; + u64 *code; struct userspace_mem_region *region; void *hva; @@ -552,7 +552,7 @@ static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu, static void setup_gva_maps(struct kvm_vm *vm) { struct userspace_mem_region *region; - uint64_t pte_gpa; + u64 pte_gpa; region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA); /* Map TEST_GVA first. This will install a new PTE. 
*/ @@ -574,12 +574,12 @@ enum pf_test_memslots { */ static void setup_memslots(struct kvm_vm *vm, struct test_params *p) { - uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type); - uint64_t guest_page_size = vm->page_size; - uint64_t max_gfn = vm_compute_max_gfn(vm); + u64 backing_src_pagesz = get_backing_src_pagesz(p->src_type); + u64 guest_page_size = vm->page_size; + u64 max_gfn = vm_compute_max_gfn(vm); /* Enough for 2M of code when using 4K guest pages. */ - uint64_t code_npages = 512; - uint64_t pt_size, data_size, data_gpa; + u64 code_npages = 512; + u64 pt_size, data_size, data_gpa; /* * This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using diff --git a/tools/testing/selftests/kvm/arm64/psci_test.c b/tools/testing/selftests/kvm/arm64/psci_test.c index 98e49f710aef97..e775faf2086805 100644 --- a/tools/testing/selftests/kvm/arm64/psci_test.c +++ b/tools/testing/selftests/kvm/arm64/psci_test.c @@ -22,8 +22,7 @@ #define CPU_ON_ENTRY_ADDR 0xfeedf00dul #define CPU_ON_CONTEXT_ID 0xdeadc0deul -static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr, - uint64_t context_id) +static u64 psci_cpu_on(u64 target_cpu, u64 entry_addr, u64 context_id) { struct arm_smccc_res res; @@ -33,8 +32,7 @@ static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr, return res.a0; } -static uint64_t psci_affinity_info(uint64_t target_affinity, - uint64_t lowest_affinity_level) +static u64 psci_affinity_info(u64 target_affinity, u64 lowest_affinity_level) { struct arm_smccc_res res; @@ -44,7 +42,7 @@ static uint64_t psci_affinity_info(uint64_t target_affinity, return res.a0; } -static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id) +static u64 psci_system_suspend(u64 entry_addr, u64 context_id) { struct arm_smccc_res res; @@ -54,7 +52,7 @@ static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id) return res.a0; } -static uint64_t psci_system_off2(uint64_t type, uint64_t cookie) +static u64 
psci_system_off2(u64 type, u64 cookie) { struct arm_smccc_res res; @@ -63,7 +61,7 @@ static uint64_t psci_system_off2(uint64_t type, uint64_t cookie) return res.a0; } -static uint64_t psci_features(uint32_t func_id) +static u64 psci_features(u32 func_id) { struct arm_smccc_res res; @@ -110,7 +108,7 @@ static void enter_guest(struct kvm_vcpu *vcpu) static void assert_vcpu_reset(struct kvm_vcpu *vcpu) { - uint64_t obs_pc, obs_x0; + u64 obs_pc, obs_x0; obs_pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); obs_x0 = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0])); @@ -123,9 +121,9 @@ static void assert_vcpu_reset(struct kvm_vcpu *vcpu) obs_x0, CPU_ON_CONTEXT_ID); } -static void guest_test_cpu_on(uint64_t target_cpu) +static void guest_test_cpu_on(u64 target_cpu) { - uint64_t target_state; + u64 target_state; GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID)); @@ -142,7 +140,7 @@ static void guest_test_cpu_on(uint64_t target_cpu) static void host_test_cpu_on(void) { struct kvm_vcpu *source, *target; - uint64_t target_mpidr; + u64 target_mpidr; struct kvm_vm *vm; struct ucall uc; @@ -166,7 +164,7 @@ static void host_test_cpu_on(void) static void guest_test_system_suspend(void) { - uint64_t ret; + u64 ret; /* assert that SYSTEM_SUSPEND is discoverable */ GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND)); @@ -200,7 +198,7 @@ static void host_test_system_suspend(void) static void guest_test_system_off2(void) { - uint64_t ret; + u64 ret; /* assert that SYSTEM_OFF2 is discoverable */ GUEST_ASSERT(psci_features(PSCI_1_3_FN_SYSTEM_OFF2) & @@ -238,7 +236,7 @@ static void host_test_system_off2(void) { struct kvm_vcpu *source, *target; struct kvm_mp_state mps; - uint64_t psci_version = 0; + u64 psci_version = 0; int nr_shutdowns = 0; struct kvm_run *run; struct ucall uc; diff --git a/tools/testing/selftests/kvm/arm64/sea_to_user.c b/tools/testing/selftests/kvm/arm64/sea_to_user.c index 573dd790aeb8e2..e96d8982c28b8d 100644 --- 
a/tools/testing/selftests/kvm/arm64/sea_to_user.c +++ b/tools/testing/selftests/kvm/arm64/sea_to_user.c @@ -51,18 +51,16 @@ #define EINJ_OFFSET 0x01234badUL #define EINJ_GVA ((START_GVA) + (EINJ_OFFSET)) -static vm_paddr_t einj_gpa; +static gpa_t einj_gpa; static void *einj_hva; -static uint64_t einj_hpa; +static u64 einj_hpa; static bool far_invalid; -static uint64_t translate_to_host_paddr(unsigned long vaddr) +static u64 translate_hva_to_hpa(unsigned long hva) { - uint64_t pinfo; - int64_t offset = vaddr / getpagesize() * sizeof(pinfo); + u64 pinfo; + s64 offset = hva / getpagesize() * sizeof(pinfo); int fd; - uint64_t page_addr; - uint64_t paddr; fd = open("/proc/self/pagemap", O_RDONLY); if (fd < 0) @@ -77,12 +75,11 @@ static uint64_t translate_to_host_paddr(unsigned long vaddr) if ((pinfo & PAGE_PRESENT) == 0) ksft_exit_fail_perror("Page not present"); - page_addr = (pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT; - paddr = page_addr + (vaddr & (getpagesize() - 1)); - return paddr; + return ((pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT) + + (hva & (getpagesize() - 1)); } -static void write_einj_entry(const char *einj_path, uint64_t val) +static void write_einj_entry(const char *einj_path, u64 val) { char cmd[256] = {0}; FILE *cmdfile = NULL; @@ -96,7 +93,7 @@ static void write_einj_entry(const char *einj_path, uint64_t val) ksft_exit_fail_perror("Failed to write EINJ entry"); } -static void inject_uer(uint64_t paddr) +static void inject_uer(u64 hpa) { if (access("/sys/firmware/acpi/tables/EINJ", R_OK) == -1) ksft_test_result_skip("EINJ table no available in firmware"); @@ -106,7 +103,7 @@ static void inject_uer(uint64_t paddr) write_einj_entry(EINJ_ETYPE, ERROR_TYPE_MEMORY_UER); write_einj_entry(EINJ_FLAGS, MASK_MEMORY_UER); - write_einj_entry(EINJ_ADDR, paddr); + write_einj_entry(EINJ_ADDR, hpa); write_einj_entry(EINJ_MASK, ~0x0UL); write_einj_entry(EINJ_NOTRIGGER, 1); write_einj_entry(EINJ_DOIT, 1); @@ -145,10 +142,10 @@ static void setup_sigbus_handler(void) static 
void guest_code(void) { - uint64_t guest_data; + u64 guest_data; /* Consumes error will cause a SEA. */ - guest_data = *(uint64_t *)EINJ_GVA; + guest_data = *(u64 *)EINJ_GVA; GUEST_FAIL("Poison not protected by SEA: gva=%#lx, guest_data=%#lx\n", EINJ_GVA, guest_data); @@ -253,8 +250,8 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu) size_t backing_page_size; size_t guest_page_size; size_t alignment; - uint64_t num_guest_pages; - vm_paddr_t start_gpa; + u64 num_guest_pages; + gpa_t start_gpa; enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB; struct kvm_vm *vm; @@ -278,7 +275,7 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu) vm_userspace_mem_region_add( /*vm=*/vm, /*src_type=*/src_type, - /*guest_paddr=*/start_gpa, + /*gpa=*/start_gpa, /*slot=*/1, /*npages=*/num_guest_pages, /*flags=*/0); @@ -292,18 +289,18 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu) static void vm_inject_memory_uer(struct kvm_vm *vm) { - uint64_t guest_data; + u64 guest_data; einj_gpa = addr_gva2gpa(vm, EINJ_GVA); einj_hva = addr_gva2hva(vm, EINJ_GVA); /* Populate certain data before injecting UER. 
*/ - *(uint64_t *)einj_hva = 0xBAADCAFE; - guest_data = *(uint64_t *)einj_hva; + *(u64 *)einj_hva = 0xBAADCAFE; + guest_data = *(u64 *)einj_hva; ksft_print_msg("Before EINJect: data=%#lx\n", guest_data); - einj_hpa = translate_to_host_paddr((unsigned long)einj_hva); + einj_hpa = translate_hva_to_hpa((unsigned long)einj_hva); ksft_print_msg("EINJ_GVA=%#lx, einj_gpa=%#lx, einj_hva=%p, einj_hpa=%#lx\n", EINJ_GVA, einj_gpa, einj_hva, einj_hpa); diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c index 3a7e5fe9ae7ab1..7429a1055df561 100644 --- a/tools/testing/selftests/kvm/arm64/set_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c @@ -30,20 +30,20 @@ struct reg_ftr_bits { char *name; bool sign; enum ftr_type type; - uint8_t shift; - uint64_t mask; + u8 shift; + u64 mask; /* * For FTR_EXACT, safe_val is used as the exact safe value. * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value. */ - int64_t safe_val; + s64 safe_val; /* Allowed to be changed by the host after run */ bool mutable; }; struct test_feature_reg { - uint32_t reg; + u32 reg; const struct reg_ftr_bits *ftr_bits; }; @@ -275,9 +275,9 @@ static void guest_code(void) } /* Return a safe value to a given ftr_bits an ftr value */ -uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) +u64 get_safe_value(const struct reg_ftr_bits *ftr_bits, u64 ftr) { - uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift; + u64 ftr_max = ftr_bits->mask >> ftr_bits->shift; TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features"); @@ -329,16 +329,16 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) } /* Return an invalid value to a given ftr_bits an ftr value */ -uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) +u64 get_invalid_value(const struct reg_ftr_bits *ftr_bits, u64 ftr) { - uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift; + u64 ftr_max = 
ftr_bits->mask >> ftr_bits->shift; TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features"); if (ftr_bits->sign == FTR_UNSIGNED) { switch (ftr_bits->type) { case FTR_EXACT: - ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1); + ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1); break; case FTR_LOWER_SAFE: ftr++; @@ -358,7 +358,7 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) } else if (ftr != ftr_max) { switch (ftr_bits->type) { case FTR_EXACT: - ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1); + ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1); break; case FTR_LOWER_SAFE: ftr++; @@ -382,12 +382,12 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) return ftr; } -static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg, - const struct reg_ftr_bits *ftr_bits) +static u64 test_reg_set_success(struct kvm_vcpu *vcpu, u64 reg, + const struct reg_ftr_bits *ftr_bits) { - uint8_t shift = ftr_bits->shift; - uint64_t mask = ftr_bits->mask; - uint64_t val, new_val, ftr; + u8 shift = ftr_bits->shift; + u64 mask = ftr_bits->mask; + u64 val, new_val, ftr; val = vcpu_get_reg(vcpu, reg); ftr = (val & mask) >> shift; @@ -405,12 +405,12 @@ static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg, return new_val; } -static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg, +static void test_reg_set_fail(struct kvm_vcpu *vcpu, u64 reg, const struct reg_ftr_bits *ftr_bits) { - uint8_t shift = ftr_bits->shift; - uint64_t mask = ftr_bits->mask; - uint64_t val, old_val, ftr; + u8 shift = ftr_bits->shift; + u64 mask = ftr_bits->mask; + u64 val, old_val, ftr; int r; val = vcpu_get_reg(vcpu, reg); @@ -431,7 +431,7 @@ static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg, TEST_ASSERT_EQ(val, old_val); } -static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE]; +static u64 test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE]; #define 
encoding_to_range_idx(encoding) \ KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding), \ @@ -441,7 +441,7 @@ static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE]; static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only) { - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; struct reg_mask_range range = { .addr = (__u64)masks, }; @@ -458,8 +458,8 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only) for (int i = 0; i < ARRAY_SIZE(test_regs); i++) { const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits; - uint32_t reg_id = test_regs[i].reg; - uint64_t reg = KVM_ARM64_SYS_REG(reg_id); + u32 reg_id = test_regs[i].reg; + u64 reg = KVM_ARM64_SYS_REG(reg_id); int idx; /* Get the index to masks array for the idreg */ @@ -489,11 +489,11 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only) #define MPAM_IDREG_TEST 6 static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu) { - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; struct reg_mask_range range = { .addr = (__u64)masks, }; - uint64_t val; + u64 val; int idx, err; /* @@ -584,13 +584,13 @@ static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu) #define MTE_IDREG_TEST 1 static void test_user_set_mte_reg(struct kvm_vcpu *vcpu) { - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; struct reg_mask_range range = { .addr = (__u64)masks, }; - uint64_t val; - uint64_t mte; - uint64_t mte_frac; + u64 val; + u64 mte; + u64 mte_frac; int idx, err; val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); @@ -644,7 +644,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu) ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n"); } -static uint64_t reset_mutable_bits(uint32_t id, uint64_t val) +static u64 reset_mutable_bits(u32 id, u64 val) { struct test_feature_reg *reg = NULL; @@ 
-674,7 +674,7 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu) struct ucall uc; while (!done) { - uint64_t val; + u64 val; vcpu_run(vcpu); @@ -707,7 +707,7 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu) static void test_clidr(struct kvm_vcpu *vcpu) { - uint64_t clidr; + u64 clidr; int level; clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1)); @@ -772,10 +772,10 @@ static void test_vcpu_non_ftr_id_regs(struct kvm_vcpu *vcpu) ksft_test_result_pass("%s\n", __func__); } -static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding) +static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, u32 encoding) { size_t idx = encoding_to_range_idx(encoding); - uint64_t observed; + u64 observed; observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding)); TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]), @@ -808,7 +808,7 @@ int main(void) struct kvm_vcpu *vcpu; struct kvm_vm *vm; bool aarch64_only; - uint64_t val, el0; + u64 val, el0; int test_cnt, i, j; TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES)); diff --git a/tools/testing/selftests/kvm/arm64/smccc_filter.c b/tools/testing/selftests/kvm/arm64/smccc_filter.c index 1763b9d45400b3..21e41880261b43 100644 --- a/tools/testing/selftests/kvm/arm64/smccc_filter.c +++ b/tools/testing/selftests/kvm/arm64/smccc_filter.c @@ -37,7 +37,7 @@ static bool test_runs_at_el2(void) for (conduit = test_runs_at_el2() ? 
SMC_INSN : HVC_INSN; \ conduit <= SMC_INSN; conduit++) -static void guest_main(uint32_t func_id, enum smccc_conduit conduit) +static void guest_main(u32 func_id, enum smccc_conduit conduit) { struct arm_smccc_res res; @@ -49,7 +49,7 @@ static void guest_main(uint32_t func_id, enum smccc_conduit conduit) GUEST_SYNC(res.a0); } -static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions, +static int __set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions, enum kvm_smccc_filter_action action) { struct kvm_smccc_filter filter = { @@ -62,7 +62,7 @@ static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_fun KVM_ARM_VM_SMCCC_FILTER, &filter); } -static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions, +static void set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions, enum kvm_smccc_filter_action action) { int ret = __set_smccc_filter(vm, start, nr_functions, action); @@ -112,7 +112,7 @@ static void test_filter_reserved_range(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm = setup_vm(&vcpu); - uint32_t smc64_fn; + u32 smc64_fn; int r; r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1, @@ -217,7 +217,7 @@ static void test_filter_denied(void) } } -static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id, +static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, u32 func_id, enum smccc_conduit conduit) { struct kvm_run *run = vcpu->run; diff --git a/tools/testing/selftests/kvm/arm64/vgic_init.c b/tools/testing/selftests/kvm/arm64/vgic_init.c index 8d6d3a4ae4dbd6..47e34b43afb293 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_init.c +++ b/tools/testing/selftests/kvm/arm64/vgic_init.c @@ -19,7 +19,7 @@ #define NR_VCPUS 4 -#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset) +#define REG_OFFSET(vcpu, offset) (((u64)vcpu << 32) | offset) #define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2) #define VGIC_DEV_IS_V3(_d) ((_d) == 
KVM_DEV_TYPE_ARM_VGIC_V3) @@ -27,10 +27,10 @@ struct vm_gic { struct kvm_vm *vm; int gic_fd; - uint32_t gic_dev_type; + u32 gic_dev_type; }; -static uint64_t max_phys_size; +static u64 max_phys_size; /* * Helpers to access a redistributor register and verify the ioctl() failed or @@ -39,17 +39,17 @@ static uint64_t max_phys_size; static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset, int want, const char *msg) { - uint32_t ignored_val; + u32 ignored_val; int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS, REG_OFFSET(vcpu, offset), &ignored_val); TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want); } -static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want, +static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, u32 want, const char *msg) { - uint32_t val; + u32 val; kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS, REG_OFFSET(vcpu, offset), &val); @@ -71,8 +71,8 @@ static int run_vcpu(struct kvm_vcpu *vcpu) return __vcpu_run(vcpu) ? 
-errno : 0; } -static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, - uint32_t nr_vcpus, +static struct vm_gic vm_gic_create_with_vcpus(u32 gic_dev_type, + u32 nr_vcpus, struct kvm_vcpu *vcpus[]) { struct vm_gic v; @@ -84,7 +84,7 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, return v; } -static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type) +static struct vm_gic vm_gic_create_barebones(u32 gic_dev_type) { struct vm_gic v; @@ -103,9 +103,9 @@ static void vm_gic_destroy(struct vm_gic *v) } struct vgic_region_attr { - uint64_t attr; - uint64_t size; - uint64_t alignment; + u64 attr; + u64 size; + u64 alignment; }; struct vgic_region_attr gic_v3_dist_region = { @@ -143,7 +143,7 @@ struct vgic_region_attr gic_v2_cpu_region = { static void subtest_dist_rdist(struct vm_gic *v) { int ret; - uint64_t addr; + u64 addr; struct vgic_region_attr rdist; /* CPU interface in GICv2*/ struct vgic_region_attr dist; @@ -223,7 +223,7 @@ static void subtest_dist_rdist(struct vm_gic *v) /* Test the new REDIST region API */ static void subtest_v3_redist_regions(struct vm_gic *v) { - uint64_t addr, expected_addr; + u64 addr, expected_addr; int ret; ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, @@ -332,7 +332,7 @@ static void subtest_v3_redist_regions(struct vm_gic *v) * VGIC KVM device is created and initialized before the secondary CPUs * get created */ -static void test_vgic_then_vcpus(uint32_t gic_dev_type) +static void test_vgic_then_vcpus(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; @@ -353,7 +353,7 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type) } /* All the VCPUs are created before the VGIC KVM device gets initialized */ -static void test_vcpus_then_vgic(uint32_t gic_dev_type) +static void test_vcpus_then_vgic(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; @@ -408,7 +408,7 @@ static void test_v3_new_redist_regions(void) struct kvm_vcpu 
*vcpus[NR_VCPUS]; void *dummy = NULL; struct vm_gic v; - uint64_t addr; + u64 addr; int ret; v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus); @@ -460,7 +460,7 @@ static void test_v3_new_redist_regions(void) static void test_v3_typer_accesses(void) { struct vm_gic v; - uint64_t addr; + u64 addr; int ret, i; v.vm = vm_create(NR_VCPUS); @@ -518,7 +518,7 @@ static void test_v3_typer_accesses(void) } static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus, - uint32_t vcpuids[]) + u32 vcpuids[]) { struct vm_gic v; int i; @@ -544,9 +544,9 @@ static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus, */ static void test_v3_last_bit_redist_regions(void) { - uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; + u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 }; struct vm_gic v; - uint64_t addr; + u64 addr; v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids); @@ -578,9 +578,9 @@ static void test_v3_last_bit_redist_regions(void) /* Test last bit with legacy region */ static void test_v3_last_bit_single_rdist(void) { - uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; + u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 }; struct vm_gic v; - uint64_t addr; + u64 addr; v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids); @@ -606,7 +606,7 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void) struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; int ret, i; - uint64_t addr; + u64 addr; v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus); @@ -638,7 +638,7 @@ static void test_v3_its_region(void) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; - uint64_t addr; + u64 addr; int its_fd, ret; v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus); @@ -717,11 +717,11 @@ static void test_v3_nassgicap(void) /* * Returns 0 if it's possible to create GIC device of a given type (V2 or V3). 
*/ -int test_kvm_device(uint32_t gic_dev_type) +int test_kvm_device(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; - uint32_t other; + u32 other; int ret; v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus); @@ -968,7 +968,7 @@ static void test_v3_sysregs(void) kvm_vm_free(vm); } -void run_tests(uint32_t gic_dev_type) +void run_tests(u32 gic_dev_type) { test_vcpus_then_vgic(gic_dev_type); test_vgic_then_vcpus(gic_dev_type); diff --git a/tools/testing/selftests/kvm/arm64/vgic_irq.c b/tools/testing/selftests/kvm/arm64/vgic_irq.c index 2fb2c7939fe975..5e231998617e74 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_irq.c +++ b/tools/testing/selftests/kvm/arm64/vgic_irq.c @@ -24,12 +24,12 @@ * function. */ struct test_args { - uint32_t nr_irqs; /* number of KVM supported IRQs. */ + u32 nr_irqs; /* number of KVM supported IRQs. */ bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */ bool level_sensitive; /* 1 is level, 0 is edge */ int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */ bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */ - uint32_t shared_data; + u32 shared_data; }; /* @@ -64,15 +64,15 @@ typedef enum { struct kvm_inject_args { kvm_inject_cmd cmd; - uint32_t first_intid; - uint32_t num; + u32 first_intid; + u32 num; int level; bool expect_failure; }; /* Used on the guest side to perform the hypercall. */ -static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, - uint32_t num, int level, bool expect_failure); +static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid, + u32 num, int level, bool expect_failure); /* Used on the host side to get the hypercall info. */ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, @@ -133,8 +133,8 @@ static struct kvm_inject_desc set_active_fns[] = { for_each_supported_inject_fn((args), (t), (f)) /* Shared between the guest main thread and the IRQ handlers. 
*/ -volatile uint64_t irq_handled; -volatile uint32_t irqnr_received[MAX_SPI + 1]; +volatile u64 irq_handled; +volatile u32 irqnr_received[MAX_SPI + 1]; static void reset_stats(void) { @@ -145,25 +145,25 @@ static void reset_stats(void) irqnr_received[i] = 0; } -static uint64_t gic_read_ap1r0(void) +static u64 gic_read_ap1r0(void) { - uint64_t reg = read_sysreg_s(SYS_ICC_AP1R0_EL1); + u64 reg = read_sysreg_s(SYS_ICC_AP1R0_EL1); dsb(sy); return reg; } -static void gic_write_ap1r0(uint64_t val) +static void gic_write_ap1r0(u64 val) { write_sysreg_s(val, SYS_ICC_AP1R0_EL1); isb(); } -static void guest_set_irq_line(uint32_t intid, uint32_t level); +static void guest_set_irq_line(u32 intid, u32 level); static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive) { - uint32_t intid = gic_get_and_ack_irq(); + u32 intid = gic_get_and_ack_irq(); if (intid == IAR_SPURIOUS) return; @@ -189,8 +189,8 @@ static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive) GUEST_ASSERT(!gic_irq_get_pending(intid)); } -static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, - uint32_t num, int level, bool expect_failure) +static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid, + u32 num, int level, bool expect_failure) { struct kvm_inject_args args = { .cmd = cmd, @@ -204,7 +204,7 @@ static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, #define GUEST_ASSERT_IAR_EMPTY() \ do { \ - uint32_t _intid; \ + u32 _intid; \ _intid = gic_get_and_ack_irq(); \ GUEST_ASSERT(_intid == IAR_SPURIOUS); \ } while (0) @@ -237,13 +237,13 @@ static void reset_priorities(struct test_args *args) gic_set_priority(i, IRQ_DEFAULT_PRIO_REG); } -static void guest_set_irq_line(uint32_t intid, uint32_t level) +static void guest_set_irq_line(u32 intid, u32 level) { kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false); } static void test_inject_fail(struct test_args *args, - uint32_t intid, kvm_inject_cmd cmd) + u32 intid, kvm_inject_cmd cmd) { 
reset_stats(); @@ -255,10 +255,10 @@ static void test_inject_fail(struct test_args *args, } static void guest_inject(struct test_args *args, - uint32_t first_intid, uint32_t num, - kvm_inject_cmd cmd) + u32 first_intid, u32 num, + kvm_inject_cmd cmd) { - uint32_t i; + u32 i; reset_stats(); @@ -292,10 +292,10 @@ static void guest_inject(struct test_args *args, * deactivated yet. */ static void guest_restore_active(struct test_args *args, - uint32_t first_intid, uint32_t num, - kvm_inject_cmd cmd) + u32 first_intid, u32 num, + kvm_inject_cmd cmd) { - uint32_t prio, intid, ap1r; + u32 prio, intid, ap1r; int i; /* @@ -342,9 +342,9 @@ static void guest_restore_active(struct test_args *args, * This function should only be used in test_inject_preemption (with IRQs * masked). */ -static uint32_t wait_for_and_activate_irq(void) +static u32 wait_for_and_activate_irq(void) { - uint32_t intid; + u32 intid; do { asm volatile("wfi" : : : "memory"); @@ -360,11 +360,11 @@ static uint32_t wait_for_and_activate_irq(void) * interrupts for the whole test. 
*/ static void test_inject_preemption(struct test_args *args, - uint32_t first_intid, int num, + u32 first_intid, int num, const unsigned long *exclude, kvm_inject_cmd cmd) { - uint32_t intid, prio, step = KVM_PRIO_STEPS; + u32 intid, prio, step = KVM_PRIO_STEPS; int i; /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs @@ -379,7 +379,7 @@ static void test_inject_preemption(struct test_args *args, local_irq_disable(); for (i = 0; i < num; i++) { - uint32_t tmp; + u32 tmp; intid = i + first_intid; if (exclude && test_bit(i, exclude)) @@ -431,7 +431,7 @@ static void test_inject_preemption(struct test_args *args, static void test_injection(struct test_args *args, struct kvm_inject_desc *f) { - uint32_t nr_irqs = args->nr_irqs; + u32 nr_irqs = args->nr_irqs; if (f->sgi) { guest_inject(args, MIN_SGI, 1, f->cmd); @@ -451,7 +451,7 @@ static void test_injection(struct test_args *args, struct kvm_inject_desc *f) static void test_injection_failure(struct test_args *args, struct kvm_inject_desc *f) { - uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, }; + u32 bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, }; int i; for (i = 0; i < ARRAY_SIZE(bad_intid); i++) @@ -490,7 +490,7 @@ static void test_restore_active(struct test_args *args, struct kvm_inject_desc * static void guest_code(struct test_args *args) { - uint32_t i, nr_irqs = args->nr_irqs; + u32 i, nr_irqs = args->nr_irqs; bool level_sensitive = args->level_sensitive; struct kvm_inject_desc *f, *inject_fns; @@ -529,8 +529,8 @@ static void guest_code(struct test_args *args) GUEST_DONE(); } -static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level, - struct test_args *test_args, bool expect_failure) +static void kvm_irq_line_check(struct kvm_vm *vm, u32 intid, int level, + struct test_args *test_args, bool expect_failure) { int ret; @@ -548,8 +548,8 @@ static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level, } } -void 
kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level, - bool expect_failure) +void kvm_irq_set_level_info_check(int gic_fd, u32 intid, int level, + bool expect_failure) { if (!expect_failure) { kvm_irq_set_level_info(gic_fd, intid, level); @@ -573,17 +573,18 @@ void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level, } static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, - uint32_t intid, uint32_t num, uint32_t kvm_max_routes, - bool expect_failure) + u32 intid, u32 num, + u32 kvm_max_routes, + bool expect_failure) { struct kvm_irq_routing *routing; int ret; - uint64_t i; + u64 i; assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES); routing = kvm_gsi_routing_create(); - for (i = intid; i < (uint64_t)intid + num; i++) + for (i = intid; i < (u64)intid + num; i++) kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI); if (!expect_failure) { @@ -591,7 +592,7 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, } else { ret = _kvm_gsi_routing_write(vm, routing); /* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */ - if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS) + if (((u64)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS) TEST_ASSERT(ret != 0 && errno == EINVAL, "Bad intid %u did not cause KVM_SET_GSI_ROUTING " "error: rc: %i errno: %i", intid, ret, errno); @@ -602,7 +603,7 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, } } -static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid, +static void kvm_irq_write_ispendr_check(int gic_fd, u32 intid, struct kvm_vcpu *vcpu, bool expect_failure) { @@ -618,13 +619,13 @@ static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid, } static void kvm_routing_and_irqfd_check(struct kvm_vm *vm, - uint32_t intid, uint32_t num, uint32_t kvm_max_routes, - bool expect_failure) + u32 intid, u32 num, u32 kvm_max_routes, + bool expect_failure) { int fd[MAX_SPI]; - uint64_t val; + u64 
val; int ret, f; - uint64_t i; + u64 i; /* * There is no way to try injecting an SGI or PPI as the interface @@ -643,29 +644,29 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm, * that no actual interrupt was injected for those cases. */ - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) fd[f] = kvm_new_eventfd(); - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { - assert(i <= (uint64_t)UINT_MAX); + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) { + assert(i <= (u64)UINT_MAX); kvm_assign_irqfd(vm, i - MIN_SPI, fd[f]); } - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) { val = 1; - ret = write(fd[f], &val, sizeof(uint64_t)); - TEST_ASSERT(ret == sizeof(uint64_t), + ret = write(fd[f], &val, sizeof(u64)); + TEST_ASSERT(ret == sizeof(u64), __KVM_SYSCALL_ERROR("write()", ret)); } - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) kvm_close(fd[f]); } /* handles the valid case: intid=0xffffffff num=1 */ #define for_each_intid(first, num, tmp, i) \ for ((tmp) = (i) = (first); \ - (tmp) < (uint64_t)(first) + (uint64_t)(num); \ + (tmp) < (u64)(first) + (u64)(num); \ (tmp)++, (i)++) static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd, @@ -673,13 +674,13 @@ static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd, struct test_args *test_args) { kvm_inject_cmd cmd = inject_args->cmd; - uint32_t intid = inject_args->first_intid; - uint32_t num = inject_args->num; + u32 intid = inject_args->first_intid; + u32 num = inject_args->num; int level = inject_args->level; bool expect_failure = inject_args->expect_failure; struct kvm_vm *vm = vcpu->vm; - uint64_t tmp; - uint32_t i; + u64 tmp; + u32 i; /* handles the valid case: intid=0xffffffff num=1 */ assert(intid < UINT_MAX - num || num == 1); @@ -731,7 +732,7 @@ static void 
kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, struct kvm_inject_args *args) { struct kvm_inject_args *kvm_args_hva; - vm_vaddr_t kvm_args_gva; + gva_t kvm_args_gva; kvm_args_gva = uc->args[1]; kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva); @@ -745,14 +746,14 @@ static void print_args(struct test_args *args) args->eoi_split); } -static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) +static void test_vgic(u32 nr_irqs, bool level_sensitive, bool eoi_split) { struct ucall uc; int gic_fd; struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct kvm_inject_args inject_args; - vm_vaddr_t args_gva; + gva_t args_gva; struct test_args args = { .nr_irqs = nr_irqs, @@ -770,7 +771,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) vcpu_init_descriptor_tables(vcpu); /* Setup the guest args page (so it gets the args). */ - args_gva = vm_vaddr_alloc_page(vm); + args_gva = vm_alloc_page(vm); memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); vcpu_args_set(vcpu, 1, args_gva); @@ -810,7 +811,7 @@ static void guest_code_asym_dir(struct test_args *args, int cpuid) gic_set_priority_mask(CPU_PRIO_MASK); if (cpuid == 0) { - uint32_t intid; + u32 intid; local_irq_disable(); @@ -848,7 +849,7 @@ static void guest_code_asym_dir(struct test_args *args, int cpuid) static void guest_code_group_en(struct test_args *args, int cpuid) { - uint32_t intid; + u32 intid; gic_init(GIC_V3, 2); @@ -896,7 +897,7 @@ static void guest_code_group_en(struct test_args *args, int cpuid) static void guest_code_timer_spi(struct test_args *args, int cpuid) { - uint32_t intid; + u32 intid; u64 val; gic_init(GIC_V3, 2); @@ -986,7 +987,7 @@ static void test_vgic_two_cpus(void *gcode) struct kvm_vcpu *vcpus[2]; struct test_args args = {}; struct kvm_vm *vm; - vm_vaddr_t args_gva; + gva_t args_gva; int gic_fd, ret; vm = vm_create_with_vcpus(2, gcode, vcpus); @@ -996,7 +997,7 @@ static void test_vgic_two_cpus(void *gcode) 
vcpu_init_descriptor_tables(vcpus[1]); /* Setup the guest args page (so it gets the args). */ - args_gva = vm_vaddr_alloc_page(vm); + args_gva = vm_alloc_page(vm); memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); vcpu_args_set(vcpus[0], 2, args_gva, 0); vcpu_args_set(vcpus[1], 2, args_gva, 1); @@ -1033,7 +1034,7 @@ static void help(const char *name) int main(int argc, char **argv) { - uint32_t nr_irqs = 64; + u32 nr_irqs = 64; bool default_args = true; bool level_sensitive = false; int opt; diff --git a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c index e857a605f577a7..d64d434d3f06eb 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c +++ b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c @@ -23,7 +23,7 @@ #define GIC_LPI_OFFSET 8192 static size_t nr_iterations = 1000; -static vm_paddr_t gpa_base; +static gpa_t gpa_base; static struct kvm_vm *vm; static struct kvm_vcpu **vcpus; @@ -35,14 +35,14 @@ static struct test_data { u32 nr_devices; u32 nr_event_ids; - vm_paddr_t device_table; - vm_paddr_t collection_table; - vm_paddr_t cmdq_base; + gpa_t device_table; + gpa_t collection_table; + gpa_t cmdq_base; void *cmdq_base_va; - vm_paddr_t itt_tables; + gpa_t itt_tables; - vm_paddr_t lpi_prop_table; - vm_paddr_t lpi_pend_tables; + gpa_t lpi_prop_table; + gpa_t lpi_pend_tables; } test_data = { .nr_cpus = 1, .nr_devices = 1, @@ -73,7 +73,7 @@ static void guest_setup_its_mappings(void) /* Round-robin the LPIs to all of the vCPUs in the VM */ coll_id = 0; for (device_id = 0; device_id < nr_devices; device_id++) { - vm_paddr_t itt_base = test_data.itt_tables + (device_id * SZ_64K); + gpa_t itt_base = test_data.itt_tables + (device_id * SZ_64K); its_send_mapd_cmd(test_data.cmdq_base_va, device_id, itt_base, SZ_64K, true); @@ -188,7 +188,7 @@ static void setup_test_data(void) size_t pages_per_64k = vm_calc_num_guest_pages(vm->mode, SZ_64K); u32 nr_devices = test_data.nr_devices; u32 nr_cpus = 
test_data.nr_cpus; - vm_paddr_t cmdq_base; + gpa_t cmdq_base; test_data.device_table = vm_phy_pages_alloc(vm, pages_per_64k, gpa_base, @@ -224,7 +224,7 @@ static void setup_gic(void) static void signal_lpi(u32 device_id, u32 event_id) { - vm_paddr_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER; + gpa_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER; struct kvm_msi msi = { .address_lo = db_addr, diff --git a/tools/testing/selftests/kvm/arm64/vgic_v5.c b/tools/testing/selftests/kvm/arm64/vgic_v5.c index 3ce6cf37a629f0..d785b660d8476a 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_v5.c +++ b/tools/testing/selftests/kvm/arm64/vgic_v5.c @@ -17,10 +17,10 @@ struct vm_gic { struct kvm_vm *vm; int gic_fd; - uint32_t gic_dev_type; + u32 gic_dev_type; }; -static uint64_t max_phys_size; +static u64 max_phys_size; #define GUEST_CMD_IRQ_CDIA 10 #define GUEST_CMD_IRQ_DIEOI 11 @@ -96,7 +96,7 @@ static void vm_gic_destroy(struct vm_gic *v) kvm_vm_free(v->vm); } -static void test_vgic_v5_ppis(uint32_t gic_dev_type) +static void test_vgic_v5_ppis(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct ucall uc; @@ -173,7 +173,7 @@ static void test_vgic_v5_ppis(uint32_t gic_dev_type) /* * Returns 0 if it's possible to create GIC device of a given type (V5). 
*/ -int test_kvm_device(uint32_t gic_dev_type) +int test_kvm_device(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; @@ -199,7 +199,7 @@ int test_kvm_device(uint32_t gic_dev_type) return 0; } -void run_tests(uint32_t gic_dev_type) +void run_tests(u32 gic_dev_type) { pr_info("Test VGICv5 PPIs\n"); test_vgic_v5_ppis(gic_dev_type); diff --git a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c index ae36325c022fb2..22223395969e0f 100644 --- a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c @@ -33,20 +33,20 @@ struct vpmu_vm { static struct vpmu_vm vpmu_vm; struct pmreg_sets { - uint64_t set_reg_id; - uint64_t clr_reg_id; + u64 set_reg_id; + u64 clr_reg_id; }; #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr} -static uint64_t get_pmcr_n(uint64_t pmcr) +static u64 get_pmcr_n(u64 pmcr) { return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr); } -static uint64_t get_counters_mask(uint64_t n) +static u64 get_counters_mask(u64 n) { - uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX); + u64 mask = BIT(ARMV8_PMU_CYCLE_IDX); if (n) mask |= GENMASK(n - 1, 0); @@ -89,7 +89,7 @@ static inline void write_sel_evtyper(int sel, unsigned long val) static void pmu_disable_reset(void) { - uint64_t pmcr = read_sysreg(pmcr_el0); + u64 pmcr = read_sysreg(pmcr_el0); /* Reset all counters, disabling them */ pmcr &= ~ARMV8_PMU_PMCR_E; @@ -169,7 +169,7 @@ struct pmc_accessor pmc_accessors[] = { #define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \ { \ - uint64_t _tval = read_sysreg(regname); \ + u64 _tval = read_sysreg(regname); \ \ if (set_expected) \ __GUEST_ASSERT((_tval & mask), \ @@ -185,7 +185,7 @@ struct pmc_accessor pmc_accessors[] = { * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers * are set or cleared as specified in @set_expected. 
*/ -static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected) +static void check_bitmap_pmu_regs(u64 mask, bool set_expected) { GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected); GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected); @@ -207,7 +207,7 @@ static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected) */ static void test_bitmap_pmu_regs(int pmc_idx, bool set_op) { - uint64_t pmcr_n, test_bit = BIT(pmc_idx); + u64 pmcr_n, test_bit = BIT(pmc_idx); bool set_expected = false; if (set_op) { @@ -232,7 +232,7 @@ static void test_bitmap_pmu_regs(int pmc_idx, bool set_op) */ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) { - uint64_t write_data, read_data; + u64 write_data, read_data; /* Disable all PMCs and reset all PMCs to zero. */ pmu_disable_reset(); @@ -287,11 +287,11 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) } #define INVALID_EC (-1ul) -uint64_t expected_ec = INVALID_EC; +u64 expected_ec = INVALID_EC; static void guest_sync_handler(struct ex_regs *regs) { - uint64_t esr, ec; + u64 esr, ec; esr = read_sysreg(esr_el1); ec = ESR_ELx_EC(esr); @@ -351,9 +351,9 @@ static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx) * if reading/writing PMU registers for implemented or unimplemented * counters works as expected. 
*/ -static void guest_code(uint64_t expected_pmcr_n) +static void guest_code(u64 expected_pmcr_n) { - uint64_t pmcr, pmcr_n, unimp_mask; + u64 pmcr, pmcr_n, unimp_mask; int i, pmc; __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, @@ -402,12 +402,12 @@ static void guest_code(uint64_t expected_pmcr_n) static void create_vpmu_vm(void *guest_code) { struct kvm_vcpu_init init; - uint8_t pmuver, ec; - uint64_t dfr0, irq = 23; + u8 pmuver, ec; + u64 dfr0, irq = 23; struct kvm_device_attr irq_attr = { .group = KVM_ARM_VCPU_PMU_V3_CTRL, .attr = KVM_ARM_VCPU_PMU_V3_IRQ, - .addr = (uint64_t)&irq, + .addr = (u64)&irq, }; /* The test creates the vpmu_vm multiple times. Ensure a clean state */ @@ -443,7 +443,7 @@ static void destroy_vpmu_vm(void) kvm_vm_free(vpmu_vm.vm); } -static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n) +static void run_vcpu(struct kvm_vcpu *vcpu, u64 pmcr_n) { struct ucall uc; @@ -489,9 +489,9 @@ static void test_create_vpmu_vm_with_nr_counters(unsigned int nr_counters, bool * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n, * and run the test. */ -static void run_access_test(uint64_t pmcr_n) +static void run_access_test(u64 pmcr_n) { - uint64_t sp; + u64 sp; struct kvm_vcpu *vcpu; struct kvm_vcpu_init init; @@ -514,7 +514,7 @@ static void run_access_test(uint64_t pmcr_n) aarch64_vcpu_setup(vcpu, &init); vcpu_init_descriptor_tables(vcpu); vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), sp); - vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code); run_vcpu(vcpu, pmcr_n); @@ -531,12 +531,12 @@ static struct pmreg_sets validity_check_reg_sets[] = { * Create a VM, and check if KVM handles the userspace accesses of * the PMU register sets in @validity_check_reg_sets[] correctly. 
*/ -static void run_pmregs_validity_test(uint64_t pmcr_n) +static void run_pmregs_validity_test(u64 pmcr_n) { int i; struct kvm_vcpu *vcpu; - uint64_t set_reg_id, clr_reg_id, reg_val; - uint64_t valid_counters_mask, max_counters_mask; + u64 set_reg_id, clr_reg_id, reg_val; + u64 valid_counters_mask, max_counters_mask; test_create_vpmu_vm_with_nr_counters(pmcr_n, false); vcpu = vpmu_vm.vcpu; @@ -588,7 +588,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n) * the vCPU to @pmcr_n, which is larger than the host value. * The attempt should fail as @pmcr_n is too big to set for the vCPU. */ -static void run_error_test(uint64_t pmcr_n) +static void run_error_test(u64 pmcr_n) { pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n); @@ -600,9 +600,9 @@ static void run_error_test(uint64_t pmcr_n) * Return the default number of implemented PMU event counters excluding * the cycle counter (i.e. PMCR_EL0.N value) for the guest. */ -static uint64_t get_pmcr_n_limit(void) +static u64 get_pmcr_n_limit(void) { - uint64_t pmcr; + u64 pmcr; create_vpmu_vm(guest_code); pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)); @@ -624,7 +624,7 @@ static bool kvm_supports_nr_counters_attr(void) int main(void) { - uint64_t i, pmcr_n; + u64 i, pmcr_n; TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3)); TEST_REQUIRE(kvm_supports_vgic_v3()); diff --git a/tools/testing/selftests/kvm/coalesced_io_test.c b/tools/testing/selftests/kvm/coalesced_io_test.c index 60cb2545489983..df4ed5e3877c11 100644 --- a/tools/testing/selftests/kvm/coalesced_io_test.c +++ b/tools/testing/selftests/kvm/coalesced_io_test.c @@ -14,16 +14,16 @@ struct kvm_coalesced_io { struct kvm_coalesced_mmio_ring *ring; - uint32_t ring_size; - uint64_t mmio_gpa; - uint64_t *mmio; + u32 ring_size; + u64 mmio_gpa; + u64 *mmio; /* * x86-only, but define pio_port for all architectures to minimize the * amount of #ifdeffery and complexity, without having to sacrifice * verbose error messages. 
*/ - uint8_t pio_port; + u8 pio_port; }; static struct kvm_coalesced_io kvm_builtin_io_ring; @@ -70,13 +70,13 @@ static void guest_code(struct kvm_coalesced_io *io) static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu, struct kvm_coalesced_io *io, - uint32_t ring_start, - uint32_t expected_exit) + u32 ring_start, + u32 expected_exit) { const bool want_pio = expected_exit == KVM_EXIT_IO; struct kvm_coalesced_mmio_ring *ring = io->ring; struct kvm_run *run = vcpu->run; - uint32_t pio_value; + u32 pio_value; WRITE_ONCE(ring->first, ring_start); WRITE_ONCE(ring->last, ring_start); @@ -88,13 +88,13 @@ static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu, * data_offset is garbage, e.g. an MMIO gpa. */ if (run->exit_reason == KVM_EXIT_IO) - pio_value = *(uint32_t *)((void *)run + run->io.data_offset); + pio_value = *(u32 *)((void *)run + run->io.data_offset); else pio_value = 0; TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write && run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 && - *(uint64_t *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) || + *(u64 *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) || (want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port && run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 && pio_value == io->pio_port + io->ring_size - 1)), @@ -105,14 +105,14 @@ static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu, want_pio ? (unsigned long long)io->pio_port : io->mmio_gpa, (want_pio ? io->pio_port : io->mmio_gpa) + io->ring_size - 1, run->exit_reason, run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? 
"PIO" : "other", - run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(uint64_t *)run->mmio.data, + run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(u64 *)run->mmio.data, run->io.port, run->io.direction, run->io.size, run->io.count, pio_value); } static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu, struct kvm_coalesced_io *io, - uint32_t ring_start, - uint32_t expected_exit) + u32 ring_start, + u32 expected_exit) { struct kvm_coalesced_mmio_ring *ring = io->ring; int i; @@ -124,18 +124,18 @@ static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu, ring->first, ring->last, io->ring_size, ring_start); for (i = 0; i < io->ring_size - 1; i++) { - uint32_t idx = (ring->first + i) % io->ring_size; + u32 idx = (ring->first + i) % io->ring_size; struct kvm_coalesced_mmio *entry = &ring->coalesced_mmio[idx]; #ifdef __x86_64__ if (i & 1) TEST_ASSERT(entry->phys_addr == io->pio_port && entry->len == 4 && entry->pio && - *(uint32_t *)entry->data == io->pio_port + i, + *(u32 *)entry->data == io->pio_port + i, "Wanted 4-byte port I/O 0x%x = 0x%x in entry %u, got %u-byte %s 0x%llx = 0x%x", io->pio_port, io->pio_port + i, i, entry->len, entry->pio ? "PIO" : "MMIO", - entry->phys_addr, *(uint32_t *)entry->data); + entry->phys_addr, *(u32 *)entry->data); else #endif TEST_ASSERT(entry->phys_addr == io->mmio_gpa && @@ -143,12 +143,12 @@ static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu, "Wanted 8-byte MMIO to 0x%lx = %lx in entry %u, got %u-byte %s 0x%llx = 0x%lx", io->mmio_gpa, io->mmio_gpa + i, i, entry->len, entry->pio ? "PIO" : "MMIO", - entry->phys_addr, *(uint64_t *)entry->data); + entry->phys_addr, *(u64 *)entry->data); } } static void test_coalesced_io(struct kvm_vcpu *vcpu, - struct kvm_coalesced_io *io, uint32_t ring_start) + struct kvm_coalesced_io *io, u32 ring_start) { struct kvm_coalesced_mmio_ring *ring = io->ring; @@ -219,11 +219,11 @@ int main(int argc, char *argv[]) * the MMIO GPA identity mapped in the guest. 
*/ .mmio_gpa = 4ull * SZ_1G, - .mmio = (uint64_t *)(4ull * SZ_1G), + .mmio = (u64 *)(4ull * SZ_1G), .pio_port = 0x80, }; - virt_map(vm, (uint64_t)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1); + virt_map(vm, (u64)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1); sync_global_to_guest(vm, kvm_builtin_io_ring); vcpu_args_set(vcpu, 1, &kvm_builtin_io_ring); diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index 0202b78f8680af..302c4923d0930f 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -24,7 +24,7 @@ #ifdef __NR_userfaultfd static int nr_vcpus = 1; -static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static size_t demand_paging_size; static char *guest_data_prototype; @@ -58,7 +58,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, struct uffd_msg *msg) { pid_t tid = syscall(__NR_gettid); - uint64_t addr = msg->arg.pagefault.address; + u64 addr = msg->arg.pagefault.address; struct timespec start; struct timespec ts_diff; int r; @@ -68,7 +68,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) { struct uffdio_copy copy; - copy.src = (uint64_t)guest_data_prototype; + copy.src = (u64)guest_data_prototype; copy.dst = addr; copy.len = demand_paging_size; copy.mode = 0; @@ -138,7 +138,7 @@ struct test_params { bool partition_vcpu_memory_access; }; -static void prefault_mem(void *alias, uint64_t len) +static void prefault_mem(void *alias, u64 len) { size_t p; @@ -154,7 +154,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct memstress_vcpu_args *vcpu_args; struct test_params *p = arg; struct uffd_desc **uffd_descs = NULL; - uint64_t uffd_region_size; + u64 uffd_region_size; struct timespec start; struct timespec ts_diff; double vcpu_paging_rate; diff 
--git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 0a1ea1d1e2d8f4..ef779fa91827c9 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -24,7 +24,7 @@ #define TEST_HOST_LOOP_N 2UL static int nr_vcpus = 1; -static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static bool run_vcpus_while_disabling_dirty_logging; /* Host variables */ @@ -37,7 +37,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) { struct kvm_vcpu *vcpu = vcpu_args->vcpu; int vcpu_idx = vcpu_args->vcpu_idx; - uint64_t pages_count = 0; + u64 pages_count = 0; struct kvm_run *run; struct timespec start; struct timespec ts_diff; @@ -93,11 +93,11 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) struct test_params { unsigned long iterations; - uint64_t phys_offset; + u64 phys_offset; bool partition_vcpu_memory_access; enum vm_mem_backing_src_type backing_src; int slots; - uint32_t write_percent; + u32 write_percent; bool random_access; }; @@ -106,9 +106,9 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct test_params *p = arg; struct kvm_vm *vm; unsigned long **bitmaps; - uint64_t guest_num_pages; - uint64_t host_num_pages; - uint64_t pages_per_slot; + u64 guest_num_pages; + u64 host_num_pages; + u64 pages_per_slot; struct timespec start; struct timespec ts_diff; struct timespec get_dirty_log_total = (struct timespec){0}; diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 7627b328f18a31..12446a4b6e8de2 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -74,11 +74,11 @@ * the host. READ/WRITE_ONCE() should also be used with anything * that may change. 
*/ -static uint64_t host_page_size; -static uint64_t guest_page_size; -static uint64_t guest_num_pages; -static uint64_t iteration; -static uint64_t nr_writes; +static u64 host_page_size; +static u64 guest_page_size; +static u64 guest_num_pages; +static u64 iteration; +static u64 nr_writes; static bool vcpu_stop; /* @@ -86,13 +86,13 @@ static bool vcpu_stop; * This will be set to the topmost valid physical address minus * the test memory size. */ -static uint64_t guest_test_phys_mem; +static u64 guest_test_phys_mem; /* * Guest virtual memory offset of the testing memory slot. * Must not conflict with identity mapped test code. */ -static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; +static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; /* * Continuously write to the first 8 bytes of a random pages within @@ -100,10 +100,10 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; */ static void guest_code(void) { - uint64_t addr; + u64 addr; #ifdef __s390x__ - uint64_t i; + u64 i; /* * On s390x, all pages of a 1M segment are initially marked as dirty @@ -113,7 +113,7 @@ static void guest_code(void) */ for (i = 0; i < guest_num_pages; i++) { addr = guest_test_virt_mem + i * guest_page_size; - vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration)); + vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration)); nr_writes++; } #endif @@ -125,7 +125,7 @@ static void guest_code(void) * guest_page_size; addr = align_down(addr, host_page_size); - vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration)); + vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration)); nr_writes++; } @@ -138,11 +138,11 @@ static bool host_quit; /* Points to the test VM memory region on which we track dirty logs */ static void *host_test_mem; -static uint64_t host_num_pages; +static u64 host_num_pages; /* For statistics only */ -static uint64_t host_dirty_count; -static uint64_t host_clear_count; +static u64 host_dirty_count; +static u64 host_clear_count; /* Whether dirty 
ring reset is requested, or finished */ static sem_t sem_vcpu_stop; @@ -169,7 +169,7 @@ static bool dirty_ring_vcpu_ring_full; * dirty gfn we've collected, so that if a mismatch of data found later in the * verifying process, we let it pass. */ -static uint64_t dirty_ring_last_page = -1ULL; +static u64 dirty_ring_last_page = -1ULL; /* * In addition to the above, it is possible (especially if this @@ -213,7 +213,7 @@ static uint64_t dirty_ring_last_page = -1ULL; * and also don't fail when it is reported in the next iteration, together with * an outdated iteration count. */ -static uint64_t dirty_ring_prev_iteration_last_page; +static u64 dirty_ring_prev_iteration_last_page; enum log_mode_t { /* Only use KVM_GET_DIRTY_LOG for logging */ @@ -236,7 +236,7 @@ static enum log_mode_t host_log_mode_option = LOG_MODE_ALL; /* Logging mode for current run */ static enum log_mode_t host_log_mode; static pthread_t vcpu_thread; -static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT; +static u32 test_dirty_ring_count = TEST_DIRTY_RING_COUNT; static bool clear_log_supported(void) { @@ -255,15 +255,15 @@ static void clear_log_create_vm_done(struct kvm_vm *vm) } static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *unused) + void *bitmap, u32 num_pages, + u32 *unused) { kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap); } static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *unused) + void *bitmap, u32 num_pages, + u32 *unused) { kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap); kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages); @@ -297,8 +297,8 @@ static bool dirty_ring_supported(void) static void dirty_ring_create_vm_done(struct kvm_vm *vm) { - uint64_t pages; - uint32_t limit; + u64 pages; + u32 limit; /* * We rely on vcpu exit due to full dirty ring state. 
Adjust @@ -333,12 +333,12 @@ static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn) smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); } -static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, - int slot, void *bitmap, - uint32_t num_pages, uint32_t *fetch_index) +static u32 dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, + int slot, void *bitmap, + u32 num_pages, u32 *fetch_index) { struct kvm_dirty_gfn *cur; - uint32_t count = 0; + u32 count = 0; while (true) { cur = &dirty_gfns[*fetch_index % test_dirty_ring_count]; @@ -359,10 +359,10 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, } static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *ring_buf_idx) + void *bitmap, u32 num_pages, + u32 *ring_buf_idx) { - uint32_t count, cleared; + u32 count, cleared; /* Only have one vcpu */ count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu), @@ -404,8 +404,8 @@ struct log_mode { void (*create_vm_done)(struct kvm_vm *vm); /* Hook to collect the dirty pages into the bitmap provided */ void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *ring_buf_idx); + void *bitmap, u32 num_pages, + u32 *ring_buf_idx); /* Hook to call when after each vcpu run */ void (*after_vcpu_run)(struct kvm_vcpu *vcpu); } log_modes[LOG_MODE_NUM] = { @@ -459,8 +459,8 @@ static void log_mode_create_vm_done(struct kvm_vm *vm) } static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *ring_buf_idx) + void *bitmap, u32 num_pages, + u32 *ring_buf_idx) { struct log_mode *mode = &log_modes[host_log_mode]; @@ -494,11 +494,11 @@ static void *vcpu_worker(void *data) static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap) { - uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0; - uint64_t step = vm_num_host_pages(mode, 1); + 
u64 page, nr_dirty_pages = 0, nr_clean_pages = 0; + u64 step = vm_num_host_pages(mode, 1); for (page = 0; page < host_num_pages; page += step) { - uint64_t val = *(uint64_t *)(host_test_mem + page * host_page_size); + u64 val = *(u64 *)(host_test_mem + page * host_page_size); bool bmap0_dirty = __test_and_clear_bit_le(page, bmap[0]); /* @@ -575,7 +575,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap) } static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu, - uint64_t extra_mem_pages, void *guest_code) + u64 extra_mem_pages, void *guest_code) { struct kvm_vm *vm; @@ -592,7 +592,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu, struct test_params { unsigned long iterations; unsigned long interval; - uint64_t phys_offset; + u64 phys_offset; }; static void run_test(enum vm_guest_mode mode, void *arg) @@ -601,7 +601,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct kvm_vcpu *vcpu; struct kvm_vm *vm; unsigned long *bmap[2]; - uint32_t ring_buf_idx = 0; + u32 ring_buf_idx = 0; int sem_val; if (!log_mode_supported()) { @@ -667,7 +667,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); /* Cache the HVA pointer of the region */ - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); + host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem); /* Export the shared variables to the guest */ sync_global_to_guest(vm, host_page_size); diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c index f4644c9d2d3b42..216f10644c1aa6 100644 --- a/tools/testing/selftests/kvm/get-reg-list.c +++ b/tools/testing/selftests/kvm/get-reg-list.c @@ -216,7 +216,7 @@ static void run_test(struct vcpu_reg_list *c) * since we don't know the capabilities of any new registers. 
*/ for_each_present_blessed_reg(i) { - uint8_t addr[2048 / 8]; + u8 addr[2048 / 8]; struct kvm_one_reg reg = { .id = reg_list->reg[i], .addr = (__u64)&addr, diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c index ec7644aae999d4..d6528c6f5e031d 100644 --- a/tools/testing/selftests/kvm/guest_memfd_test.c +++ b/tools/testing/selftests/kvm/guest_memfd_test.c @@ -171,7 +171,7 @@ static void test_numa_allocation(int fd, size_t total_size) kvm_munmap(mem, total_size); } -static void test_collapse(int fd, uint64_t flags) +static void test_collapse(int fd, u64 flags) { const size_t pmd_size = get_trans_hugepagesz(); void *reserved_addr; @@ -346,7 +346,7 @@ static void test_invalid_punch_hole(int fd, size_t total_size) } static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm, - uint64_t guest_memfd_flags) + u64 guest_memfd_flags) { size_t size; int fd; @@ -389,8 +389,8 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm) static void test_guest_memfd_flags(struct kvm_vm *vm) { - uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS); - uint64_t flag; + u64 valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS); + u64 flag; int fd; for (flag = BIT(0); flag; flag <<= 1) { @@ -419,7 +419,7 @@ do { \ #define gmem_test(__test, __vm, __flags) \ __gmem_test(__test, __vm, __flags, page_size * 4) -static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags) +static void __test_guest_memfd(struct kvm_vm *vm, u64 flags) { test_create_guest_memfd_multiple(vm); test_create_guest_memfd_invalid_sizes(vm, flags); @@ -452,7 +452,7 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags) static void test_guest_memfd(unsigned long vm_type) { struct kvm_vm *vm = vm_create_barebones_type(vm_type); - uint64_t flags; + u64 flags; test_guest_memfd_flags(vm); @@ -470,7 +470,7 @@ static void test_guest_memfd(unsigned long vm_type) kvm_vm_free(vm); } -static void guest_code(uint8_t 
*mem, uint64_t size) +static void guest_code(u8 *mem, u64 size) { size_t i; @@ -489,12 +489,12 @@ static void test_guest_memfd_guest(void) * the guest's code, stack, and page tables, and low memory contains * the PCI hole and other MMIO regions that need to be avoided. */ - const uint64_t gpa = SZ_4G; + const gpa_t gpa = SZ_4G; const int slot = 1; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint8_t *mem; + u8 *mem; size_t size; int fd, i; diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c index bcf582852db997..79d3fc326e9177 100644 --- a/tools/testing/selftests/kvm/guest_print_test.c +++ b/tools/testing/selftests/kvm/guest_print_test.c @@ -16,22 +16,22 @@ #include "ucall_common.h" struct guest_vals { - uint64_t a; - uint64_t b; - uint64_t type; + u64 a; + u64 b; + u64 type; }; static struct guest_vals vals; /* GUEST_PRINTF()/GUEST_ASSERT_FMT() does not support float or double. */ #define TYPE_LIST \ -TYPE(test_type_i64, I64, "%ld", int64_t) \ -TYPE(test_type_u64, U64u, "%lu", uint64_t) \ -TYPE(test_type_x64, U64x, "0x%lx", uint64_t) \ -TYPE(test_type_X64, U64X, "0x%lX", uint64_t) \ -TYPE(test_type_u32, U32u, "%u", uint32_t) \ -TYPE(test_type_x32, U32x, "0x%x", uint32_t) \ -TYPE(test_type_X32, U32X, "0x%X", uint32_t) \ +TYPE(test_type_i64, I64, "%ld", s64) \ +TYPE(test_type_u64, U64u, "%lu", u64) \ +TYPE(test_type_x64, U64x, "0x%lx", u64) \ +TYPE(test_type_X64, U64X, "0x%lX", u64) \ +TYPE(test_type_u32, U32u, "%u", u32) \ +TYPE(test_type_x32, U32x, "0x%x", u32) \ +TYPE(test_type_X32, U32X, "0x%X", u32) \ TYPE(test_type_int, INT, "%d", int) \ TYPE(test_type_char, CHAR, "%c", char) \ TYPE(test_type_str, STR, "'%s'", const char *) \ @@ -56,7 +56,7 @@ static void fn(struct kvm_vcpu *vcpu, T a, T b) \ \ snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \ snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \ - vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext }; \ 
+ vals = (struct guest_vals){ (u64)a, (u64)b, TYPE_##ext }; \ sync_global_to_guest(vcpu->vm, vals); \ run_test(vcpu, expected_printf, expected_assert); \ } diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c index 94bd6ed24cf3c5..3147f5c97e9432 100644 --- a/tools/testing/selftests/kvm/hardware_disable_test.c +++ b/tools/testing/selftests/kvm/hardware_disable_test.c @@ -80,7 +80,7 @@ static inline void check_join(pthread_t thread, void **retval) TEST_ASSERT(r == 0, "%s: failed to join thread", __func__); } -static void run_test(uint32_t run) +static void run_test(u32 run) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -88,7 +88,7 @@ static void run_test(uint32_t run) pthread_t threads[VCPU_NUM]; pthread_t throw_away; void *b; - uint32_t i, j; + u32 i, j; CPU_ZERO(&cpu_set); for (i = 0; i < VCPU_NUM; i++) @@ -149,7 +149,7 @@ void wait_for_child_setup(pid_t pid) int main(int argc, char **argv) { - uint32_t i; + u32 i; int s, r; pid_t pid; diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h index e2c4e9f0010f46..a5836d4ab7ee34 100644 --- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h +++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h @@ -18,20 +18,20 @@ enum arch_timer { #define CTL_ISTATUS (1 << 2) #define msec_to_cycles(msec) \ - (timer_get_cntfrq() * (uint64_t)(msec) / 1000) + (timer_get_cntfrq() * (u64)(msec) / 1000) #define usec_to_cycles(usec) \ - (timer_get_cntfrq() * (uint64_t)(usec) / 1000000) + (timer_get_cntfrq() * (u64)(usec) / 1000000) #define cycles_to_usec(cycles) \ - ((uint64_t)(cycles) * 1000000 / timer_get_cntfrq()) + ((u64)(cycles) * 1000000 / timer_get_cntfrq()) -static inline uint32_t timer_get_cntfrq(void) +static inline u32 timer_get_cntfrq(void) { return read_sysreg(cntfrq_el0); } -static inline uint64_t timer_get_cntct(enum arch_timer timer) +static inline u64 timer_get_cntct(enum 
arch_timer timer) { isb(); @@ -48,7 +48,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer) return 0; } -static inline void timer_set_cval(enum arch_timer timer, uint64_t cval) +static inline void timer_set_cval(enum arch_timer timer, u64 cval) { switch (timer) { case VIRTUAL: @@ -64,7 +64,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval) isb(); } -static inline uint64_t timer_get_cval(enum arch_timer timer) +static inline u64 timer_get_cval(enum arch_timer timer) { switch (timer) { case VIRTUAL: @@ -79,7 +79,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer) return 0; } -static inline void timer_set_tval(enum arch_timer timer, int32_t tval) +static inline void timer_set_tval(enum arch_timer timer, s32 tval) { switch (timer) { case VIRTUAL: @@ -95,7 +95,7 @@ static inline void timer_set_tval(enum arch_timer timer, int32_t tval) isb(); } -static inline int32_t timer_get_tval(enum arch_timer timer) +static inline s32 timer_get_tval(enum arch_timer timer) { isb(); switch (timer) { @@ -111,7 +111,7 @@ static inline int32_t timer_get_tval(enum arch_timer timer) return 0; } -static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl) +static inline void timer_set_ctl(enum arch_timer timer, u32 ctl) { switch (timer) { case VIRTUAL: @@ -127,7 +127,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl) isb(); } -static inline uint32_t timer_get_ctl(enum arch_timer timer) +static inline u32 timer_get_ctl(enum arch_timer timer) { switch (timer) { case VIRTUAL: @@ -142,15 +142,15 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer) return 0; } -static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec) +static inline void timer_set_next_cval_ms(enum arch_timer timer, u32 msec) { - uint64_t now_ct = timer_get_cntct(timer); - uint64_t next_ct = now_ct + msec_to_cycles(msec); + u64 now_ct = timer_get_cntct(timer); + u64 next_ct = now_ct + msec_to_cycles(msec); 
timer_set_cval(timer, next_ct); } -static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec) +static inline void timer_set_next_tval_ms(enum arch_timer timer, u32 msec) { timer_set_tval(timer, msec_to_cycles(msec)); } diff --git a/tools/testing/selftests/kvm/include/arm64/delay.h b/tools/testing/selftests/kvm/include/arm64/delay.h index 329e4f5079ea55..6a5d4634af2c6f 100644 --- a/tools/testing/selftests/kvm/include/arm64/delay.h +++ b/tools/testing/selftests/kvm/include/arm64/delay.h @@ -8,10 +8,10 @@ #include "arch_timer.h" -static inline void __delay(uint64_t cycles) +static inline void __delay(u64 cycles) { enum arch_timer timer = VIRTUAL; - uint64_t start = timer_get_cntct(timer); + u64 start = timer_get_cntct(timer); while ((timer_get_cntct(timer) - start) < cycles) cpu_relax(); diff --git a/tools/testing/selftests/kvm/include/arm64/gic.h b/tools/testing/selftests/kvm/include/arm64/gic.h index cc7a7f34ed3772..615745093c9821 100644 --- a/tools/testing/selftests/kvm/include/arm64/gic.h +++ b/tools/testing/selftests/kvm/include/arm64/gic.h @@ -48,8 +48,8 @@ void gic_set_dir(unsigned int intid); * split is true, EOI drops the priority and deactivates the interrupt. 
*/ void gic_set_eoi_split(bool split); -void gic_set_priority_mask(uint64_t mask); -void gic_set_priority(uint32_t intid, uint32_t prio); +void gic_set_priority_mask(u64 mask); +void gic_set_priority(u32 intid, u32 prio); void gic_irq_set_active(unsigned int intid); void gic_irq_clear_active(unsigned int intid); bool gic_irq_get_active(unsigned int intid); @@ -59,7 +59,7 @@ bool gic_irq_get_pending(unsigned int intid); void gic_irq_set_config(unsigned int intid, bool is_edge); void gic_irq_set_group(unsigned int intid, bool group); -void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size, - vm_paddr_t pend_table); +void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size, + gpa_t pend_table); #endif /* SELFTEST_KVM_GIC_H */ diff --git a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h index 58feef3eb386cb..a43a407e2d5c13 100644 --- a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h +++ b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h @@ -5,11 +5,10 @@ #include -void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz, - vm_paddr_t device_tbl, size_t device_tbl_sz, - vm_paddr_t cmdq, size_t cmdq_size); +void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl, + size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size); -void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base, +void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base, size_t itt_size, bool valid); void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid); void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id, diff --git a/tools/testing/selftests/kvm/include/arm64/processor.h b/tools/testing/selftests/kvm/include/arm64/processor.h index ac97a1c436fc49..b8a902ba8573da 100644 --- a/tools/testing/selftests/kvm/include/arm64/processor.h +++ b/tools/testing/selftests/kvm/include/arm64/processor.h @@ -128,7 +128,7 @@ #define 
PTE_ADDR_51_50_LPA2_SHIFT 8 void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init); -struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, +struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, struct kvm_vcpu_init *init, void *guest_code); struct ex_regs { @@ -167,8 +167,8 @@ enum { (v) == VECTOR_SYNC_LOWER_64 || \ (v) == VECTOR_SYNC_LOWER_32) -void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, - uint32_t *ipa16k, uint32_t *ipa64k); +void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k, + u32 *ipa16k, u32 *ipa64k); void vm_init_descriptor_tables(struct kvm_vm *vm); void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu); @@ -179,8 +179,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, handler_fn handler); -uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level); -uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva); +u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level); +u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva); static inline void cpu_relax(void) { @@ -287,9 +287,9 @@ struct arm_smccc_res { * @res: pointer to write the return values from registers x0-x3 * */ -void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, - uint64_t arg6, struct arm_smccc_res *res); +void smccc_hvc(u32 function_id, u64 arg0, u64 arg1, + u64 arg2, u64 arg3, u64 arg4, u64 arg5, + u64 arg6, struct arm_smccc_res *res); /** * smccc_smc - Invoke a SMCCC function using the smc conduit @@ -298,9 +298,9 @@ void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, * @res: pointer to write the return values from registers x0-x3 * */ -void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, - uint64_t arg6, struct arm_smccc_res *res); +void 
smccc_smc(u32 function_id, u64 arg0, u64 arg1, + u64 arg2, u64 arg3, u64 arg4, u64 arg5, + u64 arg6, struct arm_smccc_res *res); /* Execute a Wait For Interrupt instruction. */ void wfi(void); diff --git a/tools/testing/selftests/kvm/include/arm64/ucall.h b/tools/testing/selftests/kvm/include/arm64/ucall.h index 4ec801f37f00e7..2210d3d94c40c4 100644 --- a/tools/testing/selftests/kvm/include/arm64/ucall.h +++ b/tools/testing/selftests/kvm/include/arm64/ucall.h @@ -10,9 +10,9 @@ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each * VM), it must not be accessed from host code. */ -extern vm_vaddr_t *ucall_exit_mmio_addr; +extern gva_t *ucall_exit_mmio_addr; -static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +static inline void ucall_arch_do_ucall(gva_t uc) { WRITE_ONCE(*ucall_exit_mmio_addr, uc); } diff --git a/tools/testing/selftests/kvm/include/arm64/vgic.h b/tools/testing/selftests/kvm/include/arm64/vgic.h index 688beccc94366c..1f8b04373987cc 100644 --- a/tools/testing/selftests/kvm/include/arm64/vgic.h +++ b/tools/testing/selftests/kvm/include/arm64/vgic.h @@ -11,27 +11,27 @@ #include "kvm_util.h" #define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \ - (((uint64_t)(count) << 52) | \ - ((uint64_t)((base) >> 16) << 16) | \ - ((uint64_t)(flags) << 12) | \ + (((u64)(count) << 52) | \ + ((u64)((base) >> 16) << 16) | \ + ((u64)(flags) << 12) | \ index) bool kvm_supports_vgic_v3(void); -int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); +int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs); void __vgic_v3_init(int fd); -int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs); #define VGIC_MAX_RESERVED 1023 -void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level); -int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level); +void kvm_irq_set_level_info(int gic_fd, u32 
intid, int level); +int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level); -void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); -int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); +void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level); +int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level); /* The vcpu arg only applies to private interrupts. */ -void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu); -void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu); +void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu); +void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu); #define KVM_IRQCHIP_NUM_PINS (1020 - 32) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index f861242b4ae806..2ecaaa0e996540 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -58,7 +58,7 @@ struct kvm_binary_stats { struct kvm_vcpu { struct list_head list; - uint32_t id; + u32 id; int fd; struct kvm_vm *vm; struct kvm_run *run; @@ -70,8 +70,8 @@ struct kvm_vcpu { #endif struct kvm_binary_stats stats; struct kvm_dirty_gfn *dirty_gfns; - uint32_t fetch_index; - uint32_t dirty_gfns_count; + u32 fetch_index; + u32 dirty_gfns_count; }; struct userspace_mem_regions { @@ -90,7 +90,7 @@ enum kvm_mem_region_type { struct kvm_mmu { bool pgd_created; - uint64_t pgd; + u64 pgd; int pgtable_levels; struct kvm_mmu_arch arch; @@ -105,16 +105,16 @@ struct kvm_vm { unsigned int page_shift; unsigned int pa_bits; unsigned int va_bits; - uint64_t max_gfn; + u64 max_gfn; struct list_head vcpus; struct userspace_mem_regions regions; struct sparsebit *vpages_valid; struct sparsebit *vpages_mapped; bool has_irqchip; - vm_paddr_t ucall_mmio_addr; - vm_vaddr_t handlers; - uint32_t dirty_ring_size; - uint64_t gpa_tag_mask; + gpa_t ucall_mmio_addr; + gva_t 
handlers; + u32 dirty_ring_size; + gpa_t gpa_tag_mask; /* * "mmu" is the guest's stage-1, with a short name because the vast @@ -132,7 +132,7 @@ struct kvm_vm { * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE] * memslot. */ - uint32_t memslots[NR_MEM_REGIONS]; + u32 memslots[NR_MEM_REGIONS]; }; struct vcpu_reg_sublist { @@ -164,7 +164,7 @@ struct vcpu_reg_list { else struct userspace_mem_region * -memslot2region(struct kvm_vm *vm, uint32_t memslot); +memslot2region(struct kvm_vm *vm, u32 memslot); static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, enum kvm_mem_region_type type) @@ -213,13 +213,13 @@ enum vm_guest_mode { }; struct vm_shape { - uint32_t type; - uint8_t mode; - uint8_t pad0; - uint16_t pad1; + u32 type; + u8 mode; + u8 pad0; + u16 pad1; }; -kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t)); +kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64)); #define VM_TYPE_DEFAULT 0 @@ -404,21 +404,22 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap) return ret; } -static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) +static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0) { struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); } -static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) + +static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0) { struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); } -static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, - uint64_t size, uint64_t attributes) +static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa, + u64 size, u64 attributes) { struct kvm_memory_attributes attr = { .attributes = attributes, @@ -438,35 +439,35 @@ static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, } 
-static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, - uint64_t size) +static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa, + u64 size) { vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE); } -static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, - uint64_t size) +static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa, + u64 size) { vm_set_memory_attributes(vm, gpa, size, 0); } -void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size, +void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size, bool punch_hole); -static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, - uint64_t size) +static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa, + u64 size) { vm_guest_mem_fallocate(vm, gpa, size, true); } -static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, - uint64_t size) +static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa, + u64 size) { vm_guest_mem_fallocate(vm, gpa, size, false); } -void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size); -const char *vm_guest_mode_string(uint32_t i); +void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size); +const char *vm_guest_mode_string(u32 i); void kvm_vm_free(struct kvm_vm *vmp); void kvm_vm_restart(struct kvm_vm *vmp); @@ -474,7 +475,7 @@ void kvm_vm_release(struct kvm_vm *vmp); void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename); int kvm_memfd_alloc(size_t size, bool hugepages); -void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); +void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent); static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) { @@ -484,7 +485,7 @@ static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) } static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, - uint64_t first_page, uint32_t num_pages) + u64 
first_page, u32 num_pages) { struct kvm_clear_dirty_log args = { .dirty_bitmap = log, @@ -496,14 +497,14 @@ static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args); } -static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) +static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm) { return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL); } static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, - uint64_t address, - uint64_t size, bool pio) + u64 address, + u64 size, bool pio) { struct kvm_coalesced_mmio_zone zone = { .addr = address, @@ -515,8 +516,8 @@ static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, } static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm, - uint64_t address, - uint64_t size, bool pio) + u64 address, + u64 size, bool pio) { struct kvm_coalesced_mmio_zone zone = { .addr = address, @@ -535,8 +536,8 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm) return fd; } -static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, - uint32_t flags) +static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, + u32 flags) { struct kvm_irqfd irqfd = { .fd = eventfd, @@ -548,20 +549,19 @@ static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, return __vm_ioctl(vm, KVM_IRQFD, &irqfd); } -static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, - uint32_t flags) +static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags) { int ret = __kvm_irqfd(vm, gsi, eventfd, flags); TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm); } -static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) +static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd) { kvm_irqfd(vm, gsi, eventfd, 0); } -static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) +static inline void kvm_deassign_irqfd(struct kvm_vm 
*vm, u32 gsi, int eventfd) { kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN); } @@ -610,15 +610,15 @@ static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc } void read_stat_data(int stats_fd, struct kvm_stats_header *header, - struct kvm_stats_desc *desc, uint64_t *data, + struct kvm_stats_desc *desc, u64 *data, size_t max_elements); void kvm_get_stat(struct kvm_binary_stats *stats, const char *name, - uint64_t *data, size_t max_elements); + u64 *data, size_t max_elements); #define __get_stat(stats, stat) \ ({ \ - uint64_t data; \ + u64 data; \ \ kvm_get_stat(stats, #stat, &data, 1); \ data; \ @@ -664,8 +664,8 @@ static inline bool is_smt_on(void) void vm_create_irqchip(struct kvm_vm *vm); -static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, - uint64_t flags) +static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size, + u64 flags) { struct kvm_create_guest_memfd guest_memfd = { .size = size, @@ -675,8 +675,8 @@ static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd); } -static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, - uint64_t flags) +static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size, + u64 flags) { int fd = __vm_create_guest_memfd(vm, size, flags); @@ -684,24 +684,23 @@ static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, return fd; } -void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva); -int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva); -void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva, - uint32_t guest_memfd, uint64_t guest_memfd_offset); -int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, 
uint64_t size, void *hva, - uint32_t guest_memfd, uint64_t guest_memfd_offset); +void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva); +int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva); +void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva, + u32 guest_memfd, u64 guest_memfd_offset); +int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva, + u32 guest_memfd, u64 guest_memfd_offset); void vm_userspace_mem_region_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t gpa, uint32_t slot, uint64_t npages, - uint32_t flags); + gpa_t gpa, u32 slot, u64 npages, u32 flags); void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags, - int guest_memfd_fd, uint64_t guest_memfd_offset); + gpa_t gpa, u32 slot, u64 npages, u32 flags, + int guest_memfd_fd, u64 guest_memfd_offset); #ifndef vm_arch_has_protected_memory static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) @@ -710,36 +709,34 @@ static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) } #endif -void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); -void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot); -void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); -void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); -struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); -void vm_populate_vaddr_bitmap(struct kvm_vm *vm); -vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); -vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); -vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type); -vm_vaddr_t 
vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, - vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type); -vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); -vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, - enum kvm_mem_region_type type); -vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); - -void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, +void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags); +void vm_mem_region_reload(struct kvm_vm *vm, u32 slot); +void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa); +void vm_mem_region_delete(struct kvm_vm *vm, u32 slot); +struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id); +void vm_populate_gva_bitmap(struct kvm_vm *vm); +gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva); +gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva); +gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type); +gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type); +gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages); +gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type); +gva_t vm_alloc_page(struct kvm_vm *vm); + +void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages); -void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); -void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); -vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); -void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); +void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa); +void *addr_gva2hva(struct kvm_vm *vm, gva_t gva); +gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva); +void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa); #ifndef vcpu_arch_put_guest #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0) #endif -static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) +static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, 
gpa_t gpa) { return gpa & ~vm->gpa_tag_mask; } @@ -755,8 +752,8 @@ static inline int __vcpu_run(struct kvm_vcpu *vcpu) void vcpu_run_complete_io(struct kvm_vcpu *vcpu); struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu); -static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap, - uint64_t arg0) +static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap, + u64 arg0) { struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; @@ -811,31 +808,34 @@ static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) vcpu_ioctl(vcpu, KVM_SET_FPU, fpu); } -static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr) +static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr) { - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr }; + struct kvm_one_reg reg = { .id = id, .addr = (u64)addr }; return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, ®); } -static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) + +static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) { - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); } -static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id) + +static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id) { - uint64_t val; - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; + u64 val; + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id); vcpu_ioctl(vcpu, KVM_GET_ONE_REG, ®); return val; } -static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) + +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) { - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; 
TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id); @@ -880,75 +880,75 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu) return fd; } -int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr); +int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr); -static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) +static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr) { int ret = __kvm_has_device_attr(dev_fd, group, attr); TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno); } -int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val); +int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val); -static inline void kvm_device_attr_get(int dev_fd, uint32_t group, - uint64_t attr, void *val) +static inline void kvm_device_attr_get(int dev_fd, u32 group, + u64 attr, void *val) { int ret = __kvm_device_attr_get(dev_fd, group, attr, val); TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret)); } -int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val); +int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val); -static inline void kvm_device_attr_set(int dev_fd, uint32_t group, - uint64_t attr, void *val) +static inline void kvm_device_attr_set(int dev_fd, u32 group, + u64 attr, void *val) { int ret = __kvm_device_attr_set(dev_fd, group, attr, val); TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret)); } -static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr) +static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group, + u64 attr) { return __kvm_has_device_attr(vcpu->fd, group, attr); } -static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr) +static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group, + u64 attr) { kvm_has_device_attr(vcpu->fd, group, attr); } -static inline 
int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr, void *val) +static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group, + u64 attr, void *val) { return __kvm_device_attr_get(vcpu->fd, group, attr, val); } -static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr, void *val) +static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group, + u64 attr, void *val) { kvm_device_attr_get(vcpu->fd, group, attr, val); } -static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr, void *val) +static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group, + u64 attr, void *val) { return __kvm_device_attr_set(vcpu->fd, group, attr, val); } -static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr, void *val) +static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group, + u64 attr, void *val) { kvm_device_attr_set(vcpu->fd, group, attr, val); } -int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type); -int __kvm_create_device(struct kvm_vm *vm, uint64_t type); +int __kvm_test_create_device(struct kvm_vm *vm, u64 type); +int __kvm_create_device(struct kvm_vm *vm, u64 type); -static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) +static inline int kvm_create_device(struct kvm_vm *vm, u64 type) { int fd = __kvm_create_device(vm, type); @@ -964,7 +964,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu); * Input Args: * vcpu - vCPU * num - number of arguments - * ... - arguments, each of type uint64_t + * ... - arguments, each of type u64 * * Output Args: None * @@ -972,40 +972,38 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu); * * Sets the first @num input parameters for the function at @vcpu's entry point, * per the C calling convention of the architecture, to the values given as - * variable args. 
Each of the variable args is expected to be of type uint64_t. + * variable args. Each of the variable args is expected to be of type u64. * The maximum @num can be is specific to the architecture. */ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...); -void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); -int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); +void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level); +int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level); #define KVM_MAX_IRQ_ROUTES 4096 struct kvm_irq_routing *kvm_gsi_routing_create(void); void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, - uint32_t gsi, uint32_t pin); + u32 gsi, u32 pin); int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); const char *exit_reason_str(unsigned int exit_reason); -vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, - uint32_t memslot); -vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot, - bool protected); -vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); +gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot); +gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa, + u32 memslot, bool protected); +gpa_t vm_alloc_page_table(struct kvm_vm *vm); -static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot) +static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + gpa_t min_gpa, u32 memslot) { /* * By default, allocate memory as protected for VMs that support * protected memory, as the majority of memory for such VMs is * protected, i.e. using shared memory is effectively opt-in. 
*/ - return __vm_phy_pages_alloc(vm, num, paddr_min, memslot, + return __vm_phy_pages_alloc(vm, num, min_gpa, memslot, vm_arch_has_protected_memory(vm)); } @@ -1016,8 +1014,8 @@ static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, * calculate the amount of memory needed for per-vCPU data, e.g. stacks. */ struct kvm_vm *____vm_create(struct vm_shape shape); -struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, - uint64_t nr_extra_pages); +struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus, + u64 nr_extra_pages); static inline struct kvm_vm *vm_create_barebones(void) { @@ -1034,16 +1032,16 @@ static inline struct kvm_vm *vm_create_barebones_type(unsigned long type) return ____vm_create(shape); } -static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus) +static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus) { return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0); } -struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, - uint64_t extra_mem_pages, +struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus, + u64 extra_mem_pages, void *guest_code, struct kvm_vcpu *vcpus[]); -static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, +static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus, void *guest_code, struct kvm_vcpu *vcpus[]) { @@ -1054,7 +1052,7 @@ static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, struct kvm_vcpu **vcpu, - uint64_t extra_mem_pages, + u64 extra_mem_pages, void *guest_code); /* @@ -1062,7 +1060,7 @@ struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, * additional pages of guest memory. Returns the VM and vCPU (via out param). 
*/ static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, - uint64_t extra_mem_pages, + u64 extra_mem_pages, void *guest_code) { return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu, @@ -1084,7 +1082,7 @@ static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm); -void kvm_set_files_rlimit(uint32_t nr_vcpus); +void kvm_set_files_rlimit(u32 nr_vcpus); int __pin_task_to_cpu(pthread_t task, int cpu); @@ -1115,7 +1113,7 @@ static inline int pin_self_to_any_cpu(void) } void kvm_print_vcpu_pinning_help(void); -void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], +void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[], int nr_vcpus); unsigned long vm_compute_max_gfn(struct kvm_vm *vm); @@ -1131,12 +1129,12 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) } #define sync_global_to_guest(vm, g) ({ \ - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ memcpy(_p, &(g), sizeof(g)); \ }) #define sync_global_from_guest(vm, g) ({ \ - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ memcpy(&(g), _p, sizeof(g)); \ }) @@ -1147,7 +1145,7 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) * undesirable to change the host's copy of the global. 
*/ #define write_guest_global(vm, g, val) ({ \ - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ typeof(g) _val = val; \ \ memcpy(_p, &(_val), sizeof(g)); \ @@ -1156,10 +1154,10 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) void assert_on_unhandled_exception(struct kvm_vcpu *vcpu); void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, - uint8_t indent); + u8 indent); static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu, - uint8_t indent) + u8 indent) { vcpu_arch_dump(stream, vcpu, indent); } @@ -1171,10 +1169,10 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu, * vm - Virtual Machine * vcpu_id - The id of the VCPU to add to the VM. */ -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id); void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code); -static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, +static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, void *guest_code) { struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id); @@ -1185,10 +1183,10 @@ static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, } /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. 
*/ -struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id); +struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id); static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, - uint32_t vcpu_id) + u32 vcpu_id) { return vm_arch_vcpu_recreate(vm, vcpu_id); } @@ -1203,27 +1201,15 @@ static inline void virt_pgd_alloc(struct kvm_vm *vm) } /* - * VM Virtual Page Map - * - * Input Args: - * vm - Virtual Machine - * vaddr - VM Virtual Address - * paddr - VM Physical Address - * memslot - Memory region slot for new virtual translation tables - * - * Output Args: None - * - * Return: None - * * Within @vm, creates a virtual translation for the page starting - * at @vaddr to the page starting at @paddr. + * at @gva to the page starting at @gpa. */ -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr); +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa); -static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - virt_arch_pg_map(vm, vaddr, paddr); - sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); + virt_arch_pg_map(vm, gva, gpa); + sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift); } @@ -1242,9 +1228,9 @@ static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr * Returns the VM physical address of the translated VM virtual * address given by @gva. */ -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva); -static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva) { return addr_arch_gva2gpa(vm, gva); } @@ -1264,9 +1250,9 @@ static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) * Dumps to the FILE stream given by @stream, the contents of all the * virtual translation tables for the VM given by @vm. 
*/ -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent); -static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +static inline void virt_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { virt_arch_dump(stream, vm, indent); } @@ -1277,7 +1263,7 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0); } -static inline uint64_t vm_page_align(struct kvm_vm *vm, uint64_t v) +static inline u64 vm_page_align(struct kvm_vm *vm, u64 v) { return (v + vm->page_size - 1) & ~(vm->page_size - 1); } @@ -1293,9 +1279,9 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus); void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm); void kvm_arch_vm_release(struct kvm_vm *vm); -bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr); +bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa); -uint32_t guest_get_vcpuid(void); +u32 guest_get_vcpuid(void); bool kvm_arch_has_default_irqchip(void); diff --git a/tools/testing/selftests/kvm/include/kvm_util_types.h b/tools/testing/selftests/kvm/include/kvm_util_types.h index 0366e9bce7f936..ed0087e316742e 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_types.h +++ b/tools/testing/selftests/kvm/include/kvm_util_types.h @@ -2,6 +2,8 @@ #ifndef SELFTEST_KVM_UTIL_TYPES_H #define SELFTEST_KVM_UTIL_TYPES_H +#include + /* * Provide a version of static_assert() that is guaranteed to have an optional * message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE @@ -14,9 +16,9 @@ #define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg) #define kvm_static_assert(expr, ...) 
__kvm_static_assert(expr, ##__VA_ARGS__, #expr) -typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ -typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ +typedef u64 gpa_t; /* Virtual Machine (Guest) physical address */ +typedef u64 gva_t; /* Virtual Machine (Guest) virtual address */ -#define INVALID_GPA (~(uint64_t)0) +#define INVALID_GPA (~(u64)0) #endif /* SELFTEST_KVM_UTIL_TYPES_H */ diff --git a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h index 2ed106b32c81b8..3888aeeb352488 100644 --- a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h +++ b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h @@ -70,9 +70,9 @@ static inline void timer_set_next_cmp_ms(unsigned int msec, bool period) csr_write(val, LOONGARCH_CSR_TCFG); } -static inline void __delay(uint64_t cycles) +static inline void __delay(u64 cycles) { - uint64_t start = timer_get_cycles(); + u64 start = timer_get_cycles(); while ((timer_get_cycles() - start) < cycles) cpu_relax(); diff --git a/tools/testing/selftests/kvm/include/loongarch/ucall.h b/tools/testing/selftests/kvm/include/loongarch/ucall.h index 4ec801f37f00e7..2210d3d94c40c4 100644 --- a/tools/testing/selftests/kvm/include/loongarch/ucall.h +++ b/tools/testing/selftests/kvm/include/loongarch/ucall.h @@ -10,9 +10,9 @@ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each * VM), it must not be accessed from host code. 
*/ -extern vm_vaddr_t *ucall_exit_mmio_addr; +extern gva_t *ucall_exit_mmio_addr; -static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +static inline void ucall_arch_do_ucall(gva_t uc) { WRITE_ONCE(*ucall_exit_mmio_addr, uc); } diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h index 9071eb6dea60a6..0d1d6230cc051b 100644 --- a/tools/testing/selftests/kvm/include/memstress.h +++ b/tools/testing/selftests/kvm/include/memstress.h @@ -20,9 +20,9 @@ #define MEMSTRESS_MEM_SLOT_INDEX 1 struct memstress_vcpu_args { - uint64_t gpa; - uint64_t gva; - uint64_t pages; + gpa_t gpa; + gva_t gva; + u64 pages; /* Only used by the host userspace part of the vCPU thread */ struct kvm_vcpu *vcpu; @@ -32,11 +32,11 @@ struct memstress_vcpu_args { struct memstress_args { struct kvm_vm *vm; /* The starting address and size of the guest test region. */ - uint64_t gpa; - uint64_t size; - uint64_t guest_page_size; - uint32_t random_seed; - uint32_t write_percent; + gpa_t gpa; + u64 size; + u64 guest_page_size; + u32 random_seed; + u32 write_percent; /* Run vCPUs in L2 instead of L1, if the architecture supports it. */ bool nested; @@ -45,7 +45,7 @@ struct memstress_args { /* True if all vCPUs are pinned to pCPUs */ bool pin_vcpus; /* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */ - uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS]; + u32 vcpu_to_pcpu[KVM_MAX_VCPUS]; /* Test is done, stop running vCPUs. 
*/ bool stop_vcpus; @@ -56,27 +56,27 @@ struct memstress_args { extern struct memstress_args memstress_args; struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, - uint64_t vcpu_memory_bytes, int slots, + u64 vcpu_memory_bytes, int slots, enum vm_mem_backing_src_type backing_src, bool partition_vcpu_memory_access); void memstress_destroy_vm(struct kvm_vm *vm); -void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent); +void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent); void memstress_set_random_access(struct kvm_vm *vm, bool random_access); void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *)); void memstress_join_vcpu_threads(int vcpus); -void memstress_guest_code(uint32_t vcpu_id); +void memstress_guest_code(u32 vcpu_id); -uint64_t memstress_nested_pages(int nr_vcpus); +u64 memstress_nested_pages(int nr_vcpus); void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]); void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots); void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots); void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots); void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], - int slots, uint64_t pages_per_slot); -unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot); + int slots, u64 pages_per_slot); +unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot); void memstress_free_bitmaps(unsigned long *bitmaps[], int slots); #endif /* SELFTEST_KVM_MEMSTRESS_H */ diff --git a/tools/testing/selftests/kvm/include/riscv/arch_timer.h b/tools/testing/selftests/kvm/include/riscv/arch_timer.h index 225d81dad064fc..28ffc014da2ae1 100644 --- a/tools/testing/selftests/kvm/include/riscv/arch_timer.h +++ b/tools/testing/selftests/kvm/include/riscv/arch_timer.h @@ -14,25 +14,25 @@ static unsigned long timer_freq; #define 
msec_to_cycles(msec) \ - ((timer_freq) * (uint64_t)(msec) / 1000) + ((timer_freq) * (u64)(msec) / 1000) #define usec_to_cycles(usec) \ - ((timer_freq) * (uint64_t)(usec) / 1000000) + ((timer_freq) * (u64)(usec) / 1000000) #define cycles_to_usec(cycles) \ - ((uint64_t)(cycles) * 1000000 / (timer_freq)) + ((u64)(cycles) * 1000000 / (timer_freq)) -static inline uint64_t timer_get_cycles(void) +static inline u64 timer_get_cycles(void) { return csr_read(CSR_TIME); } -static inline void timer_set_cmp(uint64_t cval) +static inline void timer_set_cmp(u64 cval) { csr_write(CSR_STIMECMP, cval); } -static inline uint64_t timer_get_cmp(void) +static inline u64 timer_get_cmp(void) { return csr_read(CSR_STIMECMP); } @@ -47,17 +47,17 @@ static inline void timer_irq_disable(void) csr_clear(CSR_SIE, IE_TIE); } -static inline void timer_set_next_cmp_ms(uint32_t msec) +static inline void timer_set_next_cmp_ms(u32 msec) { - uint64_t now_ct = timer_get_cycles(); - uint64_t next_ct = now_ct + msec_to_cycles(msec); + u64 now_ct = timer_get_cycles(); + u64 next_ct = now_ct + msec_to_cycles(msec); timer_set_cmp(next_ct); } -static inline void __delay(uint64_t cycles) +static inline void __delay(u64 cycles) { - uint64_t start = timer_get_cycles(); + u64 start = timer_get_cycles(); while ((timer_get_cycles() - start) < cycles) cpu_relax(); diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h index 4dade8c4d18ef2..e3acf2ae9881ea 100644 --- a/tools/testing/selftests/kvm/include/riscv/processor.h +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -25,8 +25,7 @@ #define GET_RM(insn) (((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3) #define GET_CSR_NUM(insn) (((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT) -static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, - uint64_t idx, uint64_t size) +static inline u64 __kvm_reg_id(u64 type, u64 subtype, u64 idx, u64 size) { return KVM_REG_RISCV | type | subtype | 
idx | size; } @@ -62,14 +61,14 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, KVM_REG_RISCV_SBI_SINGLE, \ idx, KVM_REG_SIZE_ULONG) -bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext); +bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext); -static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext) +static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, u64 isa_ext) { return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext)); } -static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext) +static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, u64 sbi_ext) { return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext)); } diff --git a/tools/testing/selftests/kvm/include/riscv/ucall.h b/tools/testing/selftests/kvm/include/riscv/ucall.h index a695ae36f3e0de..2de7c6a3609689 100644 --- a/tools/testing/selftests/kvm/include/riscv/ucall.h +++ b/tools/testing/selftests/kvm/include/riscv/ucall.h @@ -7,11 +7,11 @@ #define UCALL_EXIT_REASON KVM_EXIT_RISCV_SBI -static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { } -static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +static inline void ucall_arch_do_ucall(gva_t uc) { sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, KVM_RISCV_SELFTESTS_SBI_UCALL, diff --git a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h index b0ed71302722ae..6deaf18fec22e2 100644 --- a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h +++ b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h @@ -8,6 +8,6 @@ #ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER #define SELFTEST_KVM_DIAG318_TEST_HANDLER -uint64_t get_diag318_info(void); +u64 get_diag318_info(void); #endif diff --git a/tools/testing/selftests/kvm/include/s390/facility.h b/tools/testing/selftests/kvm/include/s390/facility.h index 
00a1ced6538b2b..41a26574266689 100644 --- a/tools/testing/selftests/kvm/include/s390/facility.h +++ b/tools/testing/selftests/kvm/include/s390/facility.h @@ -16,7 +16,7 @@ /* alt_stfle_fac_list[16] + stfle_fac_list[16] */ #define NB_STFL_DOUBLEWORDS 32 -extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS]; +extern u64 stfl_doublewords[NB_STFL_DOUBLEWORDS]; extern bool stfle_flag; static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr) @@ -24,7 +24,7 @@ static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr) return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); } -static inline void stfle(uint64_t *fac, unsigned int nb_doublewords) +static inline void stfle(u64 *fac, unsigned int nb_doublewords) { register unsigned long r0 asm("0") = nb_doublewords - 1; diff --git a/tools/testing/selftests/kvm/include/s390/ucall.h b/tools/testing/selftests/kvm/include/s390/ucall.h index 8035a872a351bb..3907d629304f8b 100644 --- a/tools/testing/selftests/kvm/include/s390/ucall.h +++ b/tools/testing/selftests/kvm/include/s390/ucall.h @@ -6,11 +6,11 @@ #define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC -static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { } -static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +static inline void ucall_arch_do_ucall(gva_t uc) { /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */ asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory"); diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h index bc760761e1a320..e027e5790946f1 100644 --- a/tools/testing/selftests/kvm/include/sparsebit.h +++ b/tools/testing/selftests/kvm/include/sparsebit.h @@ -6,7 +6,7 @@ * * Header file that describes API to the sparsebit library. * This library provides a memory efficient means of storing - * the settings of bits indexed via a uint64_t. 
Memory usage + * the settings of bits indexed via a u64. Memory usage * is reasonable, significantly less than (2^64 / 8) bytes, as * long as bits that are mostly set or mostly cleared are close * to each other. This library is efficient in memory usage @@ -25,8 +25,8 @@ extern "C" { #endif struct sparsebit; -typedef uint64_t sparsebit_idx_t; -typedef uint64_t sparsebit_num_t; +typedef u64 sparsebit_idx_t; +typedef u64 sparsebit_num_t; struct sparsebit *sparsebit_alloc(void); void sparsebit_free(struct sparsebit **sbitp); diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index b4872ba8ed1245..d9b433b834f1b6 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -22,6 +22,8 @@ #include #include "kselftest.h" +#include + #define msecs_to_usecs(msec) ((msec) * 1000ULL) static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; } @@ -99,25 +101,25 @@ do { \ size_t parse_size(const char *size); -int64_t timespec_to_ns(struct timespec ts); -struct timespec timespec_add_ns(struct timespec ts, int64_t ns); +s64 timespec_to_ns(struct timespec ts); +struct timespec timespec_add_ns(struct timespec ts, s64 ns); struct timespec timespec_add(struct timespec ts1, struct timespec ts2); struct timespec timespec_sub(struct timespec ts1, struct timespec ts2); struct timespec timespec_elapsed(struct timespec start); struct timespec timespec_div(struct timespec ts, int divisor); struct guest_random_state { - uint32_t seed; + u32 seed; }; -extern uint32_t guest_random_seed; +extern u32 guest_random_seed; extern struct guest_random_state guest_rng; -struct guest_random_state new_guest_random_state(uint32_t seed); -uint32_t guest_random_u32(struct guest_random_state *state); +struct guest_random_state new_guest_random_state(u32 seed); +u32 guest_random_u32(struct guest_random_state *state); static inline bool __guest_random_bool(struct 
guest_random_state *state, - uint8_t percent) + u8 percent) { return (guest_random_u32(state) % 100) < percent; } @@ -127,9 +129,9 @@ static inline bool guest_random_bool(struct guest_random_state *state) return __guest_random_bool(state, 50); } -static inline uint64_t guest_random_u64(struct guest_random_state *state) +static inline u64 guest_random_u64(struct guest_random_state *state) { - return ((uint64_t)guest_random_u32(state) << 32) | guest_random_u32(state); + return ((u64)guest_random_u32(state) << 32) | guest_random_u32(state); } enum vm_mem_backing_src_type { @@ -158,7 +160,7 @@ enum vm_mem_backing_src_type { struct vm_mem_backing_src_alias { const char *name; - uint32_t flag; + u32 flag; }; #define MIN_RUN_DELAY_NS 200000UL @@ -166,9 +168,9 @@ struct vm_mem_backing_src_alias { bool thp_configured(void); size_t get_trans_hugepagesz(void); size_t get_def_hugetlb_pagesz(void); -const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i); -size_t get_backing_src_pagesz(uint32_t i); -bool is_backing_src_hugetlb(uint32_t i); +const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i); +size_t get_backing_src_pagesz(u32 i); +bool is_backing_src_hugetlb(u32 i); void backing_src_help(const char *flag); enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name); long get_run_delay(void); @@ -189,18 +191,18 @@ static inline bool backing_src_can_be_huge(enum vm_mem_backing_src_type t) } /* Aligns x up to the next multiple of size. Size must be a power of 2. 
*/ -static inline uint64_t align_up(uint64_t x, uint64_t size) +static inline u64 align_up(u64 x, u64 size) { - uint64_t mask = size - 1; + u64 mask = size - 1; TEST_ASSERT(size != 0 && !(size & (size - 1)), "size not a power of 2: %lu", size); return ((x + mask) & ~mask); } -static inline uint64_t align_down(uint64_t x, uint64_t size) +static inline u64 align_down(u64 x, u64 size) { - uint64_t x_aligned_up = align_up(x, size); + u64 x_aligned_up = align_up(x, size); if (x == x_aligned_up) return x; @@ -215,7 +217,7 @@ static inline void *align_ptr_up(void *x, size_t size) int atoi_paranoid(const char *num_str); -static inline uint32_t atoi_positive(const char *name, const char *num_str) +static inline u32 atoi_positive(const char *name, const char *num_str) { int num = atoi_paranoid(num_str); @@ -223,7 +225,7 @@ static inline uint32_t atoi_positive(const char *name, const char *num_str) return num; } -static inline uint32_t atoi_non_negative(const char *name, const char *num_str) +static inline u32 atoi_non_negative(const char *name, const char *num_str) { int num = atoi_paranoid(num_str); diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h index 9b6edaafe6d49e..b7d5d2c84701c5 100644 --- a/tools/testing/selftests/kvm/include/timer_test.h +++ b/tools/testing/selftests/kvm/include/timer_test.h @@ -18,21 +18,21 @@ /* Timer test cmdline parameters */ struct test_args { - uint32_t nr_vcpus; - uint32_t nr_iter; - uint32_t timer_period_ms; - uint32_t migration_freq_ms; - uint32_t timer_err_margin_us; + u32 nr_vcpus; + u32 nr_iter; + u32 timer_period_ms; + u32 migration_freq_ms; + u32 timer_err_margin_us; /* Members of struct kvm_arm_counter_offset */ - uint64_t counter_offset; - uint64_t reserved; + u64 counter_offset; + u64 reserved; }; /* Shared variables between host and guest */ struct test_vcpu_shared_data { - uint32_t nr_iter; + u32 nr_iter; int guest_stage; - uint64_t xcnt; + u64 xcnt; }; extern struct 
test_args test_args; diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h index d9d6581b8d4f22..cbdcb0a50c4f9a 100644 --- a/tools/testing/selftests/kvm/include/ucall_common.h +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -21,26 +21,26 @@ enum { #define UCALL_BUFFER_LEN 1024 struct ucall { - uint64_t cmd; - uint64_t args[UCALL_MAX_ARGS]; + u64 cmd; + u64 args[UCALL_MAX_ARGS]; char buffer[UCALL_BUFFER_LEN]; /* Host virtual address of this struct. */ struct ucall *hva; }; -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); -void ucall_arch_do_ucall(vm_vaddr_t uc); +void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa); +void ucall_arch_do_ucall(gva_t uc); void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); -void ucall(uint64_t cmd, int nargs, ...); -__printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...); -__printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp, +void ucall(u64 cmd, int nargs, ...); +__printf(2, 3) void ucall_fmt(u64 cmd, const char *fmt, ...); +__printf(5, 6) void ucall_assert(u64 cmd, const char *exp, const char *file, unsigned int line, const char *fmt, ...); -uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); -void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); -int ucall_nr_pages_required(uint64_t page_size); +u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); +void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa); +int ucall_nr_pages_required(u64 page_size); /* * Perform userspace call without any associated data. This bare call avoids @@ -48,7 +48,7 @@ int ucall_nr_pages_required(uint64_t page_size); * the full ucall() are problematic and/or unwanted. Note, this will come out * as UCALL_NONE on the backend. 
*/ -#define GUEST_UCALL_NONE() ucall_arch_do_ucall((vm_vaddr_t)NULL) +#define GUEST_UCALL_NONE() ucall_arch_do_ucall((gva_t)NULL) #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4) diff --git a/tools/testing/selftests/kvm/include/userfaultfd_util.h b/tools/testing/selftests/kvm/include/userfaultfd_util.h index 60f7f9d435dc2a..0bc1dc16600e05 100644 --- a/tools/testing/selftests/kvm/include/userfaultfd_util.h +++ b/tools/testing/selftests/kvm/include/userfaultfd_util.h @@ -25,7 +25,7 @@ struct uffd_reader_args { struct uffd_desc { int uffd; - uint64_t num_readers; + u64 num_readers; /* Holds the write ends of the pipes for killing the readers. */ int *pipefds; pthread_t *readers; @@ -33,8 +33,8 @@ struct uffd_desc { }; struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, - void *hva, uint64_t len, - uint64_t num_readers, + void *hva, u64 len, + u64 num_readers, uffd_handler_t handler); void uffd_stop_demand_paging(struct uffd_desc *uffd); diff --git a/tools/testing/selftests/kvm/include/x86/apic.h b/tools/testing/selftests/kvm/include/x86/apic.h index 5ca6bacbd70e1a..31887bdc3d6c40 100644 --- a/tools/testing/selftests/kvm/include/x86/apic.h +++ b/tools/testing/selftests/kvm/include/x86/apic.h @@ -79,42 +79,42 @@ void apic_disable(void); void xapic_enable(void); void x2apic_enable(void); -static inline uint32_t get_bsp_flag(void) +static inline u32 get_bsp_flag(void) { return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP; } -static inline uint32_t xapic_read_reg(unsigned int reg) +static inline u32 xapic_read_reg(unsigned int reg) { - return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2]; + return ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2]; } -static inline void xapic_write_reg(unsigned int reg, uint32_t val) +static inline void xapic_write_reg(unsigned int reg, u32 val) { - ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val; + ((volatile u32 *)APIC_DEFAULT_GPA)[reg 
>> 2] = val; } -static inline uint64_t x2apic_read_reg(unsigned int reg) +static inline u64 x2apic_read_reg(unsigned int reg) { return rdmsr(APIC_BASE_MSR + (reg >> 4)); } -static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value) +static inline u8 x2apic_write_reg_safe(unsigned int reg, u64 value) { return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value); } -static inline void x2apic_write_reg(unsigned int reg, uint64_t value) +static inline void x2apic_write_reg(unsigned int reg, u64 value) { - uint8_t fault = x2apic_write_reg_safe(reg, value); + u8 fault = x2apic_write_reg_safe(reg, value); __GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n", fault, APIC_BASE_MSR + (reg >> 4), value); } -static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value) +static inline void x2apic_write_reg_fault(unsigned int reg, u64 value) { - uint8_t fault = x2apic_write_reg_safe(reg, value); + u8 fault = x2apic_write_reg_safe(reg, value); __GUEST_ASSERT(fault == GP_VECTOR, "Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n", diff --git a/tools/testing/selftests/kvm/include/x86/evmcs.h b/tools/testing/selftests/kvm/include/x86/evmcs.h index 5a74bb30e2f8ee..be79bda024bf16 100644 --- a/tools/testing/selftests/kvm/include/x86/evmcs.h +++ b/tools/testing/selftests/kvm/include/x86/evmcs.h @@ -10,9 +10,9 @@ #include "hyperv.h" #include "vmx.h" -#define u16 uint16_t -#define u32 uint32_t -#define u64 uint64_t +#define u16 u16 +#define u32 u32 +#define u64 u64 #define EVMCS_VERSION 1 @@ -245,7 +245,7 @@ static inline void evmcs_enable(void) enable_evmcs = true; } -static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs) +static inline int evmcs_vmptrld(u64 vmcs_pa, void *vmcs) { current_vp_assist->current_nested_vmcs = vmcs_pa; current_vp_assist->enlighten_vmentry = 1; @@ -265,7 +265,7 @@ static inline bool load_evmcs(struct hyperv_test_pages *hv) return true; } -static inline int evmcs_vmptrst(uint64_t *value) +static inline int 
evmcs_vmptrst(u64 *value) { *value = current_vp_assist->current_nested_vmcs & ~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; @@ -273,7 +273,7 @@ static inline int evmcs_vmptrst(uint64_t *value) return 0; } -static inline int evmcs_vmread(uint64_t encoding, uint64_t *value) +static inline int evmcs_vmread(u64 encoding, u64 *value) { switch (encoding) { case GUEST_RIP: @@ -672,7 +672,7 @@ static inline int evmcs_vmread(uint64_t encoding, uint64_t *value) return 0; } -static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value) +static inline int evmcs_vmwrite(u64 encoding, u64 value) { switch (encoding) { case GUEST_RIP: @@ -1226,9 +1226,9 @@ static inline int evmcs_vmlaunch(void) "pop %%rbp;" : [ret]"=&a"(ret) : [host_rsp]"r" - ((uint64_t)&current_evmcs->host_rsp), + ((u64)&current_evmcs->host_rsp), [host_rip]"r" - ((uint64_t)&current_evmcs->host_rip) + ((u64)&current_evmcs->host_rip) : "memory", "cc", "rbx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); return ret; @@ -1265,9 +1265,9 @@ static inline int evmcs_vmresume(void) "pop %%rbp;" : [ret]"=&a"(ret) : [host_rsp]"r" - ((uint64_t)&current_evmcs->host_rsp), + ((u64)&current_evmcs->host_rsp), [host_rip]"r" - ((uint64_t)&current_evmcs->host_rip) + ((u64)&current_evmcs->host_rip) : "memory", "cc", "rbx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); return ret; diff --git a/tools/testing/selftests/kvm/include/x86/hyperv.h b/tools/testing/selftests/kvm/include/x86/hyperv.h index f13e532be2406e..78003f5a22f396 100644 --- a/tools/testing/selftests/kvm/include/x86/hyperv.h +++ b/tools/testing/selftests/kvm/include/x86/hyperv.h @@ -254,12 +254,12 @@ * Issue a Hyper-V hypercall. Returns exception vector raised or 0, 'hv_status' * is set to the hypercall status (if no exception occurred). 
*/ -static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address, - vm_vaddr_t output_address, - uint64_t *hv_status) +static inline u8 __hyperv_hypercall(u64 control, gva_t input_address, + gva_t output_address, + u64 *hv_status) { - uint64_t error_code; - uint8_t vector; + u64 error_code; + u8 vector; /* Note both the hypercall and the "asm safe" clobber r9-r11. */ asm volatile("mov %[output_address], %%r8\n\t" @@ -274,11 +274,11 @@ static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address, } /* Issue a Hyper-V hypercall and assert that it succeeded. */ -static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address, - vm_vaddr_t output_address) +static inline void hyperv_hypercall(u64 control, gva_t input_address, + gva_t output_address) { - uint64_t hv_status; - uint8_t vector; + u64 hv_status; + u8 vector; vector = __hyperv_hypercall(control, input_address, output_address, &hv_status); @@ -327,27 +327,27 @@ struct hv_vp_assist_page { extern struct hv_vp_assist_page *current_vp_assist; -int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist); +int enable_vp_assist(u64 vp_assist_pa, void *vp_assist); struct hyperv_test_pages { /* VP assist page */ void *vp_assist_hva; - uint64_t vp_assist_gpa; + u64 vp_assist_gpa; void *vp_assist; /* Partition assist page */ void *partition_assist_hva; - uint64_t partition_assist_gpa; + u64 partition_assist_gpa; void *partition_assist; /* Enlightened VMCS */ void *enlightened_vmcs_hva; - uint64_t enlightened_vmcs_gpa; + u64 enlightened_vmcs_gpa; void *enlightened_vmcs; }; struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, - vm_vaddr_t *p_hv_pages_gva); + gva_t *p_hv_pages_gva); /* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */ #define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0) diff --git a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h index be35d26bb32005..c33ab6e0417179 100644 --- 
a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h +++ b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h @@ -11,19 +11,19 @@ extern bool is_forced_emulation_enabled; struct pte_masks { - uint64_t present; - uint64_t writable; - uint64_t user; - uint64_t readable; - uint64_t executable; - uint64_t accessed; - uint64_t dirty; - uint64_t huge; - uint64_t nx; - uint64_t c; - uint64_t s; + u64 present; + u64 writable; + u64 user; + u64 readable; + u64 executable; + u64 accessed; + u64 dirty; + u64 huge; + u64 nx; + u64 c; + u64 s; - uint64_t always_set; + u64 always_set; }; struct kvm_mmu_arch { @@ -33,12 +33,12 @@ struct kvm_mmu_arch { struct kvm_mmu; struct kvm_vm_arch { - vm_vaddr_t gdt; - vm_vaddr_t tss; - vm_vaddr_t idt; + gva_t gdt; + gva_t tss; + gva_t idt; - uint64_t c_bit; - uint64_t s_bit; + u64 c_bit; + u64 s_bit; int sev_fd; bool is_pt_protected; }; @@ -62,7 +62,7 @@ do { \ : "+m" (mem) \ : "r" (val) : "memory"); \ } else { \ - uint64_t __old = READ_ONCE(mem); \ + u64 __old = READ_ONCE(mem); \ \ __asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]" \ : [ptr] "+m" (mem), [old] "+a" (__old) \ diff --git a/tools/testing/selftests/kvm/include/x86/pmu.h b/tools/testing/selftests/kvm/include/x86/pmu.h index 72575eadb63a08..98537cc8840d1e 100644 --- a/tools/testing/selftests/kvm/include/x86/pmu.h +++ b/tools/testing/selftests/kvm/include/x86/pmu.h @@ -6,8 +6,8 @@ #define SELFTEST_KVM_PMU_H #include -#include +#include #include #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300 @@ -104,14 +104,15 @@ enum amd_pmu_zen_events { NR_AMD_ZEN_EVENTS, }; -extern const uint64_t intel_pmu_arch_events[]; -extern const uint64_t amd_pmu_zen_events[]; +extern const u64 intel_pmu_arch_events[]; +extern const u64 amd_pmu_zen_events[]; enum pmu_errata { INSTRUCTIONS_RETIRED_OVERCOUNT, BRANCHES_RETIRED_OVERCOUNT, }; -extern uint64_t pmu_errata_mask; + +extern u64 pmu_errata_mask; void kvm_init_pmu_errata(void); diff --git 
a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h index d8634a760a6091..77f576ee7789d8 100644 --- a/tools/testing/selftests/kvm/include/x86/processor.h +++ b/tools/testing/selftests/kvm/include/x86/processor.h @@ -23,7 +23,7 @@ extern bool host_cpu_is_intel; extern bool host_cpu_is_amd; extern bool host_cpu_is_hygon; extern bool host_cpu_is_amd_compatible; -extern uint64_t guest_tsc_khz; +extern u64 guest_tsc_khz; #ifndef MAX_NR_CPUID_ENTRIES #define MAX_NR_CPUID_ENTRIES 100 @@ -399,17 +399,17 @@ struct gpr64_regs { }; struct desc64 { - uint16_t limit0; - uint16_t base0; + u16 limit0; + u16 base0; unsigned base1:8, type:4, s:1, dpl:2, p:1; unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8; - uint32_t base3; - uint32_t zero1; + u32 base3; + u32 zero1; } __attribute__((packed)); struct desc_ptr { - uint16_t size; - uint64_t address; + u16 size; + u64 address; } __attribute__((packed)); struct kvm_x86_state { @@ -427,18 +427,18 @@ struct kvm_x86_state { struct kvm_msrs msrs; }; -static inline uint64_t get_desc64_base(const struct desc64 *desc) +static inline u64 get_desc64_base(const struct desc64 *desc) { - return (uint64_t)desc->base3 << 32 | - (uint64_t)desc->base2 << 24 | - (uint64_t)desc->base1 << 16 | - (uint64_t)desc->base0; + return (u64)desc->base3 << 32 | + (u64)desc->base2 << 24 | + (u64)desc->base1 << 16 | + (u64)desc->base0; } -static inline uint64_t rdtsc(void) +static inline u64 rdtsc(void) { - uint32_t eax, edx; - uint64_t tsc_val; + u32 eax, edx; + u64 tsc_val; /* * The lfence is to wait (on Intel CPUs) until all previous * instructions have been executed. 
If software requires RDTSC to be @@ -446,39 +446,39 @@ static inline uint64_t rdtsc(void) * execute LFENCE immediately after RDTSC */ __asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx)); - tsc_val = ((uint64_t)edx) << 32 | eax; + tsc_val = ((u64)edx) << 32 | eax; return tsc_val; } -static inline uint64_t rdtscp(uint32_t *aux) +static inline u64 rdtscp(u32 *aux) { - uint32_t eax, edx; + u32 eax, edx; __asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux)); - return ((uint64_t)edx) << 32 | eax; + return ((u64)edx) << 32 | eax; } -static inline uint64_t rdmsr(uint32_t msr) +static inline u64 rdmsr(u32 msr) { - uint32_t a, d; + u32 a, d; __asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory"); - return a | ((uint64_t) d << 32); + return a | ((u64)d << 32); } -static inline void wrmsr(uint32_t msr, uint64_t value) +static inline void wrmsr(u32 msr, u64 value) { - uint32_t a = value; - uint32_t d = value >> 32; + u32 a = value; + u32 d = value >> 32; __asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory"); } -static inline uint16_t inw(uint16_t port) +static inline u16 inw(u16 port) { - uint16_t tmp; + u16 tmp; __asm__ __volatile__("in %%dx, %%ax" : /* output */ "=a" (tmp) @@ -487,120 +487,120 @@ static inline uint16_t inw(uint16_t port) return tmp; } -static inline uint16_t get_es(void) +static inline u16 get_es(void) { - uint16_t es; + u16 es; __asm__ __volatile__("mov %%es, %[es]" : /* output */ [es]"=rm"(es)); return es; } -static inline uint16_t get_cs(void) +static inline u16 get_cs(void) { - uint16_t cs; + u16 cs; __asm__ __volatile__("mov %%cs, %[cs]" : /* output */ [cs]"=rm"(cs)); return cs; } -static inline uint16_t get_ss(void) +static inline u16 get_ss(void) { - uint16_t ss; + u16 ss; __asm__ __volatile__("mov %%ss, %[ss]" : /* output */ [ss]"=rm"(ss)); return ss; } -static inline uint16_t get_ds(void) +static inline u16 get_ds(void) { - uint16_t ds; + u16 ds; __asm__ __volatile__("mov %%ds, %[ds]" : /* 
output */ [ds]"=rm"(ds)); return ds; } -static inline uint16_t get_fs(void) +static inline u16 get_fs(void) { - uint16_t fs; + u16 fs; __asm__ __volatile__("mov %%fs, %[fs]" : /* output */ [fs]"=rm"(fs)); return fs; } -static inline uint16_t get_gs(void) +static inline u16 get_gs(void) { - uint16_t gs; + u16 gs; __asm__ __volatile__("mov %%gs, %[gs]" : /* output */ [gs]"=rm"(gs)); return gs; } -static inline uint16_t get_tr(void) +static inline u16 get_tr(void) { - uint16_t tr; + u16 tr; __asm__ __volatile__("str %[tr]" : /* output */ [tr]"=rm"(tr)); return tr; } -static inline uint64_t get_cr0(void) +static inline u64 get_cr0(void) { - uint64_t cr0; + u64 cr0; __asm__ __volatile__("mov %%cr0, %[cr0]" : /* output */ [cr0]"=r"(cr0)); return cr0; } -static inline void set_cr0(uint64_t val) +static inline void set_cr0(u64 val) { __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory"); } -static inline uint64_t get_cr3(void) +static inline u64 get_cr3(void) { - uint64_t cr3; + u64 cr3; __asm__ __volatile__("mov %%cr3, %[cr3]" : /* output */ [cr3]"=r"(cr3)); return cr3; } -static inline void set_cr3(uint64_t val) +static inline void set_cr3(u64 val) { __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory"); } -static inline uint64_t get_cr4(void) +static inline u64 get_cr4(void) { - uint64_t cr4; + u64 cr4; __asm__ __volatile__("mov %%cr4, %[cr4]" : /* output */ [cr4]"=r"(cr4)); return cr4; } -static inline void set_cr4(uint64_t val) +static inline void set_cr4(u64 val) { __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory"); } -static inline uint64_t get_cr8(void) +static inline u64 get_cr8(void) { - uint64_t cr8; + u64 cr8; __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8)); return cr8; } -static inline void set_cr8(uint64_t val) +static inline void set_cr8(u64 val) { __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory"); } @@ -651,14 +651,14 @@ static inline struct desc_ptr get_idt(void) return idt; } -static inline void 
outl(uint16_t port, uint32_t value) +static inline void outl(u16 port, u32 value) { __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value)); } -static inline void __cpuid(uint32_t function, uint32_t index, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) +static inline void __cpuid(u32 function, u32 index, + u32 *eax, u32 *ebx, + u32 *ecx, u32 *edx) { *eax = function; *ecx = index; @@ -672,35 +672,35 @@ static inline void __cpuid(uint32_t function, uint32_t index, : "memory"); } -static inline void cpuid(uint32_t function, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) +static inline void cpuid(u32 function, + u32 *eax, u32 *ebx, + u32 *ecx, u32 *edx) { return __cpuid(function, 0, eax, ebx, ecx, edx); } -static inline uint32_t this_cpu_fms(void) +static inline u32 this_cpu_fms(void) { - uint32_t eax, ebx, ecx, edx; + u32 eax, ebx, ecx, edx; cpuid(1, &eax, &ebx, &ecx, &edx); return eax; } -static inline uint32_t this_cpu_family(void) +static inline u32 this_cpu_family(void) { return x86_family(this_cpu_fms()); } -static inline uint32_t this_cpu_model(void) +static inline u32 this_cpu_model(void) { return x86_model(this_cpu_fms()); } static inline bool this_cpu_vendor_string_is(const char *vendor) { - const uint32_t *chunk = (const uint32_t *)vendor; - uint32_t eax, ebx, ecx, edx; + const u32 *chunk = (const u32 *)vendor; + u32 eax, ebx, ecx, edx; cpuid(0, &eax, &ebx, &ecx, &edx); return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); @@ -724,10 +724,9 @@ static inline bool this_cpu_is_hygon(void) return this_cpu_vendor_string_is("HygonGenuine"); } -static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index, - uint8_t reg, uint8_t lo, uint8_t hi) +static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi) { - uint32_t gprs[4]; + u32 gprs[4]; __cpuid(function, index, &gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX], @@ -742,7 +741,7 @@ static inline bool this_cpu_has(struct 
kvm_x86_cpu_feature feature) feature.reg, feature.bit, feature.bit); } -static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property) +static inline u32 this_cpu_property(struct kvm_x86_cpu_property property) { return __this_cpu_has(property.function, property.index, property.reg, property.lo_bit, property.hi_bit); @@ -750,7 +749,7 @@ static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property) static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property) { - uint32_t max_leaf; + u32 max_leaf; switch (property.function & 0xc0000000) { case 0: @@ -770,7 +769,7 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property) static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature) { - uint32_t nr_bits; + u32 nr_bits; if (feature.f.reg == KVM_CPUID_EBX) { nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); @@ -782,13 +781,13 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature) return nr_bits > feature.f.bit || this_cpu_has(feature.f); } -static __always_inline uint64_t this_cpu_supported_xcr0(void) +static __always_inline u64 this_cpu_supported_xcr0(void) { if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO)) return 0; return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) | - ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); + ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); } typedef u32 __attribute__((vector_size(16))) sse128_t; @@ -867,7 +866,7 @@ static inline void cpu_relax(void) static inline void udelay(unsigned long usec) { - uint64_t start, now, cycles; + u64 start, now, cycles; GUEST_ASSERT(guest_tsc_khz); cycles = guest_tsc_khz / 1000 * usec; @@ -898,8 +897,8 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state); const struct kvm_msr_list *kvm_get_msr_index_list(void); const struct kvm_msr_list *kvm_get_feature_msr_index_list(void); -bool kvm_msr_is_in_save_restore_list(uint32_t msr_index); 
-uint64_t kvm_get_feature_msr(uint64_t msr_index); +bool kvm_msr_is_in_save_restore_list(u32 msr_index); +u64 kvm_get_feature_msr(u64 msr_index); static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs) @@ -954,20 +953,20 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs) } const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, - uint32_t function, uint32_t index); + u32 function, u32 index); const struct kvm_cpuid2 *kvm_get_supported_cpuid(void); -static inline uint32_t kvm_cpu_fms(void) +static inline u32 kvm_cpu_fms(void) { return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax; } -static inline uint32_t kvm_cpu_family(void) +static inline u32 kvm_cpu_family(void) { return x86_family(kvm_cpu_fms()); } -static inline uint32_t kvm_cpu_model(void) +static inline u32 kvm_cpu_model(void) { return x86_model(kvm_cpu_fms()); } @@ -980,17 +979,17 @@ static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature) return kvm_cpuid_has(kvm_get_supported_cpuid(), feature); } -uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, - struct kvm_x86_cpu_property property); +u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, + struct kvm_x86_cpu_property property); -static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property) +static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property) { return kvm_cpuid_property(kvm_get_supported_cpuid(), property); } static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property) { - uint32_t max_leaf; + u32 max_leaf; switch (property.function & 0xc0000000) { case 0: @@ -1010,7 +1009,7 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property) static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature) { - uint32_t nr_bits; + u32 nr_bits; if (feature.f.reg == KVM_CPUID_EBX) { nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); @@ -1022,13 +1021,13 @@ static 
inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature) return nr_bits > feature.f.bit || kvm_cpu_has(feature.f); } -static __always_inline uint64_t kvm_cpu_supported_xcr0(void) +static __always_inline u64 kvm_cpu_supported_xcr0(void) { if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO)) return 0; return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) | - ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); + ((u64)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); } static inline size_t kvm_cpuid2_size(int nr_entries) @@ -1062,8 +1061,8 @@ static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu) } static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu, - uint32_t function, - uint32_t index) + u32 function, + u32 index) { TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)"); @@ -1074,7 +1073,7 @@ static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *v } static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu, - uint32_t function) + u32 function) { return __vcpu_get_cpuid_entry(vcpu, function, 0); } @@ -1104,10 +1103,10 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu) void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, struct kvm_x86_cpu_property property, - uint32_t value); -void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr); + u32 value); +void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, u8 maxphyaddr); -void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function); +void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function); static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu, struct kvm_x86_cpu_feature feature) @@ -1135,8 +1134,8 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu, vcpu_set_or_clear_cpuid_feature(vcpu, feature, false); } -uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index); -int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t 
msr_index, uint64_t msr_value); +u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index); +int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value); /* * Assert on an MSR access(es) and pretty print the MSR name when possible. @@ -1161,14 +1160,14 @@ do { \ * is changing, etc. This is NOT an exhaustive list! The intent is to filter * out MSRs that are not durable _and_ that a selftest wants to write. */ -static inline bool is_durable_msr(uint32_t msr) +static inline bool is_durable_msr(u32 msr) { return msr != MSR_IA32_TSC; } #define vcpu_set_msr(vcpu, msr, val) \ do { \ - uint64_t r, v = val; \ + u64 r, v = val; \ \ TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \ "KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \ @@ -1182,28 +1181,28 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits); void kvm_init_vm_address_properties(struct kvm_vm *vm); struct ex_regs { - uint64_t rax, rcx, rdx, rbx; - uint64_t rbp, rsi, rdi; - uint64_t r8, r9, r10, r11; - uint64_t r12, r13, r14, r15; - uint64_t vector; - uint64_t error_code; - uint64_t rip; - uint64_t cs; - uint64_t rflags; + u64 rax, rcx, rdx, rbx; + u64 rbp, rsi, rdi; + u64 r8, r9, r10, r11; + u64 r12, r13, r14, r15; + u64 vector; + u64 error_code; + u64 rip; + u64 cs; + u64 rflags; }; struct idt_entry { - uint16_t offset0; - uint16_t selector; - uint16_t ist : 3; - uint16_t : 5; - uint16_t type : 4; - uint16_t : 1; - uint16_t dpl : 2; - uint16_t p : 1; - uint16_t offset1; - uint32_t offset2; uint32_t reserved; + u16 offset0; + u16 selector; + u16 ist : 3; + u16 : 5; + u16 type : 4; + u16 : 1; + u16 dpl : 2; + u16 p : 1; + u16 offset1; + u32 offset2; u32 reserved; }; void vm_install_exception_handler(struct kvm_vm *vm, int vector, @@ -1262,8 +1261,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, #define kvm_asm_safe(insn, inputs...) 
\ ({ \ - uint64_t ign_error_code; \ - uint8_t vector; \ + u64 ign_error_code; \ + u8 vector; \ \ asm volatile(KVM_ASM_SAFE(insn) \ : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ @@ -1274,7 +1273,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, #define kvm_asm_safe_ec(insn, error_code, inputs...) \ ({ \ - uint8_t vector; \ + u8 vector; \ \ asm volatile(KVM_ASM_SAFE(insn) \ : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \ @@ -1285,8 +1284,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, #define kvm_asm_safe_fep(insn, inputs...) \ ({ \ - uint64_t ign_error_code; \ - uint8_t vector; \ + u64 ign_error_code; \ + u8 vector; \ \ asm volatile(KVM_ASM_SAFE_FEP(insn) \ : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ @@ -1297,7 +1296,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, #define kvm_asm_safe_ec_fep(insn, error_code, inputs...) \ ({ \ - uint8_t vector; \ + u8 vector; \ \ asm volatile(KVM_ASM_SAFE_FEP(insn) \ : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \ @@ -1307,11 +1306,11 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, }) #define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \ -static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \ +static inline u8 insn##_safe ##_fep(u32 idx, u64 *val) \ { \ - uint64_t error_code; \ - uint8_t vector; \ - uint32_t a, d; \ + u64 error_code; \ + u8 vector; \ + u32 a, d; \ \ asm volatile(KVM_ASM_SAFE##_FEP(#insn) \ : "=a"(a), "=d"(d), \ @@ -1319,7 +1318,7 @@ static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \ : "c"(idx) \ : KVM_ASM_SAFE_CLOBBERS); \ \ - *val = (uint64_t)a | ((uint64_t)d << 32); \ + *val = (u64)a | ((u64)d << 32); \ return vector; \ } @@ -1335,12 +1334,12 @@ BUILD_READ_U64_SAFE_HELPERS(rdmsr) BUILD_READ_U64_SAFE_HELPERS(rdpmc) BUILD_READ_U64_SAFE_HELPERS(xgetbv) -static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val) +static inline u8 wrmsr_safe(u32 msr, u64 val) { return kvm_asm_safe("wrmsr", 
"a"(val & -1u), "d"(val >> 32), "c"(msr)); } -static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value) +static inline u8 xsetbv_safe(u32 index, u64 value) { u32 eax = value; u32 edx = value >> 32; @@ -1395,23 +1394,20 @@ static inline bool kvm_is_lbrv_enabled(void) return !!get_kvm_amd_param_integer("lbrv"); } -uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr); +u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva); -uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, - uint64_t a3); -uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1); -void xen_hypercall(uint64_t nr, uint64_t a0, void *a1); +u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3); +u64 __xen_hypercall(u64 nr, u64 a0, void *a1); +void xen_hypercall(u64 nr, u64 a0, void *a1); -static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa, - uint64_t size, uint64_t flags) +static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags) { return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0); } -static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size, - uint64_t flags) +static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags) { - uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags); + u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags); GUEST_ASSERT(!ret); } @@ -1456,7 +1452,7 @@ static inline void cli(void) asm volatile ("cli"); } -void __vm_xsave_require_permission(uint64_t xfeature, const char *name); +void __vm_xsave_require_permission(u64 xfeature, const char *name); #define vm_xsave_require_permission(xfeature) \ __vm_xsave_require_permission(xfeature, #xfeature) @@ -1511,17 +1507,17 @@ enum pg_level { void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels, struct pte_masks *pte_masks); -void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr, - uint64_t paddr, int level); -void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t 
paddr, - uint64_t nr_bytes, int level); +void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva, + gpa_t gpa, int level); +void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa, + u64 nr_bytes, int level); void vm_enable_tdp(struct kvm_vm *vm); bool kvm_cpu_has_tdp(void); -void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size); +void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size); void tdp_identity_map_default_memslots(struct kvm_vm *vm); -void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size); -uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa); +void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size); +u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa); /* * Basic CPU control in CR0 diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h index 008b4169f5e234..1af44c151d60ae 100644 --- a/tools/testing/selftests/kvm/include/x86/sev.h +++ b/tools/testing/selftests/kvm/include/x86/sev.h @@ -46,16 +46,16 @@ static inline bool is_sev_vm(struct kvm_vm *vm) return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM; } -void sev_vm_launch(struct kvm_vm *vm, uint32_t policy); -void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement); +void sev_vm_launch(struct kvm_vm *vm, u32 policy); +void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement); void sev_vm_launch_finish(struct kvm_vm *vm); -void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy); +void snp_vm_launch_start(struct kvm_vm *vm, u64 policy); void snp_vm_launch_update(struct kvm_vm *vm); void snp_vm_launch_finish(struct kvm_vm *vm); -struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, +struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code, struct kvm_vcpu **cpu); -void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement); +void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement); 
kvm_static_assert(SEV_RET_SUCCESS == 0); @@ -85,7 +85,7 @@ static inline u64 snp_default_policy(void) unsigned long raw; \ } sev_cmd = { .c = { \ .id = (cmd), \ - .data = (uint64_t)(arg), \ + .data = (u64)(arg), \ .sev_fd = (vm)->arch.sev_fd, \ } }; \ \ @@ -120,8 +120,8 @@ static inline void sev_register_encrypted_memory(struct kvm_vm *vm, vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range); } -static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, - uint64_t size) +static inline void sev_launch_update_data(struct kvm_vm *vm, gpa_t gpa, + u64 size) { struct kvm_sev_launch_update_data update_data = { .uaddr = (unsigned long)addr_gpa2hva(vm, gpa), @@ -131,8 +131,8 @@ static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data); } -static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, - uint64_t hva, uint64_t size, uint8_t type) +static inline void snp_launch_update_data(struct kvm_vm *vm, gpa_t gpa, + u64 hva, u64 size, u8 type) { struct kvm_sev_snp_launch_update update_data = { .uaddr = hva, diff --git a/tools/testing/selftests/kvm/include/x86/smm.h b/tools/testing/selftests/kvm/include/x86/smm.h index 19337c34f13eb4..2d1afa09819b08 100644 --- a/tools/testing/selftests/kvm/include/x86/smm.h +++ b/tools/testing/selftests/kvm/include/x86/smm.h @@ -8,8 +8,7 @@ #define SMRAM_MEMSLOT ((1 << 16) | 1) #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) -void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, - uint64_t smram_gpa, +void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa, const void *smi_handler, size_t handler_size); void inject_smi(struct kvm_vcpu *vcpu); diff --git a/tools/testing/selftests/kvm/include/x86/svm_util.h b/tools/testing/selftests/kvm/include/x86/svm_util.h index 5d7c42534bc45e..6c013eb838beba 100644 --- a/tools/testing/selftests/kvm/include/x86/svm_util.h +++ b/tools/testing/selftests/kvm/include/x86/svm_util.h 
@@ -16,20 +16,20 @@ struct svm_test_data { /* VMCB */ struct vmcb *vmcb; /* gva */ void *vmcb_hva; - uint64_t vmcb_gpa; + u64 vmcb_gpa; /* host state-save area */ struct vmcb_save_area *save_area; /* gva */ void *save_area_hva; - uint64_t save_area_gpa; + u64 save_area_gpa; /* MSR-Bitmap */ void *msr; /* gva */ void *msr_hva; - uint64_t msr_gpa; + u64 msr_gpa; /* NPT */ - uint64_t ncr3_gpa; + u64 ncr3_gpa; }; static inline void vmmcall(void) @@ -56,9 +56,9 @@ static inline void vmmcall(void) "clgi\n" \ ) -struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva); +struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva); void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp); -void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa); +void run_guest(struct vmcb *vmcb, u64 vmcb_gpa); static inline bool kvm_cpu_has_npt(void) { diff --git a/tools/testing/selftests/kvm/include/x86/ucall.h b/tools/testing/selftests/kvm/include/x86/ucall.h index d3825dcc3cd935..0e4950041e3ee2 100644 --- a/tools/testing/selftests/kvm/include/x86/ucall.h +++ b/tools/testing/selftests/kvm/include/x86/ucall.h @@ -6,7 +6,7 @@ #define UCALL_EXIT_REASON KVM_EXIT_IO -static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { } diff --git a/tools/testing/selftests/kvm/include/x86/vmx.h b/tools/testing/selftests/kvm/include/x86/vmx.h index 92b918700d249f..90fffaf9159583 100644 --- a/tools/testing/selftests/kvm/include/x86/vmx.h +++ b/tools/testing/selftests/kvm/include/x86/vmx.h @@ -285,16 +285,16 @@ enum vmcs_field { }; struct vmx_msr_entry { - uint32_t index; - uint32_t reserved; - uint64_t value; + u32 index; + u32 reserved; + u64 value; } __attribute__ ((aligned(16))); #include "evmcs.h" -static inline int vmxon(uint64_t phys) +static inline int vmxon(u64 phys) { - uint8_t ret; + u8 ret; __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]" : 
[ret]"=rm"(ret) @@ -309,9 +309,9 @@ static inline void vmxoff(void) __asm__ __volatile__("vmxoff"); } -static inline int vmclear(uint64_t vmcs_pa) +static inline int vmclear(u64 vmcs_pa) { - uint8_t ret; + u8 ret; __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]" : [ret]"=rm"(ret) @@ -321,9 +321,9 @@ static inline int vmclear(uint64_t vmcs_pa) return ret; } -static inline int vmptrld(uint64_t vmcs_pa) +static inline int vmptrld(u64 vmcs_pa) { - uint8_t ret; + u8 ret; if (enable_evmcs) return -1; @@ -336,10 +336,10 @@ static inline int vmptrld(uint64_t vmcs_pa) return ret; } -static inline int vmptrst(uint64_t *value) +static inline int vmptrst(u64 *value) { - uint64_t tmp; - uint8_t ret; + u64 tmp; + u8 ret; if (enable_evmcs) return evmcs_vmptrst(value); @@ -356,9 +356,9 @@ static inline int vmptrst(uint64_t *value) * A wrapper around vmptrst that ignores errors and returns zero if the * vmptrst instruction fails. */ -static inline uint64_t vmptrstz(void) +static inline u64 vmptrstz(void) { - uint64_t value = 0; + u64 value = 0; vmptrst(&value); return value; } @@ -391,8 +391,8 @@ static inline int vmlaunch(void) "pop %%rcx;" "pop %%rbp;" : [ret]"=&a"(ret) - : [host_rsp]"r"((uint64_t)HOST_RSP), - [host_rip]"r"((uint64_t)HOST_RIP) + : [host_rsp]"r"((u64)HOST_RSP), + [host_rip]"r"((u64)HOST_RIP) : "memory", "cc", "rbx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); return ret; @@ -426,8 +426,8 @@ static inline int vmresume(void) "pop %%rcx;" "pop %%rbp;" : [ret]"=&a"(ret) - : [host_rsp]"r"((uint64_t)HOST_RSP), - [host_rip]"r"((uint64_t)HOST_RIP) + : [host_rsp]"r"((u64)HOST_RSP), + [host_rip]"r"((u64)HOST_RIP) : "memory", "cc", "rbx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); return ret; @@ -447,10 +447,10 @@ static inline void vmcall(void) "r10", "r11", "r12", "r13", "r14", "r15"); } -static inline int vmread(uint64_t encoding, uint64_t *value) +static inline int vmread(u64 encoding, u64 *value) { - uint64_t tmp; - uint8_t ret; + u64 tmp; + u8 
ret; if (enable_evmcs) return evmcs_vmread(encoding, value); @@ -468,16 +468,16 @@ static inline int vmread(uint64_t encoding, uint64_t *value) * A wrapper around vmread that ignores errors and returns zero if the * vmread instruction fails. */ -static inline uint64_t vmreadz(uint64_t encoding) +static inline u64 vmreadz(u64 encoding) { - uint64_t value = 0; + u64 value = 0; vmread(encoding, &value); return value; } -static inline int vmwrite(uint64_t encoding, uint64_t value) +static inline int vmwrite(u64 encoding, u64 value) { - uint8_t ret; + u8 ret; if (enable_evmcs) return evmcs_vmwrite(encoding, value); @@ -490,41 +490,41 @@ static inline int vmwrite(uint64_t encoding, uint64_t value) return ret; } -static inline uint32_t vmcs_revision(void) +static inline u32 vmcs_revision(void) { return rdmsr(MSR_IA32_VMX_BASIC); } struct vmx_pages { void *vmxon_hva; - uint64_t vmxon_gpa; + u64 vmxon_gpa; void *vmxon; void *vmcs_hva; - uint64_t vmcs_gpa; + u64 vmcs_gpa; void *vmcs; void *msr_hva; - uint64_t msr_gpa; + u64 msr_gpa; void *msr; void *shadow_vmcs_hva; - uint64_t shadow_vmcs_gpa; + u64 shadow_vmcs_gpa; void *shadow_vmcs; void *vmread_hva; - uint64_t vmread_gpa; + u64 vmread_gpa; void *vmread; void *vmwrite_hva; - uint64_t vmwrite_gpa; + u64 vmwrite_gpa; void *vmwrite; void *apic_access_hva; - uint64_t apic_access_gpa; + u64 apic_access_gpa; void *apic_access; - uint64_t eptp_gpa; + u64 eptp_gpa; }; union vmx_basic { @@ -550,7 +550,7 @@ union vmx_ctrl_msr { }; }; -struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); +struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva); bool prepare_for_vmx_operation(struct vmx_pages *vmx); void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); bool load_vmcs(struct vmx_pages *vmx); diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c index c60a24a92829db..fc5242fb956fcd 100644 --- 
a/tools/testing/selftests/kvm/kvm_page_table_test.c +++ b/tools/testing/selftests/kvm/kvm_page_table_test.c @@ -46,12 +46,12 @@ static const char * const test_stage_string[] = { struct test_args { struct kvm_vm *vm; - uint64_t guest_test_virt_mem; - uint64_t host_page_size; - uint64_t host_num_pages; - uint64_t large_page_size; - uint64_t large_num_pages; - uint64_t host_pages_per_lpage; + u64 guest_test_virt_mem; + u64 host_page_size; + u64 host_num_pages; + u64 large_page_size; + u64 large_num_pages; + u64 host_pages_per_lpage; enum vm_mem_backing_src_type src_type; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; }; @@ -63,7 +63,7 @@ struct test_args { static enum test_stage guest_test_stage; /* Host variables */ -static uint32_t nr_vcpus = 1; +static u32 nr_vcpus = 1; static struct test_args test_args; static enum test_stage *current_stage; static bool host_quit; @@ -77,19 +77,19 @@ static sem_t test_stage_completed; * This will be set to the topmost valid physical address minus * the test memory size. */ -static uint64_t guest_test_phys_mem; +static u64 guest_test_phys_mem; /* * Guest virtual memory offset of the testing memory slot. * Must not conflict with identity mapped test code. 
*/ -static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; +static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; static void guest_code(bool do_write) { struct test_args *p = &test_args; enum test_stage *current_stage = &guest_test_stage; - uint64_t addr; + u64 addr; int i, j; while (true) { @@ -113,9 +113,9 @@ static void guest_code(bool do_write) case KVM_CREATE_MAPPINGS: for (i = 0; i < p->large_num_pages; i++) { if (do_write) - *(uint64_t *)addr = 0x0123456789ABCDEF; + *(u64 *)addr = 0x0123456789ABCDEF; else - READ_ONCE(*(uint64_t *)addr); + READ_ONCE(*(u64 *)addr); addr += p->large_page_size; } @@ -131,7 +131,7 @@ static void guest_code(bool do_write) case KVM_UPDATE_MAPPINGS: if (p->src_type == VM_MEM_SRC_ANONYMOUS) { for (i = 0; i < p->host_num_pages; i++) { - *(uint64_t *)addr = 0x0123456789ABCDEF; + *(u64 *)addr = 0x0123456789ABCDEF; addr += p->host_page_size; } break; @@ -142,7 +142,7 @@ static void guest_code(bool do_write) * Write to the first host page in each large * page region, and triger break of large pages. 
*/ - *(uint64_t *)addr = 0x0123456789ABCDEF; + *(u64 *)addr = 0x0123456789ABCDEF; /* * Access the middle host pages in each large @@ -152,7 +152,7 @@ static void guest_code(bool do_write) */ addr += p->large_page_size / 2; for (j = 0; j < p->host_pages_per_lpage / 2; j++) { - READ_ONCE(*(uint64_t *)addr); + READ_ONCE(*(u64 *)addr); addr += p->host_page_size; } } @@ -167,7 +167,7 @@ static void guest_code(bool do_write) */ case KVM_ADJUST_MAPPINGS: for (i = 0; i < p->host_num_pages; i++) { - READ_ONCE(*(uint64_t *)addr); + READ_ONCE(*(u64 *)addr); addr += p->host_page_size; } break; @@ -227,8 +227,8 @@ static void *vcpu_worker(void *data) } struct test_params { - uint64_t phys_offset; - uint64_t test_mem_size; + u64 phys_offset; + u64 test_mem_size; enum vm_mem_backing_src_type src_type; }; @@ -237,12 +237,12 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) int ret; struct test_params *p = arg; enum vm_mem_backing_src_type src_type = p->src_type; - uint64_t large_page_size = get_backing_src_pagesz(src_type); - uint64_t guest_page_size = vm_guest_mode_params[mode].page_size; - uint64_t host_page_size = getpagesize(); - uint64_t test_mem_size = p->test_mem_size; - uint64_t guest_num_pages; - uint64_t alignment; + u64 large_page_size = get_backing_src_pagesz(src_type); + u64 guest_page_size = vm_guest_mode_params[mode].page_size; + u64 host_page_size = getpagesize(); + u64 test_mem_size = p->test_mem_size; + u64 guest_num_pages; + u64 alignment; void *host_test_mem; struct kvm_vm *vm; @@ -281,7 +281,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); /* Cache the HVA pointer of the region */ - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); + host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem); /* Export shared structure test_args to guest */ sync_global_to_guest(vm, test_args); @@ -292,7 +292,7 @@ static 
struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) ret = sem_init(&test_stage_completed, 0, 0); TEST_ASSERT(ret == 0, "Error in sem_init"); - current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage)); + current_stage = addr_gva2hva(vm, (gva_t)(&guest_test_stage)); *current_stage = NUM_TEST_STAGES; pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); @@ -304,7 +304,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) pr_info("Guest physical test memory offset: 0x%lx\n", guest_test_phys_mem); pr_info("Host virtual test memory offset: 0x%lx\n", - (uint64_t)host_test_mem); + (u64)host_test_mem); pr_info("Number of testing vCPUs: %d\n", nr_vcpus); return vm; diff --git a/tools/testing/selftests/kvm/lib/arm64/gic.c b/tools/testing/selftests/kvm/lib/arm64/gic.c index b023868fe0b820..011dfe1dfcb39c 100644 --- a/tools/testing/selftests/kvm/lib/arm64/gic.c +++ b/tools/testing/selftests/kvm/lib/arm64/gic.c @@ -50,7 +50,7 @@ static void gic_dist_init(enum gic_type type, unsigned int nr_cpus) void gic_init(enum gic_type type, unsigned int nr_cpus) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); GUEST_ASSERT(type < GIC_TYPE_MAX); GUEST_ASSERT(nr_cpus); @@ -73,7 +73,7 @@ void gic_irq_disable(unsigned int intid) unsigned int gic_get_and_ack_irq(void) { - uint64_t irqstat; + u64 irqstat; unsigned int intid; GUEST_ASSERT(gic_common_ops); @@ -102,7 +102,7 @@ void gic_set_eoi_split(bool split) gic_common_ops->gic_set_eoi_split(split); } -void gic_set_priority_mask(uint64_t pmr) +void gic_set_priority_mask(u64 pmr) { GUEST_ASSERT(gic_common_ops); gic_common_ops->gic_set_priority_mask(pmr); diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_private.h b/tools/testing/selftests/kvm/lib/arm64/gic_private.h index b6a7e30c3eb1ff..6d393f5c568538 100644 --- a/tools/testing/selftests/kvm/lib/arm64/gic_private.h +++ b/tools/testing/selftests/kvm/lib/arm64/gic_private.h @@ -12,20 +12,20 @@ struct 
gic_common_ops { void (*gic_cpu_init)(unsigned int cpu); void (*gic_irq_enable)(unsigned int intid); void (*gic_irq_disable)(unsigned int intid); - uint64_t (*gic_read_iar)(void); - void (*gic_write_eoir)(uint32_t irq); - void (*gic_write_dir)(uint32_t irq); + u64 (*gic_read_iar)(void); + void (*gic_write_eoir)(u32 irq); + void (*gic_write_dir)(u32 irq); void (*gic_set_eoi_split)(bool split); - void (*gic_set_priority_mask)(uint64_t mask); - void (*gic_set_priority)(uint32_t intid, uint32_t prio); - void (*gic_irq_set_active)(uint32_t intid); - void (*gic_irq_clear_active)(uint32_t intid); - bool (*gic_irq_get_active)(uint32_t intid); - void (*gic_irq_set_pending)(uint32_t intid); - void (*gic_irq_clear_pending)(uint32_t intid); - bool (*gic_irq_get_pending)(uint32_t intid); - void (*gic_irq_set_config)(uint32_t intid, bool is_edge); - void (*gic_irq_set_group)(uint32_t intid, bool group); + void (*gic_set_priority_mask)(u64 mask); + void (*gic_set_priority)(u32 intid, u32 prio); + void (*gic_irq_set_active)(u32 intid); + void (*gic_irq_clear_active)(u32 intid); + bool (*gic_irq_get_active)(u32 intid); + void (*gic_irq_set_pending)(u32 intid); + void (*gic_irq_clear_pending)(u32 intid); + bool (*gic_irq_get_pending)(u32 intid); + void (*gic_irq_set_config)(u32 intid, bool is_edge); + void (*gic_irq_set_group)(u32 intid, bool group); }; extern const struct gic_common_ops gicv3_ops; diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c index 50754a27f4932d..a99a53accfe90d 100644 --- a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c +++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c @@ -50,13 +50,13 @@ static void gicv3_gicd_wait_for_rwp(void) } } -static inline volatile void *gicr_base_cpu(uint32_t cpu) +static inline volatile void *gicr_base_cpu(u32 cpu) { /* Align all the redistributors sequentially */ return GICR_BASE_GVA + cpu * SZ_64K * 2; } -static void gicv3_gicr_wait_for_rwp(uint32_t cpu) +static void 
gicv3_gicr_wait_for_rwp(u32 cpu) { unsigned int count = 100000; /* 1s */ @@ -66,7 +66,7 @@ static void gicv3_gicr_wait_for_rwp(uint32_t cpu) } } -static void gicv3_wait_for_rwp(uint32_t cpu_or_dist) +static void gicv3_wait_for_rwp(u32 cpu_or_dist) { if (cpu_or_dist & DIST_BIT) gicv3_gicd_wait_for_rwp(); @@ -91,34 +91,34 @@ static enum gicv3_intid_range get_intid_range(unsigned int intid) return INVALID_RANGE; } -static uint64_t gicv3_read_iar(void) +static u64 gicv3_read_iar(void) { - uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1); + u64 irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1); dsb(sy); return irqstat; } -static void gicv3_write_eoir(uint32_t irq) +static void gicv3_write_eoir(u32 irq) { write_sysreg_s(irq, SYS_ICC_EOIR1_EL1); isb(); } -static void gicv3_write_dir(uint32_t irq) +static void gicv3_write_dir(u32 irq) { write_sysreg_s(irq, SYS_ICC_DIR_EL1); isb(); } -static void gicv3_set_priority_mask(uint64_t mask) +static void gicv3_set_priority_mask(u64 mask) { write_sysreg_s(mask, SYS_ICC_PMR_EL1); } static void gicv3_set_eoi_split(bool split) { - uint32_t val; + u32 val; /* * All other fields are read-only, so no need to read CTLR first. In @@ -129,29 +129,29 @@ static void gicv3_set_eoi_split(bool split) isb(); } -uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset) +u32 gicv3_reg_readl(u32 cpu_or_dist, u64 offset) { volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist)); return readl(base + offset); } -void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val) +void gicv3_reg_writel(u32 cpu_or_dist, u64 offset, u32 reg_val) { volatile void *base = cpu_or_dist & DIST_BIT ? 
GICD_BASE_GVA : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist)); writel(reg_val, base + offset); } -uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask) +u32 gicv3_getl_fields(u32 cpu_or_dist, u64 offset, u32 mask) { return gicv3_reg_readl(cpu_or_dist, offset) & mask; } -void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset, - uint32_t mask, uint32_t reg_val) +void gicv3_setl_fields(u32 cpu_or_dist, u64 offset, + u32 mask, u32 reg_val) { - uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; + u32 tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; tmp |= (reg_val & mask); gicv3_reg_writel(cpu_or_dist, offset, tmp); @@ -165,14 +165,14 @@ void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset, * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being * marked as "Reserved" in the Distributor map. */ -static void gicv3_access_reg(uint32_t intid, uint64_t offset, - uint32_t reg_bits, uint32_t bits_per_field, - bool write, uint32_t *val) +static void gicv3_access_reg(u32 intid, u64 offset, + u32 reg_bits, u32 bits_per_field, + bool write, u32 *val) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); enum gicv3_intid_range intid_range = get_intid_range(intid); - uint32_t fields_per_reg, index, mask, shift; - uint32_t cpu_or_dist; + u32 fields_per_reg, index, mask, shift; + u32 cpu_or_dist; GUEST_ASSERT(bits_per_field <= reg_bits); GUEST_ASSERT(!write || *val < (1U << bits_per_field)); @@ -197,32 +197,32 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset, *val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift; } -static void gicv3_write_reg(uint32_t intid, uint64_t offset, - uint32_t reg_bits, uint32_t bits_per_field, uint32_t val) +static void gicv3_write_reg(u32 intid, u64 offset, + u32 reg_bits, u32 bits_per_field, u32 val) { gicv3_access_reg(intid, offset, reg_bits, bits_per_field, true, &val); } -static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset, - 
uint32_t reg_bits, uint32_t bits_per_field) +static u32 gicv3_read_reg(u32 intid, u64 offset, + u32 reg_bits, u32 bits_per_field) { - uint32_t val; + u32 val; gicv3_access_reg(intid, offset, reg_bits, bits_per_field, false, &val); return val; } -static void gicv3_set_priority(uint32_t intid, uint32_t prio) +static void gicv3_set_priority(u32 intid, u32 prio) { gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio); } /* Sets the intid to be level-sensitive or edge-triggered. */ -static void gicv3_irq_set_config(uint32_t intid, bool is_edge) +static void gicv3_irq_set_config(u32 intid, bool is_edge) { - uint32_t val; + u32 val; /* N/A for private interrupts. */ GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE); @@ -230,57 +230,57 @@ static void gicv3_irq_set_config(uint32_t intid, bool is_edge) gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val); } -static void gicv3_irq_enable(uint32_t intid) +static void gicv3_irq_enable(u32 intid) { bool is_spi = get_intid_range(intid) == SPI_RANGE; - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1); gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); } -static void gicv3_irq_disable(uint32_t intid) +static void gicv3_irq_disable(u32 intid) { bool is_spi = get_intid_range(intid) == SPI_RANGE; - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1); gicv3_wait_for_rwp(is_spi ? 
DIST_BIT : cpu); } -static void gicv3_irq_set_active(uint32_t intid) +static void gicv3_irq_set_active(u32 intid) { gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1); } -static void gicv3_irq_clear_active(uint32_t intid) +static void gicv3_irq_clear_active(u32 intid) { gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1); } -static bool gicv3_irq_get_active(uint32_t intid) +static bool gicv3_irq_get_active(u32 intid) { return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1); } -static void gicv3_irq_set_pending(uint32_t intid) +static void gicv3_irq_set_pending(u32 intid) { gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1); } -static void gicv3_irq_clear_pending(uint32_t intid) +static void gicv3_irq_clear_pending(u32 intid) { gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1); } -static bool gicv3_irq_get_pending(uint32_t intid) +static bool gicv3_irq_get_pending(u32 intid) { return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1); } static void gicv3_enable_redist(volatile void *redist_base) { - uint32_t val = readl(redist_base + GICR_WAKER); + u32 val = readl(redist_base + GICR_WAKER); unsigned int count = 100000; /* 1s */ val &= ~GICR_WAKER_ProcessorSleep; @@ -293,10 +293,10 @@ static void gicv3_enable_redist(volatile void *redist_base) } } -static void gicv3_set_group(uint32_t intid, bool grp) +static void gicv3_set_group(u32 intid, bool grp) { - uint32_t cpu_or_dist; - uint32_t val; + u32 cpu_or_dist; + u32 val; cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? 
DIST_BIT : guest_get_vcpuid(); val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4); @@ -424,8 +424,8 @@ const struct gic_common_ops gicv3_ops = { .gic_irq_set_group = gicv3_set_group, }; -void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size, - vm_paddr_t pend_table) +void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size, + gpa_t pend_table) { volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid()); diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c index 7f9fdcf42ae64f..1188b578121dd3 100644 --- a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c +++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c @@ -54,7 +54,7 @@ static unsigned long its_find_baser(unsigned int type) return -1; } -static void its_install_table(unsigned int type, vm_paddr_t base, size_t size) +static void its_install_table(unsigned int type, gpa_t base, size_t size) { unsigned long offset = its_find_baser(type); u64 baser; @@ -69,7 +69,7 @@ static void its_install_table(unsigned int type, vm_paddr_t base, size_t size) its_write_u64(offset, baser); } -static void its_install_cmdq(vm_paddr_t base, size_t size) +static void its_install_cmdq(gpa_t base, size_t size) { u64 cbaser; @@ -82,9 +82,8 @@ static void its_install_cmdq(vm_paddr_t base, size_t size) its_write_u64(GITS_CBASER, cbaser); } -void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz, - vm_paddr_t device_tbl, size_t device_tbl_sz, - vm_paddr_t cmdq, size_t cmdq_size) +void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl, + size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size) { u32 ctlr; @@ -204,7 +203,7 @@ static void its_send_cmd(void *cmdq_base, struct its_cmd_block *cmd) } } -void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base, +void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base, size_t itt_size, bool valid) { struct its_cmd_block cmd = {}; diff --git 
a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c index 43ea40edc53304..01325bf4d36fcc 100644 --- a/tools/testing/selftests/kvm/lib/arm64/processor.c +++ b/tools/testing/selftests/kvm/lib/arm64/processor.c @@ -19,20 +19,20 @@ #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000 -static vm_vaddr_t exception_handlers; +static gva_t exception_handlers; -static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) +static u64 pgd_index(struct kvm_vm *vm, gva_t gva) { unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; - uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; + u64 mask = (1UL << (vm->va_bits - shift)) - 1; return (gva >> shift) & mask; } -static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) +static u64 pud_index(struct kvm_vm *vm, gva_t gva) { unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + u64 mask = (1UL << (vm->page_shift - 3)) - 1; TEST_ASSERT(vm->mmu.pgtable_levels == 4, "Mode %d does not have 4 page table levels", vm->mode); @@ -40,10 +40,10 @@ static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) return (gva >> shift) & mask; } -static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) +static u64 pmd_index(struct kvm_vm *vm, gva_t gva) { unsigned int shift = (vm->page_shift - 3) + vm->page_shift; - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + u64 mask = (1UL << (vm->page_shift - 3)) - 1; TEST_ASSERT(vm->mmu.pgtable_levels >= 3, "Mode %d does not have >= 3 page table levels", vm->mode); @@ -51,9 +51,9 @@ static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) return (gva >> shift) & mask; } -static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva) +static u64 pte_index(struct kvm_vm *vm, gva_t gva) { - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + u64 mask = (1UL << (vm->page_shift - 3)) - 1; return (gva >> vm->page_shift) & mask; } @@ -63,9 +63,9 
@@ static inline bool use_lpa2_pte_format(struct kvm_vm *vm) (vm->pa_bits > 48 || vm->va_bits > 48); } -static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs) +static u64 addr_pte(struct kvm_vm *vm, u64 pa, u64 attrs) { - uint64_t pte; + u64 pte; if (use_lpa2_pte_format(vm)) { pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift); @@ -81,9 +81,9 @@ static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs) return pte; } -static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte) +static u64 pte_addr(struct kvm_vm *vm, u64 pte) { - uint64_t pa; + u64 pa; if (use_lpa2_pte_format(vm)) { pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift); @@ -97,13 +97,13 @@ static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte) return pa; } -static uint64_t ptrs_per_pgd(struct kvm_vm *vm) +static u64 ptrs_per_pgd(struct kvm_vm *vm) { unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; return 1 << (vm->va_bits - shift); } -static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm) +static u64 __maybe_unused ptrs_per_pte(struct kvm_vm *vm) { return 1 << (vm->page_shift - 3); } @@ -121,47 +121,46 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) vm->mmu.pgd_created = true; } -static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - uint64_t flags) +static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, + u64 flags) { - uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT); - uint64_t pg_attr; - uint64_t *ptep; + u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT); + u64 pg_attr; + u64 *ptep; - TEST_ASSERT((vaddr % vm->page_size) == 0, + TEST_ASSERT((gva % vm->page_size) == 0, "Virtual address not on page boundary,\n" - " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % vm->page_size) == 0, - "Physical address 
not on page boundary,\n" - " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, - "Physical address beyond beyond maximum supported,\n" - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); - - ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8; + " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); + TEST_ASSERT((gpa % vm->page_size) == 0, + "Physical address not on page boundary,\n" + " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gpa, vm->max_gfn, vm->page_size); + + ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8; if (!*ptep) *ptep = addr_pte(vm, vm_alloc_page_table(vm), PGD_TYPE_TABLE | PTE_VALID); switch (vm->mmu.pgtable_levels) { case 4: - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8; + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8; if (!*ptep) *ptep = addr_pte(vm, vm_alloc_page_table(vm), PUD_TYPE_TABLE | PTE_VALID); /* fall through */ case 3: - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8; + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8; if (!*ptep) *ptep = addr_pte(vm, vm_alloc_page_table(vm), PMD_TYPE_TABLE | PTE_VALID); /* fall through */ case 2: - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8; + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8; break; default: TEST_FAIL("Page table levels must be 2, 3, or 4"); @@ -171,19 +170,19 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, if (!use_lpa2_pte_format(vm)) pg_attr |= PTE_SHARED; - *ptep = addr_pte(vm, 
paddr, pg_attr); + *ptep = addr_pte(vm, gpa, pg_attr); } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - uint64_t attr_idx = MT_NORMAL; + u64 attr_idx = MT_NORMAL; - _virt_pg_map(vm, vaddr, paddr, attr_idx); + _virt_pg_map(vm, gva, gpa, attr_idx); } -uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level) +u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level) { - uint64_t *ptep; + u64 *ptep; if (!vm->mmu.pgd_created) goto unmapped_gva; @@ -225,23 +224,23 @@ uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level exit(EXIT_FAILURE); } -uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) +u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva) { return virt_get_pte_hva_at_level(vm, gva, 3); } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { - uint64_t *ptep = virt_get_pte_hva(vm, gva); + u64 *ptep = virt_get_pte_hva(vm, gva); return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); } -static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) +static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level) { #ifdef DEBUG static const char * const type[] = { "", "pud", "pmd", "pte" }; - uint64_t pte, *ptep; + u64 pte, *ptep; if (level == 4) return; @@ -256,10 +255,10 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p #endif } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { int level = 4 - (vm->mmu.pgtable_levels - 1); - uint64_t pgd, *ptep; + u64 pgd, *ptep; if (!vm->mmu.pgd_created) return; @@ -298,7 +297,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) { struct kvm_vcpu_init default_init = { .target = -1, }; struct 
kvm_vm *vm = vcpu->vm; - uint64_t sctlr_el1, tcr_el1, ttbr0_el1; + u64 sctlr_el1, tcr_el1, ttbr0_el1; if (!init) { kvm_get_default_vcpu_target(vm, &default_init); @@ -397,9 +396,9 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { - uint64_t pstate, pc; + u64 pstate, pc; pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate)); pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); @@ -410,29 +409,29 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) { - vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code); } -static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, +static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, struct kvm_vcpu_init *init) { size_t stack_size; - uint64_t stack_vaddr; + gva_t stack_gva; struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); stack_size = vm->page_size == 4096 ? 
DEFAULT_STACK_PGS * vm->page_size : vm->page_size; - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, - DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); + stack_gva = __vm_alloc(vm, stack_size, + DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); aarch64_vcpu_setup(vcpu, init); - vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_gva + stack_size); return vcpu; } -struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, +struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, struct kvm_vcpu_init *init, void *guest_code) { struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init); @@ -442,7 +441,7 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, return vcpu; } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { return __aarch64_vcpu_add(vm, vcpu_id, NULL); } @@ -459,13 +458,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
for (i = 0; i < num; i++) { vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]), - va_arg(ap, uint64_t)); + va_arg(ap, u64)); } va_end(ap); } -void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec) +void kvm_exit_unexpected_exception(int vector, u64 ec, bool valid_ec) { ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec); while (1) @@ -498,7 +497,7 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) { extern char vectors; - vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (uint64_t)&vectors); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (u64)&vectors); } void route_exception(struct ex_regs *regs, int vector) @@ -536,10 +535,10 @@ void route_exception(struct ex_regs *regs, int vector) void vm_init_descriptor_tables(struct kvm_vm *vm) { - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), - vm->page_size, MEM_REGION_DATA); + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size, + MEM_REGION_DATA); - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; } void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, @@ -563,13 +562,13 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, handlers->exception_handlers[vector][0] = handler; } -uint32_t guest_get_vcpuid(void) +u32 guest_get_vcpuid(void) { return read_sysreg(tpidr_el1); } -static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran, - uint32_t not_sup_val, uint32_t ipa52_min_val) +static u32 max_ipa_for_page_size(u32 vm_ipa, u32 gran, + u32 not_sup_val, u32 ipa52_min_val) { if (gran == not_sup_val) return 0; @@ -579,16 +578,16 @@ static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran, return min(vm_ipa, 48U); } -void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, - uint32_t *ipa16k, uint32_t *ipa64k) +void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k, + u32 *ipa16k, u32 
*ipa64k) { struct kvm_vcpu_init preferred_init; int kvm_fd, vm_fd, vcpu_fd, err; - uint64_t val; - uint32_t gran; + u64 val; + u32 gran; struct kvm_one_reg reg = { .id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1), - .addr = (uint64_t)&val, + .addr = (u64)&val, }; kvm_fd = open_kvm_dev_path_or_exit(); @@ -646,17 +645,17 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7") -void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, - uint64_t arg6, struct arm_smccc_res *res) +void smccc_hvc(u32 function_id, u64 arg0, u64 arg1, + u64 arg2, u64 arg3, u64 arg4, u64 arg5, + u64 arg6, struct arm_smccc_res *res) { __smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5, arg6, res); } -void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, - uint64_t arg6, struct arm_smccc_res *res) +void smccc_smc(u32 function_id, u64 arg0, u64 arg1, + u64 arg2, u64 arg3, u64 arg4, u64 arg5, + u64 arg6, struct arm_smccc_res *res) { __smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5, arg6, res); @@ -671,7 +670,7 @@ void kvm_selftest_arch_init(void) guest_modes_append_default(); } -void vm_vaddr_populate_bitmap(struct kvm_vm *vm) +void vm_populate_gva_bitmap(struct kvm_vm *vm) { /* * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space diff --git a/tools/testing/selftests/kvm/lib/arm64/ucall.c b/tools/testing/selftests/kvm/lib/arm64/ucall.c index ddab0ce89d4dfe..e0550ad5aa7566 100644 --- a/tools/testing/selftests/kvm/lib/arm64/ucall.c +++ b/tools/testing/selftests/kvm/lib/arm64/ucall.c @@ -6,17 +6,17 @@ */ #include "kvm_util.h" -vm_vaddr_t *ucall_exit_mmio_addr; +gva_t *ucall_exit_mmio_addr; -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { - vm_vaddr_t mmio_gva = 
vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); + gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); virt_map(vm, mmio_gva, mmio_gpa, 1); vm->ucall_mmio_addr = mmio_gpa; - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); + write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva); } void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) @@ -25,9 +25,9 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) if (run->exit_reason == KVM_EXIT_MMIO && run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) { - TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t), + TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64), "Unexpected ucall exit mmio address access"); - return (void *)(*((uint64_t *)run->mmio.data)); + return (void *)(*((u64 *)run->mmio.data)); } return NULL; diff --git a/tools/testing/selftests/kvm/lib/arm64/vgic.c b/tools/testing/selftests/kvm/lib/arm64/vgic.c index d0f7bd0984b84f..4ecebf3146a25c 100644 --- a/tools/testing/selftests/kvm/lib/arm64/vgic.c +++ b/tools/testing/selftests/kvm/lib/arm64/vgic.c @@ -41,10 +41,10 @@ bool kvm_supports_vgic_v3(void) * redistributor regions of the guest. Since it depends on the number of * vCPUs for the VM, it must be called after all the vCPUs have been created. 
*/ -int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) +int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs) { int gic_fd; - uint64_t attr; + u64 attr; unsigned int nr_gic_pages; /* Distributor setup */ @@ -77,7 +77,7 @@ void __vgic_v3_init(int fd) KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); } -int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs) { unsigned int nr_vcpus_created = 0; struct list_head *iter; @@ -104,11 +104,11 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) } /* should only work for level sensitive interrupts */ -int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) +int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level) { - uint64_t attr = 32 * (intid / 32); - uint64_t index = intid % 32; - uint64_t val; + u64 attr = 32 * (intid / 32); + u64 index = intid % 32; + u64 val; int ret; ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, @@ -122,16 +122,16 @@ int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) return ret; } -void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) +void kvm_irq_set_level_info(int gic_fd, u32 intid, int level) { int ret = _kvm_irq_set_level_info(gic_fd, intid, level); TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret)); } -int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) +int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level) { - uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK; + u32 irq = intid & KVM_ARM_IRQ_NUM_MASK; TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself " "doesn't allow injecting SGIs. 
There's no mask for it."); @@ -144,23 +144,23 @@ int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) return _kvm_irq_line(vm, irq, level); } -void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) +void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level) { int ret = _kvm_arm_irq_line(vm, intid, level); TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret)); } -static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu, - uint64_t reg_off) +static void vgic_poke_irq(int gic_fd, u32 intid, struct kvm_vcpu *vcpu, + u64 reg_off) { - uint64_t reg = intid / 32; - uint64_t index = intid % 32; - uint64_t attr = reg_off + reg * 4; - uint64_t val; + u64 reg = intid / 32; + u64 index = intid % 32; + u64 attr = reg_off + reg * 4; + u64 val; bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid); - uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS + u32 group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS : KVM_DEV_ARM_VGIC_GRP_DIST_REGS; if (intid_is_private) { @@ -183,12 +183,12 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu, kvm_device_attr_set(gic_fd, group, attr, &val); } -void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) +void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu) { vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR); } -void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) +void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu) { vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER); } diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c index f34d926d973591..b689c4df4a01a9 100644 --- a/tools/testing/selftests/kvm/lib/elf.c +++ b/tools/testing/selftests/kvm/lib/elf.c @@ -156,21 +156,20 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) TEST_ASSERT(phdr.p_memsz > 0, "Unexpected loadable segment " "memsize 
of 0,\n" " phdr index: %u p_memsz: 0x%" PRIx64, - n1, (uint64_t) phdr.p_memsz); - vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); - vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; + n1, (u64)phdr.p_memsz); + gva_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); + gva_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; seg_vend |= vm->page_size - 1; size_t seg_size = seg_vend - seg_vstart + 1; - vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart, - MEM_REGION_CODE); - TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate " + gva_t gva = __vm_alloc(vm, seg_size, seg_vstart, MEM_REGION_CODE); + TEST_ASSERT(gva == seg_vstart, "Unable to allocate " "virtual memory for segment at requested min addr,\n" " segment idx: %u\n" " seg_vstart: 0x%lx\n" - " vaddr: 0x%lx", - n1, seg_vstart, vaddr); - memset(addr_gva2hva(vm, vaddr), 0, seg_size); + " gva: 0x%lx", + n1, seg_vstart, gva); + memset(addr_gva2hva(vm, gva), 0, seg_size); /* TODO(lhuemill): Set permissions of each memory segment * based on the least-significant 3 bits of phdr.p_flags. 
*/ diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index ce3099630397bb..7a96c43b5704dc 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -20,7 +20,7 @@ void guest_modes_append_default(void) #ifdef __aarch64__ { unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); - uint32_t ipa4k, ipa16k, ipa64k; + u32 ipa4k, ipa16k, ipa64k; int i; aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k); diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c index 74627514c4d44d..7a33965349a7fd 100644 --- a/tools/testing/selftests/kvm/lib/guest_sprintf.c +++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c @@ -35,8 +35,8 @@ static int skip_atoi(const char **s) ({ \ int __res; \ \ - __res = ((uint64_t) n) % (uint32_t) base; \ - n = ((uint64_t) n) / (uint32_t) base; \ + __res = ((u64)n) % (u32)base; \ + n = ((u64)n) / (u32)base; \ __res; \ }) @@ -119,7 +119,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args) { char *str, *end; const char *s; - uint64_t num; + u64 num; int i, base; int len; @@ -216,7 +216,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args) while (--field_width > 0) APPEND_BUFFER_SAFE(str, end, ' '); APPEND_BUFFER_SAFE(str, end, - (uint8_t)va_arg(args, int)); + (u8)va_arg(args, int)); while (--field_width > 0) APPEND_BUFFER_SAFE(str, end, ' '); continue; @@ -240,7 +240,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args) flags |= SPECIAL | SMALL | ZEROPAD; } str = number(str, end, - (uint64_t)va_arg(args, void *), 16, + (u64)va_arg(args, void *), 16, field_width, precision, flags); continue; @@ -284,15 +284,15 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args) continue; } if (qualifier == 'l') - num = va_arg(args, uint64_t); + num = va_arg(args, u64); else if (qualifier == 'h') { - num = 
(uint16_t)va_arg(args, int); + num = (u16)va_arg(args, int); if (flags & SIGN) - num = (int16_t)num; + num = (s16)num; } else if (flags & SIGN) num = va_arg(args, int); else - num = va_arg(args, uint32_t); + num = va_arg(args, u32); str = number(str, end, num, base, field_width, precision, flags); } diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index f5e076591c64bd..2a76eca7029d3b 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -20,9 +20,9 @@ #define KVM_UTIL_MIN_PFN 2 -uint32_t guest_random_seed; +u32 guest_random_seed; struct guest_random_state guest_rng; -static uint32_t last_guest_seed; +static u32 last_guest_seed; static size_t vcpu_mmap_sz(void); @@ -165,7 +165,7 @@ unsigned int kvm_check_cap(long cap) return (unsigned int)ret; } -void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) +void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size) { if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); @@ -189,7 +189,7 @@ static void vm_open(struct kvm_vm *vm) vm->stats.fd = -1; } -const char *vm_guest_mode_string(uint32_t i) +const char *vm_guest_mode_string(u32 i) { static const char * const strings[] = { [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", @@ -267,7 +267,7 @@ _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) * based on the MSB of the VA. On architectures with this behavior * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1]. */ -__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) +__weak void vm_populate_gva_bitmap(struct kvm_vm *vm) { sparsebit_set_num(vm->vpages_valid, 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); @@ -385,7 +385,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape) /* Limit to VA-bit canonical virtual addresses. 
*/ vm->vpages_valid = sparsebit_alloc(); - vm_vaddr_populate_bitmap(vm); + vm_populate_gva_bitmap(vm); /* Limit physical addresses to PA-bits. */ vm->max_gfn = vm_compute_max_gfn(vm); @@ -396,12 +396,12 @@ struct kvm_vm *____vm_create(struct vm_shape shape) return vm; } -static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, - uint32_t nr_runnable_vcpus, - uint64_t extra_mem_pages) +static u64 vm_nr_pages_required(enum vm_guest_mode mode, + u32 nr_runnable_vcpus, + u64 extra_mem_pages) { - uint64_t page_size = vm_guest_mode_params[mode].page_size; - uint64_t nr_pages; + u64 page_size = vm_guest_mode_params[mode].page_size; + u64 nr_pages; TEST_ASSERT(nr_runnable_vcpus, "Use vm_create_barebones() for VMs that _never_ have vCPUs"); @@ -435,7 +435,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, return vm_adjust_num_guest_pages(mode, nr_pages); } -void kvm_set_files_rlimit(uint32_t nr_vcpus) +void kvm_set_files_rlimit(u32 nr_vcpus) { /* * Each vCPU will open two file descriptors: the vCPU itself and the @@ -476,10 +476,10 @@ static bool is_guest_memfd_required(struct vm_shape shape) #endif } -struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, - uint64_t nr_extra_pages) +struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus, + u64 nr_extra_pages) { - uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, + u64 nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, nr_extra_pages); struct userspace_mem_region *slot0; struct kvm_vm *vm; @@ -546,8 +546,8 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, * extra_mem_pages is only used to calculate the maximum page table size, * no real memory allocation for non-slot0 memory in this function. 
*/ -struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, - uint64_t extra_mem_pages, +struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus, + u64 extra_mem_pages, void *guest_code, struct kvm_vcpu *vcpus[]) { struct kvm_vm *vm; @@ -566,7 +566,7 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, struct kvm_vcpu **vcpu, - uint64_t extra_mem_pages, + u64 extra_mem_pages, void *guest_code) { struct kvm_vcpu *vcpus[1]; @@ -614,7 +614,7 @@ void kvm_vm_restart(struct kvm_vm *vmp) } __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, - uint32_t vcpu_id) + u32 vcpu_id) { return __vm_vcpu_add(vm, vcpu_id); } @@ -636,9 +636,9 @@ int __pin_task_to_cpu(pthread_t task, int cpu) return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset); } -static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) +static u32 parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) { - uint32_t pcpu = atoi_non_negative("CPU number", cpu_str); + u32 pcpu = atoi_non_negative("CPU number", cpu_str); TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask), "Not allowed to run on pCPU '%d', check cgroups?", pcpu); @@ -662,7 +662,7 @@ void kvm_print_vcpu_pinning_help(void) " (default: no pinning)\n", name, name); } -void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], +void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[], int nr_vcpus) { cpu_set_t allowed_mask; @@ -715,15 +715,15 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], * region exists. 
*/ static struct userspace_mem_region * -userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) +userspace_mem_region_find(struct kvm_vm *vm, u64 start, u64 end) { struct rb_node *node; for (node = vm->regions.gpa_tree.rb_node; node; ) { struct userspace_mem_region *region = container_of(node, struct userspace_mem_region, gpa_node); - uint64_t existing_start = region->region.guest_phys_addr; - uint64_t existing_end = region->region.guest_phys_addr + u64 existing_start = region->region.guest_phys_addr; + u64 existing_end = region->region.guest_phys_addr + region->region.memory_size - 1; if (start <= existing_end && end >= existing_start) return region; @@ -918,8 +918,8 @@ static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree, } -int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva) +int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva) { struct kvm_userspace_memory_region region = { .slot = slot, @@ -932,8 +932,8 @@ int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion); } -void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva) +void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva) { int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); @@ -945,9 +945,9 @@ void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, __TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2), \ "KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)") -int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva, - uint32_t guest_memfd, uint64_t guest_memfd_offset) +int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 
slot, u32 flags, + gpa_t gpa, u64 size, void *hva, + u32 guest_memfd, u64 guest_memfd_offset) { struct kvm_userspace_memory_region2 region = { .slot = slot, @@ -964,9 +964,9 @@ int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flag return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, ®ion); } -void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva, - uint32_t guest_memfd, uint64_t guest_memfd_offset) +void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva, + u32 guest_memfd, u64 guest_memfd_offset) { int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, guest_memfd, guest_memfd_offset); @@ -978,8 +978,8 @@ void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags /* FIXME: This thing needs to be ripped apart and rewritten. */ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags, - int guest_memfd, uint64_t guest_memfd_offset) + gpa_t gpa, u32 slot, u64 npages, u32 flags, + int guest_memfd, u64 guest_memfd_offset) { int ret; struct userspace_mem_region *region; @@ -1016,8 +1016,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, " requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n" " existing gpa: 0x%lx size: 0x%lx", gpa, npages, vm->page_size, - (uint64_t) region->region.guest_phys_addr, - (uint64_t) region->region.memory_size); + (u64)region->region.guest_phys_addr, + (u64)region->region.memory_size); /* Confirm no region with the requested slot already exists. 
*/ hash_for_each_possible(vm->regions.slot_hash, region, slot_node, @@ -1027,11 +1027,11 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, TEST_FAIL("A mem region with the requested slot " "already exists.\n" - " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" - " existing slot: %u paddr: 0x%lx size: 0x%lx", + " requested slot: %u gpa: 0x%lx npages: 0x%lx\n" + " existing slot: %u gpa: 0x%lx size: 0x%lx", slot, gpa, npages, region->region.slot, - (uint64_t) region->region.guest_phys_addr, - (uint64_t) region->region.memory_size); + (u64)region->region.guest_phys_addr, + (u64)region->region.memory_size); } /* Allocate and initialize new mem region structure. */ @@ -1085,7 +1085,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, if (flags & KVM_MEM_GUEST_MEMFD) { if (guest_memfd < 0) { - uint32_t guest_memfd_flags = 0; + u32 guest_memfd_flags = 0; TEST_ASSERT(!guest_memfd_offset, "Offset must be zero when creating new guest_memfd"); guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); @@ -1141,8 +1141,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, void vm_userspace_mem_region_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t gpa, uint32_t slot, uint64_t npages, - uint32_t flags) + gpa_t gpa, u32 slot, u64 npages, u32 flags) { vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0); } @@ -1163,7 +1162,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, * memory slot ID). */ struct userspace_mem_region * -memslot2region(struct kvm_vm *vm, uint32_t memslot) +memslot2region(struct kvm_vm *vm, u32 memslot) { struct userspace_mem_region *region; @@ -1194,7 +1193,7 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot) * Sets the flags of the memory region specified by the value of slot, * to the values given by flags. 
*/ -void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) +void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags) { int ret; struct userspace_mem_region *region; @@ -1210,7 +1209,7 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) ret, errno, slot, flags); } -void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot) +void vm_mem_region_reload(struct kvm_vm *vm, u32 slot) { struct userspace_mem_region *region = memslot2region(vm, slot); struct kvm_userspace_memory_region2 tmp = region->region; @@ -1234,7 +1233,7 @@ void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot) * * Change the gpa of a memory region. */ -void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) +void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa) { struct userspace_mem_region *region; int ret; @@ -1263,7 +1262,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) * * Delete a memory region. */ -void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) +void vm_mem_region_delete(struct kvm_vm *vm, u32 slot) { struct userspace_mem_region *region = memslot2region(vm, slot); @@ -1273,18 +1272,18 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) __vm_mem_region_delete(vm, region); } -void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, +void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 base, u64 size, bool punch_hole) { const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? 
FALLOC_FL_PUNCH_HOLE : 0); struct userspace_mem_region *region; - uint64_t end = base + size; - uint64_t gpa, len; + u64 end = base + size; + gpa_t gpa, len; off_t fd_offset; int ret; for (gpa = base; gpa < end; gpa += len) { - uint64_t offset; + u64 offset; region = userspace_mem_region_find(vm, gpa, gpa); TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, @@ -1292,7 +1291,7 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, offset = gpa - region->region.guest_phys_addr; fd_offset = region->region.guest_memfd_offset + offset; - len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); + len = min_t(u64, end - gpa, region->region.memory_size - offset); ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx", @@ -1317,7 +1316,7 @@ static size_t vcpu_mmap_sz(void) return ret; } -static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) +static bool vcpu_exists(struct kvm_vm *vm, u32 vcpu_id) { struct kvm_vcpu *vcpu; @@ -1333,7 +1332,7 @@ static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id. * No additional vCPU setup is done. Returns the vCPU. */ -struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { struct kvm_vcpu *vcpu; @@ -1367,33 +1366,18 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) } /* - * VM Virtual Address Unused Gap - * - * Input Args: - * vm - Virtual Machine - * sz - Size (bytes) - * vaddr_min - Minimum Virtual Address - * - * Output Args: None - * - * Return: - * Lowest virtual address at or above vaddr_min, with at least - * sz unused bytes. TEST_ASSERT failure if no area of at least - * size sz is available. 
- * - * Within the VM specified by vm, locates the lowest starting virtual - * address >= vaddr_min, that has at least sz unallocated bytes. A + * Within the VM specified by @vm, locates the lowest starting guest virtual + * address >= @min_gva, that has at least @sz unallocated bytes. A * TEST_ASSERT failure occurs for invalid input or no area of at least - * sz unallocated bytes >= vaddr_min is available. + * @sz unallocated bytes >= @min_gva is available. */ -vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, - vm_vaddr_t vaddr_min) +gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva) { - uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; + u64 pages = (sz + vm->page_size - 1) >> vm->page_shift; /* Determine lowest permitted virtual page index. */ - uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; - if ((pgidx_start * vm->page_size) < vaddr_min) + u64 pgidx_start = (min_gva + vm->page_size - 1) >> vm->page_shift; + if ((pgidx_start * vm->page_size) < min_gva) goto no_va_found; /* Loop over section with enough valid virtual page indexes. 
*/ @@ -1430,7 +1414,7 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, } while (pgidx_start != 0); no_va_found: - TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages); + TEST_FAIL("No gva of specified pages available, pages: 0x%lx", pages); /* NOT REACHED */ return -1; @@ -1452,145 +1436,91 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, return pgidx_start * vm->page_size; } -static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, - vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type, - bool protected) +static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type, bool protected) { - uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); + u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); virt_pgd_alloc(vm); - vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, - KVM_UTIL_MIN_PFN * vm->page_size, - vm->memslots[type], protected); + gpa_t gpa = __vm_phy_pages_alloc(vm, pages, + KVM_UTIL_MIN_PFN * vm->page_size, + vm->memslots[type], protected); /* * Find an unused range of virtual page addresses of at least * pages in length. */ - vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); + gva_t gva_start = vm_unused_gva_gap(vm, sz, min_gva); /* Map the virtual pages. 
*/ - for (vm_vaddr_t vaddr = vaddr_start; pages > 0; - pages--, vaddr += vm->page_size, paddr += vm->page_size) { + for (gva_t gva = gva_start; pages > 0; + pages--, gva += vm->page_size, gpa += vm->page_size) { - virt_pg_map(vm, vaddr, paddr); + virt_pg_map(vm, gva, gpa); } - return vaddr_start; + return gva_start; } -vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type) +gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type) { - return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, - vm_arch_has_protected_memory(vm)); + return ____vm_alloc(vm, sz, min_gva, type, + vm_arch_has_protected_memory(vm)); } -vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, - vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type) +gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type) { - return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); + return ____vm_alloc(vm, sz, min_gva, type, false); } /* - * VM Virtual Address Allocate - * - * Input Args: - * vm - Virtual Machine - * sz - Size in bytes - * vaddr_min - Minimum starting virtual address - * - * Output Args: None - * - * Return: - * Starting guest virtual address - * - * Allocates at least sz bytes within the virtual address space of the vm - * given by vm. The allocated bytes are mapped to a virtual address >= - * the address given by vaddr_min. Note that each allocation uses a - * a unique set of pages, with the minimum real allocation being at least - * a page. The allocated physical space comes from the TEST_DATA memory region. + * Allocates at least sz bytes within the virtual address space of the VM + * given by @vm. The allocated bytes are mapped to a virtual address >= the + * address given by @min_gva. Note that each allocation uses a unique set + * of pages, with the minimum real allocation being at least a page.
The + * allocated physical space comes from the TEST_DATA memory region. */ -vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) +gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva) { - return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); + return __vm_alloc(vm, sz, min_gva, MEM_REGION_TEST_DATA); } -/* - * VM Virtual Address Allocate Pages - * - * Input Args: - * vm - Virtual Machine - * - * Output Args: None - * - * Return: - * Starting guest virtual address - * - * Allocates at least N system pages worth of bytes within the virtual address - * space of the vm. - */ -vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) +gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages) { - return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); + return vm_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); } -vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) +gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) { - return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); + return __vm_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); } -/* - * VM Virtual Address Allocate Page - * - * Input Args: - * vm - Virtual Machine - * - * Output Args: None - * - * Return: - * Starting guest virtual address - * - * Allocates at least one system page worth of bytes within the virtual address - * space of the vm. - */ -vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) +gva_t vm_alloc_page(struct kvm_vm *vm) { - return vm_vaddr_alloc_pages(vm, 1); + return vm_alloc_pages(vm, 1); } /* - * Map a range of VM virtual address to the VM's physical address - * - * Input Args: - * vm - Virtual Machine - * vaddr - Virtuall address to map - * paddr - VM Physical Address - * npages - The number of pages to map + * Map a range of VM virtual address to the VM's physical address. 
* - * Output Args: None - * - * Return: None - * - * Within the VM given by @vm, creates a virtual translation for - * @npages starting at @vaddr to the page range starting at @paddr. + * Within the VM given by @vm, creates a virtual translation for @npages + * starting at @gva to the page range starting at @gpa. */ -void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - unsigned int npages) +void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages) { size_t page_size = vm->page_size; size_t size = npages * page_size; - TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"); - TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); + TEST_ASSERT(gva + size > gva, "Vaddr overflow"); + TEST_ASSERT(gpa + size > gpa, "Paddr overflow"); while (npages--) { - virt_pg_map(vm, vaddr, paddr); + virt_pg_map(vm, gva, gpa); - vaddr += page_size; - paddr += page_size; + gva += page_size; + gpa += page_size; } } @@ -1611,7 +1541,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, * address providing the memory to the vm physical address is returned. * A TEST_ASSERT failure occurs if no region containing gpa exists. */ -void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) +void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa) { struct userspace_mem_region *region; @@ -1644,7 +1574,7 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) * VM physical address is returned. A TEST_ASSERT failure occurs if no * region containing hva exists. 
*/ -vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) +gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva) { struct rb_node *node; @@ -1655,7 +1585,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) if (hva >= region->host_mem) { if (hva <= (region->host_mem + region->region.memory_size - 1)) - return (vm_paddr_t)((uintptr_t) + return (gpa_t)((uintptr_t) region->region.guest_phys_addr + (hva - (uintptr_t)region->host_mem)); @@ -1687,7 +1617,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) * memory without mapping said memory in the guest's address space. And, for * userfaultfd-based demand paging, to do so without triggering userfaults. */ -void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) +void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa) { struct userspace_mem_region *region; uintptr_t offset; @@ -1781,8 +1711,8 @@ struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu) void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) { - uint32_t page_size = getpagesize(); - uint32_t size = vcpu->vm->dirty_ring_size; + u32 page_size = getpagesize(); + u32 size = vcpu->vm->dirty_ring_size; TEST_ASSERT(size > 0, "Should enable dirty ring first"); @@ -1811,7 +1741,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) * Device Ioctl */ -int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) +int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr) { struct kvm_device_attr attribute = { .group = group, @@ -1822,7 +1752,7 @@ int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute); } -int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) +int __kvm_test_create_device(struct kvm_vm *vm, u64 type) { struct kvm_create_device create_dev = { .type = type, @@ -1832,7 +1762,7 @@ int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); } -int __kvm_create_device(struct kvm_vm *vm, uint64_t type) +int 
__kvm_create_device(struct kvm_vm *vm, u64 type) { struct kvm_create_device create_dev = { .type = type, @@ -1846,7 +1776,7 @@ int __kvm_create_device(struct kvm_vm *vm, uint64_t type) return err ? : create_dev.fd; } -int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val) +int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val) { struct kvm_device_attr kvmattr = { .group = group, @@ -1858,7 +1788,7 @@ int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val) return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr); } -int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val) +int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val) { struct kvm_device_attr kvmattr = { .group = group, @@ -1874,7 +1804,7 @@ int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val) * IRQ related functions. */ -int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) +int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level) { struct kvm_irq_level irq_level = { .irq = irq, @@ -1884,7 +1814,7 @@ int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); } -void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) +void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level) { int ret = _kvm_irq_line(vm, irq, level); @@ -1906,7 +1836,7 @@ struct kvm_irq_routing *kvm_gsi_routing_create(void) } void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, - uint32_t gsi, uint32_t pin) + u32 gsi, u32 pin) { int i; @@ -1956,7 +1886,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) * Dumps the current state of the VM given by vm, to the FILE stream * given by stream. 
*/ -void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { int ctr; struct userspace_mem_region *region; @@ -1969,8 +1899,8 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx " "host_virt: %p\n", indent + 2, "", - (uint64_t) region->region.guest_phys_addr, - (uint64_t) region->region.memory_size, + (u64)region->region.guest_phys_addr, + (u64)region->region.memory_size, region->host_mem); fprintf(stream, "%*sunused_phy_pages: ", indent + 2, ""); sparsebit_dump(stream, region->unused_phy_pages, 0); @@ -2077,7 +2007,7 @@ const char *exit_reason_str(unsigned int exit_reason) * Input Args: * vm - Virtual Machine * num - number of pages - * paddr_min - Physical address minimum + * min_gpa - Physical address minimum * memslot - Memory region to allocate page from * protected - True if the pages will be used as protected/private memory * @@ -2087,29 +2017,29 @@ const char *exit_reason_str(unsigned int exit_reason) * Starting physical address * * Within the VM specified by vm, locates a range of available physical - * pages at or above paddr_min. If found, the pages are marked as in use + * pages at or above min_gpa. If found, the pages are marked as in use * and their base address is returned. A TEST_ASSERT failure occurs if - * not enough pages are available at or above paddr_min. + * not enough pages are available at or above min_gpa. 
*/ -vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot, - bool protected) +gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + gpa_t min_gpa, u32 memslot, + bool protected) { struct userspace_mem_region *region; sparsebit_idx_t pg, base; TEST_ASSERT(num > 0, "Must allocate at least one page"); - TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " + TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address " "not divisible by page size.\n" - " paddr_min: 0x%lx page_size: 0x%x", - paddr_min, vm->page_size); + " min_gpa: 0x%lx page_size: 0x%x", + min_gpa, vm->page_size); region = memslot2region(vm, memslot); TEST_ASSERT(!protected || region->protected_phy_pages, "Region doesn't support protected memory"); - base = pg = paddr_min >> vm->page_shift; + base = pg = min_gpa >> vm->page_shift; do { for (; pg < base + num; ++pg) { if (!sparsebit_is_set(region->unused_phy_pages, pg)) { @@ -2121,8 +2051,8 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, if (pg == 0) { fprintf(stderr, "No guest physical page available, " - "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", - paddr_min, vm->page_size, memslot); + "min_gpa: 0x%lx page_size: 0x%x memslot: %u\n", + min_gpa, vm->page_size, memslot); fputs("---- vm dump ----\n", stderr); vm_dump(stderr, vm, 2); abort(); @@ -2137,13 +2067,12 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, return base * vm->page_size; } -vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, - uint32_t memslot) +gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot) { - return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); + return vm_phy_pages_alloc(vm, 1, min_gpa, memslot); } -vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) +gpa_t vm_alloc_page_table(struct kvm_vm *vm) { return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, vm->memslots[MEM_REGION_PT]); @@ -2161,7 +2090,7 @@ vm_paddr_t 
vm_alloc_page_table(struct kvm_vm *vm) * Return: * Equivalent host virtual address */ -void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) +void *addr_gva2hva(struct kvm_vm *vm, gva_t gva) { return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); } @@ -2259,7 +2188,7 @@ struct kvm_stats_desc *read_stats_descriptors(int stats_fd, * Read the data values of a specified stat from the binary stats interface. */ void read_stat_data(int stats_fd, struct kvm_stats_header *header, - struct kvm_stats_desc *desc, uint64_t *data, + struct kvm_stats_desc *desc, u64 *data, size_t max_elements) { size_t nr_elements = min_t(ssize_t, desc->size, max_elements); @@ -2280,7 +2209,7 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header, } void kvm_get_stat(struct kvm_binary_stats *stats, const char *name, - uint64_t *data, size_t max_elements) + u64 *data, size_t max_elements) { struct kvm_stats_desc *desc; size_t size_desc; @@ -2357,7 +2286,7 @@ void __attribute((constructor)) kvm_selftest_init(void) kvm_selftest_arch_init(); } -bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) +bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa) { sparsebit_idx_t pg = 0; struct userspace_mem_region *region; @@ -2365,10 +2294,10 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) if (!vm_arch_has_protected_memory(vm)) return false; - region = userspace_mem_region_find(vm, paddr, paddr); - TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); + region = userspace_mem_region_find(vm, gpa, gpa); + TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa); - pg = paddr >> vm->page_shift; + pg = gpa >> vm->page_shift; return sparsebit_is_set(region->protected_phy_pages, pg); } diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c index ee4ad3b1d2a4fb..64d91fb76522d5 100644 --- a/tools/testing/selftests/kvm/lib/loongarch/processor.c +++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c @@ 
-12,32 +12,32 @@ #define LOONGARCH_PAGE_TABLE_PHYS_MIN 0x200000 #define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000 -static vm_paddr_t invalid_pgtable[4]; -static vm_vaddr_t exception_handlers; +static gpa_t invalid_pgtable[4]; +static gva_t exception_handlers; -static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) +static u64 virt_pte_index(struct kvm_vm *vm, gva_t gva, int level) { unsigned int shift; - uint64_t mask; + u64 mask; shift = level * (vm->page_shift - 3) + vm->page_shift; mask = (1UL << (vm->page_shift - 3)) - 1; return (gva >> shift) & mask; } -static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) +static u64 pte_addr(struct kvm_vm *vm, u64 entry) { return entry & ~((0x1UL << vm->page_shift) - 1); } -static uint64_t ptrs_per_pte(struct kvm_vm *vm) +static u64 ptrs_per_pte(struct kvm_vm *vm) { return 1 << (vm->page_shift - 3); } -static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child) +static void virt_set_pgtable(struct kvm_vm *vm, gpa_t table, gpa_t child) { - uint64_t *ptep; + u64 *ptep; int i, ptrs_per_pte; ptep = addr_gpa2hva(vm, table); @@ -49,7 +49,7 @@ static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t chi void virt_arch_pgd_alloc(struct kvm_vm *vm) { int i; - vm_paddr_t child, table; + gpa_t child, table; if (vm->mmu.pgd_created) return; @@ -67,16 +67,16 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) vm->mmu.pgd_created = true; } -static int virt_pte_none(uint64_t *ptep, int level) +static int virt_pte_none(u64 *ptep, int level) { return *ptep == invalid_pgtable[level]; } -static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc) +static u64 *virt_populate_pte(struct kvm_vm *vm, gva_t gva, int alloc) { int level; - uint64_t *ptep; - vm_paddr_t child; + u64 *ptep; + gpa_t child; if (!vm->mmu.pgd_created) goto unmapped_gva; @@ -106,43 +106,42 @@ static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc) 
exit(EXIT_FAILURE); } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { - uint64_t *ptep; + u64 *ptep; ptep = virt_populate_pte(vm, gva, 0); - TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva); + TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped\n", gva); return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - uint32_t prot_bits; - uint64_t *ptep; + u32 prot_bits; + u64 *ptep; - TEST_ASSERT((vaddr % vm->page_size) == 0, + TEST_ASSERT((gva % vm->page_size) == 0, "Virtual address not on page boundary,\n" - "vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % vm->page_size) == 0, + "gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); + TEST_ASSERT((gpa % vm->page_size) == 0, "Physical address not on page boundary,\n" - "paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + "gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, "Physical address beyond maximum supported,\n" - "paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); + "gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gpa, vm->max_gfn, vm->page_size); - ptep = virt_populate_pte(vm, vaddr, 1); + ptep = virt_populate_pte(vm, gva, 1); prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER; - WRITE_ONCE(*ptep, paddr | prot_bits); + WRITE_ONCE(*ptep, gpa | prot_bits); } -static void pte_dump(FILE *stream, 
struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) +static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level) { - uint64_t pte, *ptep; + u64 pte, *ptep; static const char * const type[] = { "pte", "pmd", "pud", "pgd"}; if (level < 0) @@ -158,7 +157,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p } } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { int level; @@ -169,7 +168,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) pte_dump(stream, vm, indent, vm->mmu.pgd, level); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { } @@ -206,8 +205,9 @@ void vm_init_descriptor_tables(struct kvm_vm *vm) { void *addr; - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), + LOONGARCH_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); addr = addr_gva2hva(vm, vm->handlers); memset(addr, 0, vm->page_size); @@ -223,7 +223,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn hand handlers->exception_handlers[vector] = handler; } -uint32_t guest_get_vcpuid(void) +u32 guest_get_vcpuid(void) { return csr_read(LOONGARCH_CSR_CPUID); } @@ -241,36 +241,36 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
va_start(ap, num); for (i = 0; i < num; i++) - regs.gpr[i + 4] = va_arg(ap, uint64_t); + regs.gpr[i + 4] = va_arg(ap, u64); va_end(ap); vcpu_regs_set(vcpu, ®s); } -static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) +static void loongarch_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) { __vcpu_set_reg(vcpu, id, val); } -static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) +static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, u64 id, u64 val) { - uint64_t cfgid; + u64 cfgid; cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id; __vcpu_set_reg(vcpu, cfgid, val); } -static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr) +static void loongarch_get_csr(struct kvm_vcpu *vcpu, u64 id, void *addr) { - uint64_t csrid; + u64 csrid; csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id; __vcpu_get_reg(vcpu, csrid, addr); } -static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) +static void loongarch_set_csr(struct kvm_vcpu *vcpu, u64 id, u64 val) { - uint64_t csrid; + u64 csrid; csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id; __vcpu_set_reg(vcpu, csrid, val); @@ -354,8 +354,8 @@ void loongarch_vcpu_setup(struct kvm_vcpu *vcpu) loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE); /* LOONGARCH_CSR_KS1 is used for exception stack */ - val = __vm_vaddr_alloc(vm, vm->page_size, - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); + val = __vm_alloc(vm, vm->page_size, LOONGARCH_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); TEST_ASSERT(val != 0, "No memory for exception stack"); val = val + vm->page_size; loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val); @@ -369,23 +369,23 @@ void loongarch_vcpu_setup(struct kvm_vcpu *vcpu) loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { size_t stack_size; - 
uint64_t stack_vaddr; + u64 stack_gva; struct kvm_regs regs; struct kvm_vcpu *vcpu; vcpu = __vm_vcpu_add(vm, vcpu_id); stack_size = vm->page_size; - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); - TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack"); + stack_gva = __vm_alloc(vm, stack_size, + LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); + TEST_ASSERT(stack_gva != 0, "No memory for vm stack"); loongarch_vcpu_setup(vcpu); /* Setup guest general purpose registers */ vcpu_regs_get(vcpu, ®s); - regs.gpr[3] = stack_vaddr + stack_size; + regs.gpr[3] = stack_gva + stack_size; vcpu_regs_set(vcpu, ®s); return vcpu; @@ -397,6 +397,6 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) /* Setup guest PC register */ vcpu_regs_get(vcpu, ®s); - regs.pc = (uint64_t)guest_code; + regs.pc = (u64)guest_code; vcpu_regs_set(vcpu, ®s); } diff --git a/tools/testing/selftests/kvm/lib/loongarch/ucall.c b/tools/testing/selftests/kvm/lib/loongarch/ucall.c index fc6cbb50573fe4..cd49a3440ead40 100644 --- a/tools/testing/selftests/kvm/lib/loongarch/ucall.c +++ b/tools/testing/selftests/kvm/lib/loongarch/ucall.c @@ -9,17 +9,17 @@ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each * VM), it must not be accessed from host code. 
*/ -vm_vaddr_t *ucall_exit_mmio_addr; +gva_t *ucall_exit_mmio_addr; -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { - vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); + gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); virt_map(vm, mmio_gva, mmio_gpa, 1); vm->ucall_mmio_addr = mmio_gpa; - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); + write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva); } void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) @@ -28,10 +28,10 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) if (run->exit_reason == KVM_EXIT_MMIO && run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) { - TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t), + TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64), "Unexpected ucall exit mmio address access"); - return (void *)(*((uint64_t *)run->mmio.data)); + return (void *)(*((u64 *)run->mmio.data)); } return NULL; diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c index 1ea735d66e1509..6dcd15910a0608 100644 --- a/tools/testing/selftests/kvm/lib/memstress.c +++ b/tools/testing/selftests/kvm/lib/memstress.c @@ -16,7 +16,7 @@ struct memstress_args memstress_args; * Guest virtual memory offset of the testing memory slot. * Must not conflict with identity mapped test code. */ -static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; +static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; struct vcpu_thread { /* The index of the vCPU. */ @@ -44,15 +44,15 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; * Continuously write to the first 8 bytes of each page in the * specified region. 
*/ -void memstress_guest_code(uint32_t vcpu_idx) +void memstress_guest_code(u32 vcpu_idx) { struct memstress_args *args = &memstress_args; struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx]; struct guest_random_state rand_state; - uint64_t gva; - uint64_t pages; - uint64_t addr; - uint64_t page; + gva_t gva; + u64 pages; + u64 addr; + u64 page; int i; rand_state = new_guest_random_state(guest_random_seed + vcpu_idx); @@ -76,9 +76,9 @@ void memstress_guest_code(uint32_t vcpu_idx) addr = gva + (page * args->guest_page_size); if (__guest_random_bool(&rand_state, args->write_percent)) - *(uint64_t *)addr = 0x0123456789ABCDEF; + *(u64 *)addr = 0x0123456789ABCDEF; else - READ_ONCE(*(uint64_t *)addr); + READ_ONCE(*(u64 *)addr); } GUEST_SYNC(1); @@ -87,7 +87,7 @@ void memstress_guest_code(uint32_t vcpu_idx) void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[], - uint64_t vcpu_memory_bytes, + u64 vcpu_memory_bytes, bool partition_vcpu_memory_access) { struct memstress_args *args = &memstress_args; @@ -122,15 +122,15 @@ void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, } struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, - uint64_t vcpu_memory_bytes, int slots, + u64 vcpu_memory_bytes, int slots, enum vm_mem_backing_src_type backing_src, bool partition_vcpu_memory_access) { struct memstress_args *args = &memstress_args; struct kvm_vm *vm; - uint64_t guest_num_pages, slot0_pages = 0; - uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src); - uint64_t region_end_gfn; + u64 guest_num_pages, slot0_pages = 0; + u64 backing_src_pagesz = get_backing_src_pagesz(backing_src); + u64 region_end_gfn; int i; pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); @@ -202,8 +202,8 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, /* Add extra memory slots for testing */ for (i = 0; i < slots; i++) { - uint64_t region_pages = guest_num_pages / slots; - vm_paddr_t 
region_start = args->gpa + region_pages * args->guest_page_size * i; + u64 region_pages = guest_num_pages / slots; + gpa_t region_start = args->gpa + region_pages * args->guest_page_size * i; vm_userspace_mem_region_add(vm, backing_src, region_start, MEMSTRESS_MEM_SLOT_INDEX + i, @@ -232,7 +232,7 @@ void memstress_destroy_vm(struct kvm_vm *vm) kvm_vm_free(vm); } -void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent) +void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent) { memstress_args.write_percent = write_percent; sync_global_to_guest(vm, memstress_args.write_percent); @@ -244,7 +244,7 @@ void memstress_set_random_access(struct kvm_vm *vm, bool random_access) sync_global_to_guest(vm, memstress_args.random_access); } -uint64_t __weak memstress_nested_pages(int nr_vcpus) +u64 __weak memstress_nested_pages(int nr_vcpus) { return 0; } @@ -349,7 +349,7 @@ void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int sl } void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], - int slots, uint64_t pages_per_slot) + int slots, u64 pages_per_slot) { int i; @@ -360,7 +360,7 @@ void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], } } -unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot) +unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot) { unsigned long **bitmaps; int i; diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c index 067c6b2c15b05d..ded5429f34483d 100644 --- a/tools/testing/selftests/kvm/lib/riscv/processor.c +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -15,9 +15,9 @@ #define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 -static vm_vaddr_t exception_handlers; +static gva_t exception_handlers; -bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext) +bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext) { unsigned long value = 0; int ret; @@ -27,32 
+27,32 @@ bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext) return !ret && !!value; } -static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) +static u64 pte_addr(struct kvm_vm *vm, u64 entry) { return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) << PGTBL_PAGE_SIZE_SHIFT; } -static uint64_t ptrs_per_pte(struct kvm_vm *vm) +static u64 ptrs_per_pte(struct kvm_vm *vm) { - return PGTBL_PAGE_SIZE / sizeof(uint64_t); + return PGTBL_PAGE_SIZE / sizeof(u64); } -static uint64_t pte_index_mask[] = { +static u64 pte_index_mask[] = { PGTBL_L0_INDEX_MASK, PGTBL_L1_INDEX_MASK, PGTBL_L2_INDEX_MASK, PGTBL_L3_INDEX_MASK, }; -static uint32_t pte_index_shift[] = { +static u32 pte_index_shift[] = { PGTBL_L0_INDEX_SHIFT, PGTBL_L1_INDEX_SHIFT, PGTBL_L2_INDEX_SHIFT, PGTBL_L3_INDEX_SHIFT, }; -static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) +static u64 pte_index(struct kvm_vm *vm, gva_t gva, int level) { TEST_ASSERT(level > -1, "Negative page table level (%d) not possible", level); @@ -75,26 +75,25 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) vm->mmu.pgd_created = true; } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - uint64_t *ptep, next_ppn; + u64 *ptep, next_ppn; int level = vm->mmu.pgtable_levels - 1; - TEST_ASSERT((vaddr % vm->page_size) == 0, + TEST_ASSERT((gva % vm->page_size) == 0, "Virtual address not on page boundary,\n" - " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % vm->page_size) == 0, + " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); + TEST_ASSERT((gpa % vm->page_size) == 0, "Physical address not on page boundary,\n" - " paddr: 0x%lx 
vm->page_size: 0x%x", paddr, vm->page_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, "Physical address beyond maximum supported,\n" - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gpa, vm->max_gfn, vm->page_size); - ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8; + ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8; if (!*ptep) { next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT; *ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) | @@ -104,7 +103,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) while (level > -1) { ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + - pte_index(vm, vaddr, level) * 8; + pte_index(vm, gva, level) * 8; if (!*ptep && level > 0) { next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT; @@ -114,14 +113,14 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) level--; } - paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT; - *ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) | + gpa = gpa >> PGTBL_PAGE_SIZE_SHIFT; + *ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) | PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK; } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { - uint64_t *ptep; + u64 *ptep; int level = vm->mmu.pgtable_levels - 1; if (!vm->mmu.pgd_created) @@ -148,12 +147,12 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) exit(1); } -static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, - uint64_t page, int level) +static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, + u64 page, int level) { #ifdef DEBUG static const char *const type[] = { "pte", "pmd", "pud", "p4d"}; - uint64_t pte, *ptep; + u64 pte, *ptep; if (level < 0) return; @@ 
-170,11 +169,11 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, #endif } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { struct kvm_mmu *mmu = &vm->mmu; int level = mmu->pgtable_levels - 1; - uint64_t pgd, *ptep; + u64 pgd, *ptep; if (!mmu->pgd_created) return; @@ -233,7 +232,7 @@ void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu) vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { struct kvm_riscv_core core; @@ -311,20 +310,20 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { int r; size_t stack_size; - unsigned long stack_vaddr; + unsigned long stack_gva; unsigned long current_gp = 0; struct kvm_mp_state mps; struct kvm_vcpu *vcpu; stack_size = vm->page_size == 4096 ? 
DEFAULT_STACK_PGS * vm->page_size : vm->page_size; - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, - DEFAULT_RISCV_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); + stack_gva = __vm_alloc(vm, stack_size, + DEFAULT_RISCV_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); vcpu = __vm_vcpu_add(vm, vcpu_id); riscv_vcpu_mmu_setup(vcpu); @@ -344,7 +343,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp); /* Setup stack pointer and program counter of guest */ - vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size); + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_gva + stack_size); /* Setup sscratch for guest_get_vcpuid() */ vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id); @@ -358,7 +357,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) { va_list ap; - uint64_t id = RISCV_CORE_REG(regs.a0); + u64 id = RISCV_CORE_REG(regs.a0); int i; TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n" @@ -393,7 +392,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
id = RISCV_CORE_REG(regs.a7); break; } - vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t)); + vcpu_set_reg(vcpu, id, va_arg(ap, u64)); } va_end(ap); @@ -449,10 +448,10 @@ void vcpu_init_vector_tables(struct kvm_vcpu *vcpu) void vm_init_vector_tables(struct kvm_vm *vm) { - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), - vm->page_size, MEM_REGION_DATA); + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size, + MEM_REGION_DATA); - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; } void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler) @@ -470,7 +469,7 @@ void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handle handlers->exception_handlers[1][0] = handler; } -uint32_t guest_get_vcpuid(void) +u32 guest_get_vcpuid(void) { return csr_read(CSR_SSCRATCH); } @@ -544,10 +543,10 @@ void kvm_selftest_arch_init(void) unsigned long riscv64_get_satp_mode(void) { int kvm_fd, vm_fd, vcpu_fd, err; - uint64_t val; + u64 val; struct kvm_one_reg reg = { .id = RISCV_CONFIG_REG(satp_mode), - .addr = (uint64_t)&val, + .addr = (u64)&val, }; kvm_fd = open_kvm_dev_path_or_exit(); diff --git a/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c index 2c432fa164f194..f5480473f19256 100644 --- a/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c +++ b/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c @@ -13,7 +13,7 @@ static void guest_code(void) { - uint64_t diag318_info = 0x12345678; + u64 diag318_info = 0x12345678; asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info)); } @@ -23,13 +23,13 @@ static void guest_code(void) * we create an ad-hoc VM here to handle the instruction then extract the * necessary data. It is up to the caller to decide what to do with that data. 
*/ -static uint64_t diag318_handler(void) +static u64 diag318_handler(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct kvm_run *run; - uint64_t reg; - uint64_t diag318_info; + u64 reg; + u64 diag318_info; vm = vm_create_with_one_vcpu(&vcpu, guest_code); vcpu_run(vcpu); @@ -51,9 +51,9 @@ static uint64_t diag318_handler(void) return diag318_info; } -uint64_t get_diag318_info(void) +u64 get_diag318_info(void) { - static uint64_t diag318_info; + static u64 diag318_info; static bool printed_skip; /* diff --git a/tools/testing/selftests/kvm/lib/s390/facility.c b/tools/testing/selftests/kvm/lib/s390/facility.c index d540812d911ae3..9a778054f07f5c 100644 --- a/tools/testing/selftests/kvm/lib/s390/facility.c +++ b/tools/testing/selftests/kvm/lib/s390/facility.c @@ -10,5 +10,5 @@ #include "facility.h" -uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS]; +u64 stfl_doublewords[NB_STFL_DOUBLEWORDS]; bool stfle_flag; diff --git a/tools/testing/selftests/kvm/lib/s390/processor.c b/tools/testing/selftests/kvm/lib/s390/processor.c index 6a9a660413a747..a9adb3782b3528 100644 --- a/tools/testing/selftests/kvm/lib/s390/processor.c +++ b/tools/testing/selftests/kvm/lib/s390/processor.c @@ -12,7 +12,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) { - vm_paddr_t paddr; + gpa_t gpa; TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", vm->page_size); @@ -20,12 +20,12 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) if (vm->mmu.pgd_created) return; - paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION, + gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION, KVM_GUEST_PAGE_TABLE_MIN_PADDR, vm->memslots[MEM_REGION_PT]); - memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); + memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size); - vm->mmu.pgd = paddr; + vm->mmu.pgd = gpa; vm->mmu.pgd_created = true; } @@ -34,9 +34,9 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) * a page table (ri == 4). 
Returns a suitable region/segment table entry * which points to the freshly allocated pages. */ -static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri) +static u64 virt_alloc_region(struct kvm_vm *vm, int ri) { - uint64_t taddr; + u64 taddr; taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0); @@ -47,26 +47,24 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri) | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH); } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { int ri, idx; - uint64_t *entry; + u64 *entry; TEST_ASSERT((gva % vm->page_size) == 0, - "Virtual address not on page boundary,\n" - " vaddr: 0x%lx vm->page_size: 0x%x", - gva, vm->page_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (gva >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", - gva); + "Virtual address not on page boundary,\n" + " gva: 0x%lx vm->page_size: 0x%x", + gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); TEST_ASSERT((gpa % vm->page_size) == 0, "Physical address not on page boundary,\n" - " paddr: 0x%lx vm->page_size: 0x%x", + " gpa: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, "Physical address beyond beyond maximum supported,\n" - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", gva, vm->max_gfn, vm->page_size); /* Walk through region and segment tables */ @@ -86,10 +84,10 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) entry[idx] = gpa; } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { int ri, idx; - uint64_t *entry; + u64 *entry; TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 
0x%x", vm->page_size); @@ -111,10 +109,10 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) return (entry[idx] & ~0xffful) + (gva & 0xffful); } -static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent, - uint64_t ptea_start) +static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, u8 indent, + u64 ptea_start) { - uint64_t *pte, ptea; + u64 *pte, ptea; for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) { pte = addr_gpa2hva(vm, ptea); @@ -125,10 +123,10 @@ static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent, } } -static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent, - uint64_t reg_tab_addr) +static void virt_dump_region(FILE *stream, struct kvm_vm *vm, u8 indent, + u64 reg_tab_addr) { - uint64_t addr, *entry; + u64 addr, *entry; for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) { entry = addr_gpa2hva(vm, addr); @@ -147,7 +145,7 @@ static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent, } } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { if (!vm->mmu.pgd_created) return; @@ -160,10 +158,10 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) vcpu->run->psw_addr = (uintptr_t)guest_code; } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { size_t stack_size = DEFAULT_STACK_PGS * getpagesize(); - uint64_t stack_vaddr; + u64 stack_gva; struct kvm_regs regs; struct kvm_sregs sregs; struct kvm_vcpu *vcpu; @@ -171,15 +169,14 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", vm->page_size); - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, - DEFAULT_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); + stack_gva = __vm_alloc(vm, stack_size, 
DEFAULT_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); vcpu = __vm_vcpu_add(vm, vcpu_id); /* Setup guest registers */ vcpu_regs_get(vcpu, ®s); - regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160; + regs.gprs[15] = stack_gva + (DEFAULT_STACK_PGS * getpagesize()) - 160; vcpu_regs_set(vcpu, ®s); vcpu_sregs_get(vcpu, &sregs); @@ -206,13 +203,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) vcpu_regs_get(vcpu, ®s); for (i = 0; i < num; i++) - regs.gprs[i + 2] = va_arg(ap, uint64_t); + regs.gprs[i + 2] = va_arg(ap, u64); vcpu_regs_set(vcpu, ®s); va_end(ap); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n", indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr); diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index a99188f87a38cf..4d845000de154f 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c @@ -76,11 +76,11 @@ * the use of a binary-search tree, where each node contains at least * the following members: * - * typedef uint64_t sparsebit_idx_t; - * typedef uint64_t sparsebit_num_t; + * typedef u64 sparsebit_idx_t; + * typedef u64 sparsebit_num_t; * * sparsebit_idx_t idx; - * uint32_t mask; + * u32 mask; * sparsebit_num_t num_after; * * The idx member contains the bit index of the first bit described by this @@ -162,7 +162,7 @@ #define DUMP_LINE_MAX 100 /* Does not include indent amount */ -typedef uint32_t mask_t; +typedef u32 mask_t; #define MASK_BITS (sizeof(mask_t) * CHAR_BIT) struct node { @@ -2056,9 +2056,9 @@ unsigned char get8(void) return ch; } -uint64_t get64(void) +u64 get64(void) { - uint64_t x; + u64 x; x = get8(); x = (x << 8) | get8(); @@ -2074,9 +2074,9 @@ int main(void) { s = sparsebit_alloc(); for (;;) { - uint8_t op = get8() & 0xf; - uint64_t first = 
get64(); - uint64_t last = get64(); + u8 op = get8() & 0xf; + u64 first = get64(); + u64 last = get64(); operate(op, first, last); } diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index 8a1848586a8570..bab1bd2b775b6d 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -30,15 +30,15 @@ void __attribute__((used)) expect_sigbus_handler(int signum) * Park-Miller LCG using standard constants. */ -struct guest_random_state new_guest_random_state(uint32_t seed) +struct guest_random_state new_guest_random_state(u32 seed) { struct guest_random_state s = {.seed = seed}; return s; } -uint32_t guest_random_u32(struct guest_random_state *state) +u32 guest_random_u32(struct guest_random_state *state) { - state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1); + state->seed = (u64)state->seed * 48271 % ((u32)(1 << 31) - 1); return state->seed; } @@ -83,12 +83,12 @@ size_t parse_size(const char *size) return base << shift; } -int64_t timespec_to_ns(struct timespec ts) +s64 timespec_to_ns(struct timespec ts) { - return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec; + return (s64)ts.tv_nsec + 1000000000LL * (s64)ts.tv_sec; } -struct timespec timespec_add_ns(struct timespec ts, int64_t ns) +struct timespec timespec_add_ns(struct timespec ts, s64 ns) { struct timespec res; @@ -101,15 +101,15 @@ struct timespec timespec_add_ns(struct timespec ts, int64_t ns) struct timespec timespec_add(struct timespec ts1, struct timespec ts2) { - int64_t ns1 = timespec_to_ns(ts1); - int64_t ns2 = timespec_to_ns(ts2); + s64 ns1 = timespec_to_ns(ts1); + s64 ns2 = timespec_to_ns(ts2); return timespec_add_ns((struct timespec){0}, ns1 + ns2); } struct timespec timespec_sub(struct timespec ts1, struct timespec ts2) { - int64_t ns1 = timespec_to_ns(ts1); - int64_t ns2 = timespec_to_ns(ts2); + s64 ns1 = timespec_to_ns(ts1); + s64 ns2 = timespec_to_ns(ts2); return 
timespec_add_ns((struct timespec){0}, ns1 - ns2); } @@ -123,7 +123,7 @@ struct timespec timespec_elapsed(struct timespec start) struct timespec timespec_div(struct timespec ts, int divisor) { - int64_t ns = timespec_to_ns(ts) / divisor; + s64 ns = timespec_to_ns(ts) / divisor; return timespec_add_ns((struct timespec){0}, ns); } @@ -225,7 +225,7 @@ size_t get_def_hugetlb_pagesz(void) #define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) #define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB) -const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) +const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i) { static const struct vm_mem_backing_src_alias aliases[] = { [VM_MEM_SRC_ANONYMOUS] = { @@ -317,9 +317,9 @@ const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) #define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)) -size_t get_backing_src_pagesz(uint32_t i) +size_t get_backing_src_pagesz(u32 i) { - uint32_t flag = vm_mem_backing_src_alias(i)->flag; + u32 flag = vm_mem_backing_src_alias(i)->flag; switch (i) { case VM_MEM_SRC_ANONYMOUS: @@ -335,7 +335,7 @@ size_t get_backing_src_pagesz(uint32_t i) } } -bool is_backing_src_hugetlb(uint32_t i) +bool is_backing_src_hugetlb(u32 i) { return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB); } diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c index 42151e57195360..029ce21f9f2fa8 100644 --- a/tools/testing/selftests/kvm/lib/ucall_common.c +++ b/tools/testing/selftests/kvm/lib/ucall_common.c @@ -14,7 +14,7 @@ struct ucall_header { struct ucall ucalls[KVM_MAX_VCPUS]; }; -int ucall_nr_pages_required(uint64_t page_size) +int ucall_nr_pages_required(u64 page_size) { return align_up(sizeof(struct ucall_header), page_size) / page_size; } @@ -25,16 +25,16 @@ int ucall_nr_pages_required(uint64_t page_size) */ static struct ucall_header *ucall_pool; -void ucall_init(struct kvm_vm *vm, vm_paddr_t 
mmio_gpa) +void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa) { struct ucall_header *hdr; struct ucall *uc; - vm_vaddr_t vaddr; + gva_t gva; int i; - vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, - MEM_REGION_DATA); - hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr); + gva = vm_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, + MEM_REGION_DATA); + hdr = (struct ucall_header *)addr_gva2hva(vm, gva); memset(hdr, 0, sizeof(*hdr)); for (i = 0; i < KVM_MAX_VCPUS; ++i) { @@ -42,7 +42,7 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) uc->hva = uc; } - write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr); + write_guest_global(vm, ucall_pool, (struct ucall_header *)gva); ucall_arch_init(vm, mmio_gpa); } @@ -79,7 +79,7 @@ static void ucall_free(struct ucall *uc) clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use); } -void ucall_assert(uint64_t cmd, const char *exp, const char *file, +void ucall_assert(u64 cmd, const char *exp, const char *file, unsigned int line, const char *fmt, ...) { struct ucall *uc; @@ -88,20 +88,20 @@ void ucall_assert(uint64_t cmd, const char *exp, const char *file, uc = ucall_alloc(); uc->cmd = cmd; - WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp)); - WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file)); + WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (u64)(exp)); + WRITE_ONCE(uc->args[GUEST_FILE], (u64)(file)); WRITE_ONCE(uc->args[GUEST_LINE], line); va_start(va, fmt); guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); va_end(va); - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + ucall_arch_do_ucall((gva_t)uc->hva); ucall_free(uc); } -void ucall_fmt(uint64_t cmd, const char *fmt, ...) +void ucall_fmt(u64 cmd, const char *fmt, ...) { struct ucall *uc; va_list va; @@ -113,12 +113,12 @@ void ucall_fmt(uint64_t cmd, const char *fmt, ...) 
guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); va_end(va); - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + ucall_arch_do_ucall((gva_t)uc->hva); ucall_free(uc); } -void ucall(uint64_t cmd, int nargs, ...) +void ucall(u64 cmd, int nargs, ...) { struct ucall *uc; va_list va; @@ -132,15 +132,15 @@ void ucall(uint64_t cmd, int nargs, ...) va_start(va, nargs); for (i = 0; i < nargs; ++i) - WRITE_ONCE(uc->args[i], va_arg(va, uint64_t)); + WRITE_ONCE(uc->args[i], va_arg(va, u64)); va_end(va); - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + ucall_arch_do_ucall((gva_t)uc->hva); ucall_free(uc); } -uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) +u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) { struct ucall ucall; void *addr; diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c index 5bde176cedd59b..ef8d76f71f8370 100644 --- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c +++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c @@ -27,7 +27,7 @@ static void *uffd_handler_thread_fn(void *arg) { struct uffd_reader_args *reader_args = (struct uffd_reader_args *)arg; int uffd = reader_args->uffd; - int64_t pages = 0; + s64 pages = 0; struct timespec start; struct timespec ts_diff; struct epoll_event evt; @@ -100,8 +100,8 @@ static void *uffd_handler_thread_fn(void *arg) } struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, - void *hva, uint64_t len, - uint64_t num_readers, + void *hva, u64 len, + u64 num_readers, uffd_handler_t handler) { struct uffd_desc *uffd_desc; @@ -109,7 +109,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, int uffd; struct uffdio_api uffdio_api; struct uffdio_register uffdio_register; - uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY; + u64 expected_ioctls = ((u64)1) << _UFFDIO_COPY; int ret, i; PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n", @@ -132,7 +132,7 @@ struct uffd_desc 
*uffd_setup_demand_paging(int uffd_mode, useconds_t delay, /* In order to get minor faults, prefault via the alias. */ if (is_minor) - expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE; + expected_ioctls = ((u64)1) << _UFFDIO_CONTINUE; uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno); @@ -141,9 +141,9 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, uffdio_api.features = 0; TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1, "ioctl UFFDIO_API failed: %" PRIu64, - (uint64_t)uffdio_api.api); + (u64)uffdio_api.api); - uffdio_register.range.start = (uint64_t)hva; + uffdio_register.range.start = (u64)hva; uffdio_register.range.len = len; uffdio_register.mode = uffd_mode; TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1, diff --git a/tools/testing/selftests/kvm/lib/x86/apic.c b/tools/testing/selftests/kvm/lib/x86/apic.c index 89153a333e83c2..5182fd0d6a76cf 100644 --- a/tools/testing/selftests/kvm/lib/x86/apic.c +++ b/tools/testing/selftests/kvm/lib/x86/apic.c @@ -14,7 +14,7 @@ void apic_disable(void) void xapic_enable(void) { - uint64_t val = rdmsr(MSR_IA32_APICBASE); + u64 val = rdmsr(MSR_IA32_APICBASE); /* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */ if (val & MSR_IA32_APICBASE_EXTD) { diff --git a/tools/testing/selftests/kvm/lib/x86/hyperv.c b/tools/testing/selftests/kvm/lib/x86/hyperv.c index 15bc8cd583aa42..d200c5c26e2eaa 100644 --- a/tools/testing/selftests/kvm/lib/x86/hyperv.c +++ b/tools/testing/selftests/kvm/lib/x86/hyperv.c @@ -76,23 +76,23 @@ bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature) } struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, - vm_vaddr_t *p_hv_pages_gva) + gva_t *p_hv_pages_gva) { - vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm); + gva_t hv_pages_gva = vm_alloc_page(vm); struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva); /* Setup of a region of guest 
memory for the VP Assist page. */ - hv->vp_assist = (void *)vm_vaddr_alloc_page(vm); + hv->vp_assist = (void *)vm_alloc_page(vm); hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist); hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist); /* Setup of a region of guest memory for the partition assist page. */ - hv->partition_assist = (void *)vm_vaddr_alloc_page(vm); + hv->partition_assist = (void *)vm_alloc_page(vm); hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist); hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist); /* Setup of a region of guest memory for the enlightened VMCS. */ - hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm); + hv->enlightened_vmcs = (void *)vm_alloc_page(vm); hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs); hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs); @@ -100,9 +100,9 @@ struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, return hv; } -int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) +int enable_vp_assist(u64 vp_assist_pa, void *vp_assist) { - uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | + u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val); diff --git a/tools/testing/selftests/kvm/lib/x86/memstress.c b/tools/testing/selftests/kvm/lib/x86/memstress.c index f53414ba710341..61cf952cd2dc2e 100644 --- a/tools/testing/selftests/kvm/lib/x86/memstress.c +++ b/tools/testing/selftests/kvm/lib/x86/memstress.c @@ -16,7 +16,7 @@ #include "svm_util.h" #include "vmx.h" -void memstress_l2_guest_code(uint64_t vcpu_id) +void memstress_l2_guest_code(u64 vcpu_id) { memstress_guest_code(vcpu_id); vmcall(); @@ -32,7 +32,7 @@ __asm__( #define L2_GUEST_STACK_SIZE 64 -static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id) +static void l1_vmx_code(struct vmx_pages *vmx, 
u64 vcpu_id) { unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; unsigned long *rsp; @@ -51,7 +51,7 @@ static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id) GUEST_DONE(); } -static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id) +static void l1_svm_code(struct svm_test_data *svm, u64 vcpu_id) { unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; unsigned long *rsp; @@ -67,7 +67,7 @@ static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id) } -static void memstress_l1_guest_code(void *data, uint64_t vcpu_id) +static void memstress_l1_guest_code(void *data, u64 vcpu_id) { if (this_cpu_has(X86_FEATURE_VMX)) l1_vmx_code(data, vcpu_id); @@ -75,7 +75,7 @@ static void memstress_l1_guest_code(void *data, uint64_t vcpu_id) l1_svm_code(data, vcpu_id); } -uint64_t memstress_nested_pages(int nr_vcpus) +u64 memstress_nested_pages(int nr_vcpus) { /* * 513 page tables is enough to identity-map 256 TiB of L2 with 1G @@ -87,7 +87,7 @@ uint64_t memstress_nested_pages(int nr_vcpus) static void memstress_setup_ept_mappings(struct kvm_vm *vm) { - uint64_t start, end; + u64 start, end; /* * Identity map the first 4G and the test region with 1G pages so that @@ -104,7 +104,7 @@ static void memstress_setup_ept_mappings(struct kvm_vm *vm) void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]) { struct kvm_regs regs; - vm_vaddr_t nested_gva; + gva_t nested_gva; int vcpu_id; TEST_REQUIRE(kvm_cpu_has_tdp()); diff --git a/tools/testing/selftests/kvm/lib/x86/pmu.c b/tools/testing/selftests/kvm/lib/x86/pmu.c index 34cb57d1d67189..0851b74b4e46df 100644 --- a/tools/testing/selftests/kvm/lib/x86/pmu.c +++ b/tools/testing/selftests/kvm/lib/x86/pmu.c @@ -11,7 +11,7 @@ #include "processor.h" #include "pmu.h" -const uint64_t intel_pmu_arch_events[] = { +const u64 intel_pmu_arch_events[] = { INTEL_ARCH_CPU_CYCLES, INTEL_ARCH_INSTRUCTIONS_RETIRED, INTEL_ARCH_REFERENCE_CYCLES, @@ -28,7 +28,7 @@ const uint64_t intel_pmu_arch_events[] = { 
}; kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS); -const uint64_t amd_pmu_zen_events[] = { +const u64 amd_pmu_zen_events[] = { AMD_ZEN_CORE_CYCLES, AMD_ZEN_INSTRUCTIONS_RETIRED, AMD_ZEN_BRANCHES_RETIRED, @@ -50,7 +50,7 @@ kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS); * be overcounted on these certain instructions, but for Clearwater Forest * only "Instruction Retired" event is overcounted on these instructions. */ -static uint64_t get_pmu_errata(void) +static u64 get_pmu_errata(void) { if (!this_cpu_is_intel()) return 0; @@ -72,7 +72,7 @@ static uint64_t get_pmu_errata(void) } } -uint64_t pmu_errata_mask; +u64 pmu_errata_mask; void kvm_init_pmu_errata(void) { diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c index 01f0f97d443043..b51467d70f6e7d 100644 --- a/tools/testing/selftests/kvm/lib/x86/processor.c +++ b/tools/testing/selftests/kvm/lib/x86/processor.c @@ -21,13 +21,13 @@ #define KERNEL_DS 0x10 #define KERNEL_TSS 0x18 -vm_vaddr_t exception_handlers; +gva_t exception_handlers; bool host_cpu_is_amd; bool host_cpu_is_intel; bool host_cpu_is_hygon; bool host_cpu_is_amd_compatible; bool is_forced_emulation_enabled; -uint64_t guest_tsc_khz; +u64 guest_tsc_khz; const char *ex_str(int vector) { @@ -62,7 +62,7 @@ const char *ex_str(int vector) } } -static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) +static void regs_dump(FILE *stream, struct kvm_regs *regs, u8 indent) { fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx " "rcx: 0x%.16llx rdx: 0x%.16llx\n", @@ -86,7 +86,7 @@ static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) } static void segment_dump(FILE *stream, struct kvm_segment *segment, - uint8_t indent) + u8 indent) { fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x " "selector: 0x%.4x type: 0x%.2x\n", @@ -103,7 +103,7 @@ static void segment_dump(FILE *stream, struct kvm_segment *segment, } static 
void dtable_dump(FILE *stream, struct kvm_dtable *dtable, - uint8_t indent) + u8 indent) { fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x " "padding: 0x%.4x 0x%.4x 0x%.4x\n", @@ -111,7 +111,7 @@ static void dtable_dump(FILE *stream, struct kvm_dtable *dtable, dtable->padding[0], dtable->padding[1], dtable->padding[2]); } -static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent) +static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, u8 indent) { unsigned int i; @@ -207,37 +207,37 @@ void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels, } static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu, - uint64_t *parent_pte, uint64_t vaddr, int level) + u64 *parent_pte, gva_t gva, int level) { - uint64_t pt_gpa = PTE_GET_PA(*parent_pte); - uint64_t *page_table = addr_gpa2hva(vm, pt_gpa); - int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu; + u64 pt_gpa = PTE_GET_PA(*parent_pte); + u64 *page_table = addr_gpa2hva(vm, pt_gpa); + int index = (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu; TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte), "Parent PTE (level %d) not PRESENT for gva: 0x%08lx", - level + 1, vaddr); + level + 1, gva); return &page_table[index]; } -static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, - struct kvm_mmu *mmu, - uint64_t *parent_pte, - uint64_t vaddr, - uint64_t paddr, - int current_level, - int target_level) +static u64 *virt_create_upper_pte(struct kvm_vm *vm, + struct kvm_mmu *mmu, + u64 *parent_pte, + gva_t gva, + gpa_t gpa, + int current_level, + int target_level) { - uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level); + u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level); - paddr = vm_untag_gpa(vm, paddr); + gpa = vm_untag_gpa(vm, gpa); if (!is_present_pte(mmu, pte)) { *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) | PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) | PTE_ALWAYS_SET_MASK(mmu); if (current_level == target_level) - *pte |= 
PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK); + *pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK); else *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK; } else { @@ -247,39 +247,39 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, * this level. */ TEST_ASSERT(current_level != target_level, - "Cannot create hugepage at level: %u, vaddr: 0x%lx", - current_level, vaddr); + "Cannot create hugepage at level: %u, gva: 0x%lx", + current_level, gva); TEST_ASSERT(!is_huge_pte(mmu, pte), - "Cannot create page table at level: %u, vaddr: 0x%lx", - current_level, vaddr); + "Cannot create page table at level: %u, gva: 0x%lx", + current_level, gva); } return pte; } -void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr, - uint64_t paddr, int level) +void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva, + gpa_t gpa, int level) { - const uint64_t pg_size = PG_LEVEL_SIZE(level); - uint64_t *pte = &mmu->pgd; + const u64 pg_size = PG_LEVEL_SIZE(level); + u64 *pte = &mmu->pgd; int current_level; TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, "Unknown or unsupported guest mode: 0x%x", vm->mode); - TEST_ASSERT((vaddr % pg_size) == 0, + TEST_ASSERT((gva % pg_size) == 0, "Virtual address not aligned,\n" - "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % pg_size) == 0, + "gva: 0x%lx page size: 0x%lx", gva, pg_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); + TEST_ASSERT((gpa % pg_size) == 0, "Physical address not aligned,\n" - " paddr: 0x%lx page size: 0x%lx", paddr, pg_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + " gpa: 0x%lx page size: 0x%lx", gpa, pg_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, "Physical address beyond maximum supported,\n" - " paddr: 0x%lx 
vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); - TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr, - "Unexpected bits in paddr: %lx", paddr); + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gpa, vm->max_gfn, vm->page_size); + TEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa, + "Unexpected bits in gpa: %lx", gpa); TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu), "X and NX bit masks cannot be used simultaneously"); @@ -291,40 +291,40 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr, for (current_level = mmu->pgtable_levels; current_level > PG_LEVEL_4K; current_level--) { - pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr, + pte = virt_create_upper_pte(vm, mmu, pte, gva, gpa, current_level, level); if (is_huge_pte(mmu, pte)) return; } /* Fill in page table entry. */ - pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K); + pte = virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K); TEST_ASSERT(!is_present_pte(mmu, pte), - "PTE already present for 4k page at vaddr: 0x%lx", vaddr); + "PTE already present for 4k page at gva: 0x%lx", gva); *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) | PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) | - PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK); + PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK); /* * Neither SEV nor TDX supports shared page tables, so only the final * leaf PTE needs manually set the C/S-bit. 
*/ - if (vm_is_gpa_protected(vm, paddr)) + if (vm_is_gpa_protected(vm, gpa)) *pte |= PTE_C_BIT_MASK(mmu); else *pte |= PTE_S_BIT_MASK(mmu); } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - __virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K); + __virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K); } -void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - uint64_t nr_bytes, int level) +void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa, + u64 nr_bytes, int level) { - uint64_t pg_size = PG_LEVEL_SIZE(level); - uint64_t nr_pages = nr_bytes / pg_size; + u64 pg_size = PG_LEVEL_SIZE(level); + u64 nr_pages = nr_bytes / pg_size; int i; TEST_ASSERT(nr_bytes % pg_size == 0, @@ -332,16 +332,16 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, nr_bytes, pg_size); for (i = 0; i < nr_pages; i++) { - __virt_pg_map(vm, &vm->mmu, vaddr, paddr, level); - sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift, + __virt_pg_map(vm, &vm->mmu, gva, gpa, level); + sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift, nr_bytes / PAGE_SIZE); - vaddr += pg_size; - paddr += pg_size; + gva += pg_size; + gpa += pg_size; } } -static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte, +static bool vm_is_target_pte(struct kvm_mmu *mmu, u64 *pte, int *level, int current_level) { if (is_huge_pte(mmu, pte)) { @@ -354,13 +354,13 @@ static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte, return *level == current_level; } -static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, - struct kvm_mmu *mmu, - uint64_t vaddr, - int *level) +static u64 *__vm_get_page_table_entry(struct kvm_vm *vm, + struct kvm_mmu *mmu, + gva_t gva, + int *level) { int va_width = 12 + (mmu->pgtable_levels) * 9; - uint64_t *pte = &mmu->pgd; + u64 *pte = &mmu->pgd; int current_level; TEST_ASSERT(!vm->arch.is_pt_protected, @@ -371,49 +371,46 @@ static 
uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, "Unknown or unsupported guest mode: 0x%x", vm->mode); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", - vaddr); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); /* - * Check that the vaddr is a sign-extended va_width value. + * Check that the gva is a sign-extended va_width value. */ - TEST_ASSERT(vaddr == - (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))), + TEST_ASSERT(gva == (((s64)gva << (64 - va_width) >> (64 - va_width))), "Canonical check failed. The virtual address is invalid."); for (current_level = mmu->pgtable_levels; current_level > PG_LEVEL_4K; current_level--) { - pte = virt_get_pte(vm, mmu, pte, vaddr, current_level); + pte = virt_get_pte(vm, mmu, pte, gva, current_level); if (vm_is_target_pte(mmu, pte, level, current_level)) return pte; } - return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K); + return virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K); } -uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa) +u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa) { int level = PG_LEVEL_4K; return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level); } -uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr) +u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva) { int level = PG_LEVEL_4K; - return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level); + return __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { struct kvm_mmu *mmu = &vm->mmu; - uint64_t *pml4e, *pml4e_start; - uint64_t *pdpe, *pdpe_start; - uint64_t *pde, *pde_start; - uint64_t *pte, *pte_start; + u64 *pml4e, *pml4e_start; + u64 *pdpe, *pdpe_start; + u64 *pde, *pde_start; + u64 *pte, *pte_start; if 
(!mmu->pgd_created) return; @@ -423,8 +420,8 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) fprintf(stream, "%*s index hvaddr gpaddr " "addr w exec dirty\n", indent, ""); - pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd); - for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) { + pml4e_start = (u64 *)addr_gpa2hva(vm, mmu->pgd); + for (u16 n1 = 0; n1 <= 0x1ffu; n1++) { pml4e = &pml4e_start[n1]; if (!is_present_pte(mmu, pml4e)) continue; @@ -436,7 +433,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e)); pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK); - for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) { + for (u16 n2 = 0; n2 <= 0x1ffu; n2++) { pdpe = &pdpe_start[n2]; if (!is_present_pte(mmu, pdpe)) continue; @@ -449,7 +446,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) is_nx_pte(mmu, pdpe)); pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK); - for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) { + for (u16 n3 = 0; n3 <= 0x1ffu; n3++) { pde = &pde_start[n3]; if (!is_present_pte(mmu, pde)) continue; @@ -461,7 +458,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) is_nx_pte(mmu, pde)); pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK); - for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) { + for (u16 n4 = 0; n4 <= 0x1ffu; n4++) { pte = &pte_start[n4]; if (!is_present_pte(mmu, pte)) continue; @@ -475,10 +472,10 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) is_writable_pte(mmu, pte), is_nx_pte(mmu, pte), is_dirty_pte(mmu, pte), - ((uint64_t) n1 << 27) - | ((uint64_t) n2 << 18) - | ((uint64_t) n3 << 9) - | ((uint64_t) n4)); + ((u64)n1 << 27) + | ((u64)n2 << 18) + | ((u64)n3 << 9) + | ((u64)n4)); } } } @@ -498,26 +495,24 @@ bool kvm_cpu_has_tdp(void) return kvm_cpu_has_ept() || kvm_cpu_has_npt(); } -void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, - uint64_t size, int level) +void 
__tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level) { size_t page_size = PG_LEVEL_SIZE(level); size_t npages = size / page_size; - TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow"); - TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); + TEST_ASSERT(l2_gpa + size > l2_gpa, "L2 GPA overflow"); + TEST_ASSERT(gpa + size > gpa, "GPA overflow"); while (npages--) { - __virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level); - nested_paddr += page_size; - paddr += page_size; + __virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level); + l2_gpa += page_size; + gpa += page_size; } } -void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, - uint64_t size) +void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size) { - __tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K); + __tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K); } /* Prepare an identity extended page table that maps all the @@ -525,7 +520,7 @@ void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, */ void tdp_identity_map_default_memslots(struct kvm_vm *vm) { - uint32_t s, memslot = 0; + u32 s, memslot = 0; sparsebit_idx_t i, last; struct userspace_mem_region *region = memslot2region(vm, memslot); @@ -540,13 +535,13 @@ void tdp_identity_map_default_memslots(struct kvm_vm *vm) if (i > last) break; - tdp_map(vm, (uint64_t)i << vm->page_shift, - (uint64_t)i << vm->page_shift, 1 << vm->page_shift); + tdp_map(vm, (u64)i << vm->page_shift, + (u64)i << vm->page_shift, 1 << vm->page_shift); } } /* Identity map a region with 1GiB Pages. 
*/ -void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size) +void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size) { __tdp_map(vm, addr, addr, size, PG_LEVEL_1G); } @@ -618,10 +613,10 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp) segp->present = true; } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { int level = PG_LEVEL_NONE; - uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); + u64 *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); TEST_ASSERT(is_present_pte(&vm->mmu, pte), "Leaf PTE not PRESENT for gva: 0x%08lx", gva); @@ -633,7 +628,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level)); } -static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp) +static void kvm_seg_set_tss_64bit(gva_t base, struct kvm_segment *segp) { memset(segp, 0, sizeof(*segp)); segp->base = base; @@ -746,16 +741,16 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm) struct kvm_segment seg; int i; - vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); + vm->arch.gdt = __vm_alloc_page(vm, MEM_REGION_DATA); + vm->arch.idt = __vm_alloc_page(vm, MEM_REGION_DATA); + vm->handlers = __vm_alloc_page(vm, MEM_REGION_DATA); + vm->arch.tss = __vm_alloc_page(vm, MEM_REGION_DATA); /* Handlers have the same address in both address spaces.*/ for (i = 0; i < NUM_INTERRUPTS; i++) set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS); - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; kvm_seg_set_kernel_code_64bit(&seg); 
kvm_seg_fill_gdt_64bit(vm, &seg); @@ -770,9 +765,9 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm) void vm_install_exception_handler(struct kvm_vm *vm, int vector, void (*handler)(struct ex_regs *)) { - vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); + gva_t *handlers = (gva_t *)addr_gva2hva(vm, vm->handlers); - handlers[vector] = (vm_vaddr_t)handler; + handlers[vector] = (gva_t)handler; } void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) @@ -821,18 +816,17 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) vcpu_regs_set(vcpu, ®s); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { struct kvm_mp_state mp_state; struct kvm_regs regs; - vm_vaddr_t stack_vaddr; + gva_t stack_gva; struct kvm_vcpu *vcpu; - stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), - DEFAULT_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); + stack_gva = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), + DEFAULT_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); - stack_vaddr += DEFAULT_STACK_PGS * getpagesize(); + stack_gva += DEFAULT_STACK_PGS * getpagesize(); /* * Align stack to match calling sequence requirements in section "The @@ -843,9 +837,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) * If this code is ever used to launch a vCPU with 32-bit entry point it * may need to subtract 4 bytes instead of 8 bytes. 
*/ - TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE), - "__vm_vaddr_alloc() did not provide a page-aligned address"); - stack_vaddr -= 8; + TEST_ASSERT(IS_ALIGNED(stack_gva, PAGE_SIZE), + "__vm_alloc() did not provide a page-aligned address"); + stack_gva -= 8; vcpu = __vm_vcpu_add(vm, vcpu_id); vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid()); @@ -855,7 +849,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) /* Setup guest general purpose registers */ vcpu_regs_get(vcpu, ®s); regs.rflags = regs.rflags | 0x2; - regs.rsp = stack_vaddr; + regs.rsp = stack_gva; vcpu_regs_set(vcpu, ®s); /* Setup the MP state */ @@ -872,7 +866,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) return vcpu; } -struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id) { struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); @@ -907,9 +901,9 @@ const struct kvm_cpuid2 *kvm_get_supported_cpuid(void) return kvm_supported_cpuid; } -static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, - uint32_t function, uint32_t index, - uint8_t reg, uint8_t lo, uint8_t hi) +static u32 __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, + u32 function, u32 index, + u8 reg, u8 lo, u8 hi) { const struct kvm_cpuid_entry2 *entry; int i; @@ -936,14 +930,14 @@ bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid, feature.reg, feature.bit, feature.bit); } -uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, - struct kvm_x86_cpu_property property) +u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, + struct kvm_x86_cpu_property property) { return __kvm_cpu_has(cpuid, property.function, property.index, property.reg, property.lo_bit, property.hi_bit); } -uint64_t kvm_get_feature_msr(uint64_t msr_index) +u64 kvm_get_feature_msr(u64 msr_index) { struct { struct kvm_msrs header; @@ -962,7 +956,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index) return buffer.entry.data; } 
-void __vm_xsave_require_permission(uint64_t xfeature, const char *name) +void __vm_xsave_require_permission(u64 xfeature, const char *name) { int kvm_fd; u64 bitmask; @@ -1019,7 +1013,7 @@ void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid) void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, struct kvm_x86_cpu_property property, - uint32_t value) + u32 value) { struct kvm_cpuid_entry2 *entry; @@ -1034,7 +1028,7 @@ void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value); } -void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function) +void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function) { struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function); @@ -1063,7 +1057,7 @@ void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu, vcpu_set_cpuid(vcpu); } -uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index) +u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index) { struct { struct kvm_msrs header; @@ -1078,7 +1072,7 @@ uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index) return buffer.entry.data; } -int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value) +int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value) { struct { struct kvm_msrs header; @@ -1106,28 +1100,28 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
vcpu_regs_get(vcpu, ®s); if (num >= 1) - regs.rdi = va_arg(ap, uint64_t); + regs.rdi = va_arg(ap, u64); if (num >= 2) - regs.rsi = va_arg(ap, uint64_t); + regs.rsi = va_arg(ap, u64); if (num >= 3) - regs.rdx = va_arg(ap, uint64_t); + regs.rdx = va_arg(ap, u64); if (num >= 4) - regs.rcx = va_arg(ap, uint64_t); + regs.rcx = va_arg(ap, u64); if (num >= 5) - regs.r8 = va_arg(ap, uint64_t); + regs.r8 = va_arg(ap, u64); if (num >= 6) - regs.r9 = va_arg(ap, uint64_t); + regs.r9 = va_arg(ap, u64); vcpu_regs_set(vcpu, ®s); va_end(ap); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { struct kvm_regs regs; struct kvm_sregs sregs; @@ -1196,7 +1190,7 @@ const struct kvm_msr_list *kvm_get_feature_msr_index_list(void) return list; } -bool kvm_msr_is_in_save_restore_list(uint32_t msr_index) +bool kvm_msr_is_in_save_restore_list(u32 msr_index) { const struct kvm_msr_list *list = kvm_get_msr_index_list(); int i; @@ -1327,7 +1321,7 @@ void kvm_init_vm_address_properties(struct kvm_vm *vm) } const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, - uint32_t function, uint32_t index) + u32 function, u32 index) { int i; @@ -1344,7 +1338,7 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, #define X86_HYPERCALL(inputs...) 
\ ({ \ - uint64_t r; \ + u64 r; \ \ asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \ "jnz 1f\n\t" \ @@ -1359,18 +1353,17 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, r; \ }) -uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, - uint64_t a3) +u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3) { return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); } -uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1) +u64 __xen_hypercall(u64 nr, u64 a0, void *a1) { return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1)); } -void xen_hypercall(uint64_t nr, uint64_t a0, void *a1) +void xen_hypercall(u64 nr, u64 a0, void *a1) { GUEST_ASSERT(!__xen_hypercall(nr, a0, a1)); } @@ -1379,7 +1372,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm) { const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ unsigned long ht_gfn, max_gfn, max_pfn; - uint8_t maxphyaddr, guest_maxphyaddr; + u8 maxphyaddr, guest_maxphyaddr; /* * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR @@ -1453,8 +1446,7 @@ bool kvm_arch_has_default_irqchip(void) return true; } -void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, - uint64_t smram_gpa, +void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa, const void *smi_handler, size_t handler_size) { vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa, diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c index c3a9838f4806a5..93f91690346174 100644 --- a/tools/testing/selftests/kvm/lib/x86/sev.c +++ b/tools/testing/selftests/kvm/lib/x86/sev.c @@ -15,10 +15,10 @@ * expression would cause us to quit the loop. 
*/ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region, - uint8_t page_type, bool private) + u8 page_type, bool private) { const struct sparsebit *protected_phy_pages = region->protected_phy_pages; - const vm_paddr_t gpa_base = region->region.guest_phys_addr; + const gpa_t gpa_base = region->region.guest_phys_addr; const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift; sparsebit_idx_t i, j; @@ -29,15 +29,15 @@ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *regio sev_register_encrypted_memory(vm, region); sparsebit_for_each_set_range(protected_phy_pages, i, j) { - const uint64_t size = (j - i + 1) * vm->page_size; - const uint64_t offset = (i - lowest_page_in_region) * vm->page_size; + const u64 size = (j - i + 1) * vm->page_size; + const u64 offset = (i - lowest_page_in_region) * vm->page_size; if (private) vm_mem_set_private(vm, gpa_base + offset, size); if (is_sev_snp_vm(vm)) snp_launch_update_data(vm, gpa_base + offset, - (uint64_t)addr_gpa2hva(vm, gpa_base + offset), + (u64)addr_gpa2hva(vm, gpa_base + offset), size, page_type); else sev_launch_update_data(vm, gpa_base + offset, size); @@ -79,7 +79,7 @@ void snp_vm_init(struct kvm_vm *vm) vm_sev_ioctl(vm, KVM_SEV_INIT2, &init); } -void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) +void sev_vm_launch(struct kvm_vm *vm, u32 policy) { struct kvm_sev_launch_start launch_start = { .policy = policy, @@ -103,7 +103,7 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) vm->arch.is_pt_protected = true; } -void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement) +void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement) { struct kvm_sev_launch_measure launch_measure; struct kvm_sev_guest_status guest_status; @@ -131,7 +131,7 @@ void sev_vm_launch_finish(struct kvm_vm *vm) TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING); } -void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy) +void 
snp_vm_launch_start(struct kvm_vm *vm, u64 policy) { struct kvm_sev_snp_launch_start launch_start = { .policy = policy, @@ -158,7 +158,7 @@ void snp_vm_launch_finish(struct kvm_vm *vm) vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish); } -struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, +struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code, struct kvm_vcpu **cpu) { struct vm_shape shape = { @@ -174,7 +174,7 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, return vm; } -void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement) +void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement) { if (is_sev_snp_vm(vm)) { vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE)); diff --git a/tools/testing/selftests/kvm/lib/x86/svm.c b/tools/testing/selftests/kvm/lib/x86/svm.c index eb20b00112c769..3b01605ab016c0 100644 --- a/tools/testing/selftests/kvm/lib/x86/svm.c +++ b/tools/testing/selftests/kvm/lib/x86/svm.c @@ -28,20 +28,20 @@ u64 rflags; * Pointer to structure with the addresses of the SVM areas. 
*/ struct svm_test_data * -vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva) +vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva) { - vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm); + gva_t svm_gva = vm_alloc_page(vm); struct svm_test_data *svm = addr_gva2hva(vm, svm_gva); - svm->vmcb = (void *)vm_vaddr_alloc_page(vm); + svm->vmcb = (void *)vm_alloc_page(vm); svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb); svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb); - svm->save_area = (void *)vm_vaddr_alloc_page(vm); + svm->save_area = (void *)vm_alloc_page(vm); svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area); svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area); - svm->msr = (void *)vm_vaddr_alloc_page(vm); + svm->msr = (void *)vm_alloc_page(vm); svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr); svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr); memset(svm->msr_hva, 0, getpagesize()); @@ -84,14 +84,14 @@ void vm_enable_npt(struct kvm_vm *vm) void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp) { struct vmcb *vmcb = svm->vmcb; - uint64_t vmcb_gpa = svm->vmcb_gpa; + u64 vmcb_gpa = svm->vmcb_gpa; struct vmcb_save_area *save = &vmcb->save; struct vmcb_control_area *ctrl = &vmcb->control; u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK; u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK; - uint64_t efer; + u64 efer; efer = rdmsr(MSR_EFER); wrmsr(MSR_EFER, efer | EFER_SVME); @@ -158,7 +158,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r * for now. registers involved in LOAD/SAVE_GPR_C are eventually * unmodified so they do not need to be in the clobber list. 
*/ -void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa) +void run_guest(struct vmcb *vmcb, u64 vmcb_gpa) { asm volatile ( "vmload %[vmcb_gpa]\n\t" diff --git a/tools/testing/selftests/kvm/lib/x86/ucall.c b/tools/testing/selftests/kvm/lib/x86/ucall.c index 1265cecc7dd104..e7dd5791959ba1 100644 --- a/tools/testing/selftests/kvm/lib/x86/ucall.c +++ b/tools/testing/selftests/kvm/lib/x86/ucall.c @@ -6,9 +6,9 @@ */ #include "kvm_util.h" -#define UCALL_PIO_PORT ((uint16_t)0x1000) +#define UCALL_PIO_PORT ((u16)0x1000) -void ucall_arch_do_ucall(vm_vaddr_t uc) +void ucall_arch_do_ucall(gva_t uc) { /* * FIXME: Revert this hack (the entire commit that added it) once nVMX diff --git a/tools/testing/selftests/kvm/lib/x86/vmx.c b/tools/testing/selftests/kvm/lib/x86/vmx.c index c87b340362a900..67642759e4a05f 100644 --- a/tools/testing/selftests/kvm/lib/x86/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86/vmx.c @@ -27,7 +27,7 @@ struct hv_vp_assist_page *current_vp_assist; int vcpu_enable_evmcs(struct kvm_vcpu *vcpu) { - uint16_t evmcs_ver; + u16 evmcs_ver; vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, (unsigned long)&evmcs_ver); @@ -79,39 +79,39 @@ void vm_enable_ept(struct kvm_vm *vm) * Pointer to structure with the addresses of the VMX areas. */ struct vmx_pages * -vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) +vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva) { - vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm); + gva_t vmx_gva = vm_alloc_page(vm); struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva); /* Setup of a region of guest memory for the vmxon region. */ - vmx->vmxon = (void *)vm_vaddr_alloc_page(vm); + vmx->vmxon = (void *)vm_alloc_page(vm); vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon); vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon); /* Setup of a region of guest memory for a vmcs. 
*/ - vmx->vmcs = (void *)vm_vaddr_alloc_page(vm); + vmx->vmcs = (void *)vm_alloc_page(vm); vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs); vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs); /* Setup of a region of guest memory for the MSR bitmap. */ - vmx->msr = (void *)vm_vaddr_alloc_page(vm); + vmx->msr = (void *)vm_alloc_page(vm); vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr); vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr); memset(vmx->msr_hva, 0, getpagesize()); /* Setup of a region of guest memory for the shadow VMCS. */ - vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm); + vmx->shadow_vmcs = (void *)vm_alloc_page(vm); vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs); vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs); /* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */ - vmx->vmread = (void *)vm_vaddr_alloc_page(vm); + vmx->vmread = (void *)vm_alloc_page(vm); vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread); vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread); memset(vmx->vmread_hva, 0, getpagesize()); - vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm); + vmx->vmwrite = (void *)vm_alloc_page(vm); vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite); vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite); memset(vmx->vmwrite_hva, 0, getpagesize()); @@ -125,8 +125,8 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) bool prepare_for_vmx_operation(struct vmx_pages *vmx) { - uint64_t feature_control; - uint64_t required; + u64 feature_control; + u64 required; unsigned long cr0; unsigned long cr4; @@ -160,7 +160,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx) wrmsr(MSR_IA32_FEAT_CTL, feature_control | required); /* Enter VMX root operation. 
*/ - *(uint32_t *)(vmx->vmxon) = vmcs_revision(); + *(u32 *)(vmx->vmxon) = vmcs_revision(); if (vmxon(vmx->vmxon_gpa)) return false; @@ -170,7 +170,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx) bool load_vmcs(struct vmx_pages *vmx) { /* Load a VMCS. */ - *(uint32_t *)(vmx->vmcs) = vmcs_revision(); + *(u32 *)(vmx->vmcs) = vmcs_revision(); if (vmclear(vmx->vmcs_gpa)) return false; @@ -178,14 +178,14 @@ bool load_vmcs(struct vmx_pages *vmx) return false; /* Setup shadow VMCS, do not load it yet. */ - *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; + *(u32 *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; if (vmclear(vmx->shadow_vmcs_gpa)) return false; return true; } -static bool ept_vpid_cap_supported(uint64_t mask) +static bool ept_vpid_cap_supported(u64 mask) { return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask; } @@ -200,7 +200,7 @@ bool ept_1g_pages_supported(void) */ static inline void init_vmcs_control_fields(struct vmx_pages *vmx) { - uint32_t sec_exec_ctl = 0; + u32 sec_exec_ctl = 0; vmwrite(VIRTUAL_PROCESSOR_ID, 0); vmwrite(POSTED_INTR_NV, 0); @@ -208,7 +208,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx) vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS)); if (vmx->eptp_gpa) { - uint64_t eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4; + u64 eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4; TEST_ASSERT((vmx->eptp_gpa & ~PHYSICAL_PAGE_MASK) == 0, "Illegal bits set in vmx->eptp_gpa"); @@ -259,7 +259,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx) */ static inline void init_vmcs_host_state(void) { - uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); + u32 exit_controls = vmreadz(VM_EXIT_CONTROLS); vmwrite(HOST_ES_SELECTOR, get_es()); vmwrite(HOST_CS_SELECTOR, get_cs()); @@ -358,8 +358,8 @@ static inline void init_vmcs_guest_state(void *rip, void *rsp) vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE)); vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE)); 
vmwrite(GUEST_DR7, 0x400); - vmwrite(GUEST_RSP, (uint64_t)rsp); - vmwrite(GUEST_RIP, (uint64_t)rip); + vmwrite(GUEST_RSP, (u64)rsp); + vmwrite(GUEST_RIP, (u64)rip); vmwrite(GUEST_RFLAGS, 2); vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0); vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP)); @@ -375,7 +375,7 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp) bool kvm_cpu_has_ept(void) { - uint64_t ctrl; + u64 ctrl; if (!kvm_cpu_has(X86_FEATURE_VMX)) return false; @@ -390,7 +390,7 @@ bool kvm_cpu_has_ept(void) void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm) { - vmx->apic_access = (void *)vm_vaddr_alloc_page(vm); + vmx->apic_access = (void *)vm_alloc_page(vm); vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access); vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access); } diff --git a/tools/testing/selftests/kvm/loongarch/arch_timer.c b/tools/testing/selftests/kvm/loongarch/arch_timer.c index 355ecac30954eb..a7279ded851892 100644 --- a/tools/testing/selftests/kvm/loongarch/arch_timer.c +++ b/tools/testing/selftests/kvm/loongarch/arch_timer.c @@ -27,8 +27,8 @@ static void do_idle(void) static void guest_irq_handler(struct ex_regs *regs) { unsigned int intid; - uint32_t cpu = guest_get_vcpuid(); - uint64_t xcnt, val, cfg, xcnt_diff_us; + u32 cpu = guest_get_vcpuid(); + u64 xcnt, val, cfg, xcnt_diff_us; struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; intid = !!(regs->estat & BIT(INT_TI)); @@ -62,10 +62,10 @@ static void guest_irq_handler(struct ex_regs *regs) WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); } -static void guest_test_period_timer(uint32_t cpu) +static void guest_test_period_timer(u32 cpu) { - uint32_t irq_iter, config_iter; - uint64_t us; + u32 irq_iter, config_iter; + u64 us; struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; shared_data->nr_iter = test_args.nr_iter; @@ -86,10 +86,10 @@ static void 
guest_test_period_timer(uint32_t cpu) irq_iter); } -static void guest_test_oneshot_timer(uint32_t cpu) +static void guest_test_oneshot_timer(u32 cpu) { - uint32_t irq_iter, config_iter; - uint64_t us; + u32 irq_iter, config_iter; + u64 us; struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; shared_data->nr_iter = 0; @@ -112,10 +112,10 @@ static void guest_test_oneshot_timer(uint32_t cpu) } } -static void guest_test_emulate_timer(uint32_t cpu) +static void guest_test_emulate_timer(u32 cpu) { - uint32_t config_iter; - uint64_t xcnt_diff_us, us; + u32 config_iter; + u64 xcnt_diff_us, us; struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; local_irq_disable(); @@ -136,9 +136,9 @@ static void guest_test_emulate_timer(uint32_t cpu) local_irq_enable(); } -static void guest_time_count_test(uint32_t cpu) +static void guest_time_count_test(u32 cpu) { - uint32_t config_iter; + u32 config_iter; unsigned long start, end, prev, us; /* Assuming that test case starts to run in 1 second */ @@ -165,7 +165,7 @@ static void guest_time_count_test(uint32_t cpu) static void guest_code(void) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); /* must run at first */ guest_time_count_test(cpu); diff --git a/tools/testing/selftests/kvm/loongarch/pmu_test.c b/tools/testing/selftests/kvm/loongarch/pmu_test.c index 88bb530e336e19..ec3fefb9ea9708 100644 --- a/tools/testing/selftests/kvm/loongarch/pmu_test.c +++ b/tools/testing/selftests/kvm/loongarch/pmu_test.c @@ -15,7 +15,7 @@ static int pmu_irq_count; /* Check PMU support */ static bool has_pmu_support(void) { - uint32_t cfg6; + u32 cfg6; /* Read CPUCFG6 to check PMU */ cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); @@ -34,7 +34,7 @@ static bool has_pmu_support(void) /* Dump PMU capabilities */ static void dump_pmu_caps(void) { - uint32_t cfg6; + u32 cfg6; int nr_counters, counter_bits; cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); @@ -51,8 +51,8 @@ static void dump_pmu_caps(void) static void 
guest_pmu_base_test(void) { int i; - uint32_t cfg6, pmnum; - uint64_t cnt[4]; + u32 cfg6, pmnum; + u64 cnt[4]; cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); pmnum = (cfg6 >> 4) & 0xf; @@ -114,7 +114,7 @@ static void guest_irq_handler(struct ex_regs *regs) static void guest_pmu_interrupt_test(void) { - uint64_t cnt; + u64 cnt; csr_write(PMU_OVERFLOW - 1, LOONGARCH_CSR_PERFCNTR0); csr_write(PMU_ENVENT_ENABLED | CSR_PERFCTRL_PMIE | LOONGARCH_PMU_EVENT_CYCLES, LOONGARCH_CSR_PERFCTRL0); diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index 3cdfa3b19b85fc..9c7578a098c3e1 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -30,7 +30,7 @@ static int nr_vcpus = 1; -static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) { @@ -55,10 +55,10 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) } static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, - uint64_t nr_modifications) + u64 nr_modifications) { - uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; - uint64_t gpa; + u64 pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; + gpa_t gpa; int i; /* @@ -78,7 +78,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, struct test_params { useconds_t delay; - uint64_t nr_iterations; + u64 nr_iterations; bool partition_vcpu_memory_access; bool disable_slot_zap_quirk; }; diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index 5087d082c4b00b..3d02db3714229f 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -85,17 +85,17 @@ struct vm_data { struct kvm_vm *vm; 
struct kvm_vcpu *vcpu; pthread_t vcpu_thread; - uint32_t nslots; - uint64_t npages; - uint64_t pages_per_slot; + u32 nslots; + u64 npages; + u64 pages_per_slot; void **hva_slots; bool mmio_ok; - uint64_t mmio_gpa_min; - uint64_t mmio_gpa_max; + u64 mmio_gpa_min; + u64 mmio_gpa_max; }; struct sync_area { - uint32_t guest_page_size; + u32 guest_page_size; atomic_bool start_flag; atomic_bool exit_flag; atomic_bool sync_flag; @@ -186,12 +186,12 @@ static void wait_for_vcpu(void) "sem_timedwait() failed: %d", errno); } -static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) +static void *vm_gpa2hva(struct vm_data *data, gpa_t gpa, u64 *rempages) { - uint64_t gpage, pgoffs; - uint32_t slot, slotoffs; + gpa_t gpage, pgoffs; + u32 slot, slotoffs; void *base; - uint32_t guest_page_size = data->vm->page_size; + u32 guest_page_size = data->vm->page_size; TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate"); TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, @@ -200,11 +200,11 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) gpage = gpa / guest_page_size; pgoffs = gpa % guest_page_size; - slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1); + slot = min(gpage / data->pages_per_slot, (u64)data->nslots - 1); slotoffs = gpage - (slot * data->pages_per_slot); if (rempages) { - uint64_t slotpages; + u64 slotpages; if (slot == data->nslots - 1) slotpages = data->npages - slot * data->pages_per_slot; @@ -217,12 +217,12 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) } base = data->hva_slots[slot]; - return (uint8_t *)base + slotoffs * guest_page_size + pgoffs; + return (u8 *)base + slotoffs * guest_page_size + pgoffs; } -static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot) +static u64 vm_slot2gpa(struct vm_data *data, u32 slot) { - uint32_t guest_page_size = data->vm->page_size; + u32 guest_page_size = data->vm->page_size; TEST_ASSERT(slot < 
data->nslots, "Too high slot number"); @@ -243,8 +243,8 @@ static struct vm_data *alloc_vm(void) return data; } -static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size, - uint64_t pages_per_slot, uint64_t rempages) +static bool check_slot_pages(u32 host_page_size, u32 guest_page_size, + u64 pages_per_slot, u64 rempages) { if (!pages_per_slot) return false; @@ -259,11 +259,11 @@ static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size, } -static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size) +static u64 get_max_slots(struct vm_data *data, u32 host_page_size) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t mempages, pages_per_slot, rempages; - uint64_t slots; + u32 guest_page_size = data->vm->page_size; + u64 mempages, pages_per_slot, rempages; + u64 slots; mempages = data->npages; slots = data->nslots; @@ -281,13 +281,13 @@ static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size) return 0; } -static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, - void *guest_code, uint64_t mem_size, +static bool prepare_vm(struct vm_data *data, int nslots, u64 *maxslots, + void *guest_code, u64 mem_size, struct timespec *slot_runtime) { - uint64_t mempages, rempages; - uint64_t guest_addr; - uint32_t slot, host_page_size, guest_page_size; + u64 mempages, rempages; + u64 guest_addr; + u32 slot, host_page_size, guest_page_size; struct timespec tstart; struct sync_area *sync; @@ -317,7 +317,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, clock_gettime(CLOCK_MONOTONIC, &tstart); for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { - uint64_t npages; + u64 npages; npages = data->pages_per_slot; if (slot == data->nslots) @@ -331,8 +331,8 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, *slot_runtime = timespec_elapsed(tstart); for (slot = 1, guest_addr = MEM_GPA; slot <= 
data->nslots; slot++) { - uint64_t npages; - uint64_t gpa; + u64 npages; + gpa_t gpa; npages = data->pages_per_slot; if (slot == data->nslots) @@ -448,7 +448,7 @@ static bool guest_perform_sync(void) static void guest_code_test_memslot_move(void) { struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); GUEST_SYNC(0); @@ -460,7 +460,7 @@ static void guest_code_test_memslot_move(void) for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE; ptr += page_size) - *(uint64_t *)ptr = MEM_TEST_VAL_1; + *(u64 *)ptr = MEM_TEST_VAL_1; /* * No host sync here since the MMIO exits are so expensive @@ -477,7 +477,7 @@ static void guest_code_test_memslot_move(void) static void guest_code_test_memslot_map(void) { struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); GUEST_SYNC(0); @@ -489,7 +489,7 @@ static void guest_code_test_memslot_map(void) for (ptr = MEM_TEST_GPA; ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += page_size) - *(uint64_t *)ptr = MEM_TEST_VAL_1; + *(u64 *)ptr = MEM_TEST_VAL_1; if (!guest_perform_sync()) break; @@ -497,7 +497,7 @@ static void guest_code_test_memslot_map(void) for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += page_size) - *(uint64_t *)ptr = MEM_TEST_VAL_2; + *(u64 *)ptr = MEM_TEST_VAL_2; if (!guest_perform_sync()) break; @@ -526,13 +526,13 @@ static void guest_code_test_memslot_unmap(void) * * Just access a single page to be on the safe side. 
*/ - *(uint64_t *)ptr = MEM_TEST_VAL_1; + *(u64 *)ptr = MEM_TEST_VAL_1; if (!guest_perform_sync()) break; ptr += MEM_TEST_UNMAP_SIZE / 2; - *(uint64_t *)ptr = MEM_TEST_VAL_2; + *(u64 *)ptr = MEM_TEST_VAL_2; if (!guest_perform_sync()) break; @@ -544,7 +544,7 @@ static void guest_code_test_memslot_unmap(void) static void guest_code_test_memslot_rw(void) { struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); GUEST_SYNC(0); @@ -555,17 +555,17 @@ static void guest_code_test_memslot_rw(void) for (ptr = MEM_TEST_GPA; ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) - *(uint64_t *)ptr = MEM_TEST_VAL_1; + *(u64 *)ptr = MEM_TEST_VAL_1; if (!guest_perform_sync()) break; for (ptr = MEM_TEST_GPA + page_size / 2; ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) { - uint64_t val = *(uint64_t *)ptr; + u64 val = *(u64 *)ptr; GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2); - *(uint64_t *)ptr = 0; + *(u64 *)ptr = 0; } if (!guest_perform_sync()) @@ -577,10 +577,10 @@ static void guest_code_test_memslot_rw(void) static bool test_memslot_move_prepare(struct vm_data *data, struct sync_area *sync, - uint64_t *maxslots, bool isactive) + u64 *maxslots, bool isactive) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t movesrcgpa, movetestgpa; + u32 guest_page_size = data->vm->page_size; + u64 movesrcgpa, movetestgpa; #ifdef __x86_64__ if (disable_slot_zap_quirk) @@ -590,7 +590,7 @@ static bool test_memslot_move_prepare(struct vm_data *data, movesrcgpa = vm_slot2gpa(data, data->nslots - 1); if (isactive) { - uint64_t lastpages; + u64 lastpages; vm_gpa2hva(data, movesrcgpa, &lastpages); if (lastpages * guest_page_size < MEM_TEST_MOVE_SIZE / 2) { @@ -613,21 +613,21 @@ static bool test_memslot_move_prepare(struct vm_data *data, static bool test_memslot_move_prepare_active(struct vm_data *data, struct sync_area *sync, - uint64_t *maxslots) 
+ u64 *maxslots) { return test_memslot_move_prepare(data, sync, maxslots, true); } static bool test_memslot_move_prepare_inactive(struct vm_data *data, struct sync_area *sync, - uint64_t *maxslots) + u64 *maxslots) { return test_memslot_move_prepare(data, sync, maxslots, false); } static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) { - uint64_t movesrcgpa; + u64 movesrcgpa; movesrcgpa = vm_slot2gpa(data, data->nslots - 1); vm_mem_region_move(data->vm, data->nslots - 1 + 1, @@ -636,13 +636,13 @@ static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) } static void test_memslot_do_unmap(struct vm_data *data, - uint64_t offsp, uint64_t count) + u64 offsp, u64 count) { - uint64_t gpa, ctr; - uint32_t guest_page_size = data->vm->page_size; + gpa_t gpa, ctr; + u32 guest_page_size = data->vm->page_size; for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) { - uint64_t npages; + u64 npages; void *hva; int ret; @@ -661,11 +661,11 @@ static void test_memslot_do_unmap(struct vm_data *data, } static void test_memslot_map_unmap_check(struct vm_data *data, - uint64_t offsp, uint64_t valexp) + u64 offsp, u64 valexp) { - uint64_t gpa; - uint64_t *val; - uint32_t guest_page_size = data->vm->page_size; + gpa_t gpa; + u64 *val; + u32 guest_page_size = data->vm->page_size; if (!map_unmap_verify) return; @@ -680,8 +680,8 @@ static void test_memslot_map_unmap_check(struct vm_data *data, static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t guest_pages = MEM_TEST_MAP_SIZE / guest_page_size; + u32 guest_page_size = data->vm->page_size; + u64 guest_pages = MEM_TEST_MAP_SIZE / guest_page_size; /* * Unmap the second half of the test area while guest writes to (maps) @@ -718,11 +718,11 @@ static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) static void test_memslot_unmap_loop_common(struct vm_data *data, 
struct sync_area *sync, - uint64_t chunk) + u64 chunk) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size; - uint64_t ctr; + u32 guest_page_size = data->vm->page_size; + u64 guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size; + u64 ctr; /* * Wait for the guest to finish mapping page(s) in the first half @@ -746,9 +746,9 @@ static void test_memslot_unmap_loop_common(struct vm_data *data, static void test_memslot_unmap_loop(struct vm_data *data, struct sync_area *sync) { - uint32_t host_page_size = getpagesize(); - uint32_t guest_page_size = data->vm->page_size; - uint64_t guest_chunk_pages = guest_page_size >= host_page_size ? + u32 host_page_size = getpagesize(); + u32 guest_page_size = data->vm->page_size; + u64 guest_chunk_pages = guest_page_size >= host_page_size ? 1 : host_page_size / guest_page_size; test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); @@ -757,27 +757,27 @@ static void test_memslot_unmap_loop(struct vm_data *data, static void test_memslot_unmap_loop_chunked(struct vm_data *data, struct sync_area *sync) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size; + u32 guest_page_size = data->vm->page_size; + u64 guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size; test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); } static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) { - uint64_t gptr; - uint32_t guest_page_size = data->vm->page_size; + u64 gptr; + u32 guest_page_size = data->vm->page_size; for (gptr = MEM_TEST_GPA + guest_page_size / 2; gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) - *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; + *(u64 *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; host_perform_sync(sync); for (gptr = MEM_TEST_GPA; gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) { - uint64_t *vptr = 
(typeof(vptr))vm_gpa2hva(data, gptr, NULL); - uint64_t val = *vptr; + u64 *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL); + u64 val = *vptr; TEST_ASSERT(val == MEM_TEST_VAL_1, "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")", @@ -790,21 +790,21 @@ static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) struct test_data { const char *name; - uint64_t mem_size; + u64 mem_size; void (*guest_code)(void); bool (*prepare)(struct vm_data *data, struct sync_area *sync, - uint64_t *maxslots); + u64 *maxslots); void (*loop)(struct vm_data *data, struct sync_area *sync); }; -static bool test_execute(int nslots, uint64_t *maxslots, +static bool test_execute(int nslots, u64 *maxslots, unsigned int maxtime, const struct test_data *tdata, - uint64_t *nloops, + u64 *nloops, struct timespec *slot_runtime, struct timespec *guest_runtime) { - uint64_t mem_size = tdata->mem_size ? : MEM_SIZE; + u64 mem_size = tdata->mem_size ? : MEM_SIZE; struct vm_data *data; struct sync_area *sync; struct timespec tstart; @@ -924,8 +924,8 @@ static void help(char *name, struct test_args *targs) static bool check_memory_sizes(void) { - uint32_t host_page_size = getpagesize(); - uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size; + u32 host_page_size = getpagesize(); + u32 guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size; if (host_page_size > SZ_64K || guest_page_size > SZ_64K) { pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n", @@ -961,7 +961,7 @@ static bool check_memory_sizes(void) static bool parse_args(int argc, char *argv[], struct test_args *targs) { - uint32_t max_mem_slots; + u32 max_mem_slots; int opt; while ((opt = getopt(argc, argv, "hvdqs:f:e:l:r:")) != -1) { @@ -1040,8 +1040,8 @@ static bool parse_args(int argc, char *argv[], struct test_result { struct timespec slot_runtime, guest_runtime, iter_runtime; - int64_t slottimens, runtimens; - uint64_t nloops; + s64 slottimens, 
runtimens; + u64 nloops; }; static bool test_loop(const struct test_data *data, @@ -1049,7 +1049,7 @@ static bool test_loop(const struct test_data *data, struct test_result *rbestslottime, struct test_result *rbestruntime) { - uint64_t maxslots; + u64 maxslots; struct test_result result = {}; if (!test_execute(targs->nslots, &maxslots, targs->seconds, data, diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c index 51c070556f3efe..54d281419d3104 100644 --- a/tools/testing/selftests/kvm/mmu_stress_test.c +++ b/tools/testing/selftests/kvm/mmu_stress_test.c @@ -20,19 +20,19 @@ static bool mprotect_ro_done; static bool all_vcpus_hit_ro_fault; -static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) +static void guest_code(u64 start_gpa, u64 end_gpa, u64 stride) { - uint64_t gpa; + gpa_t gpa; int i; for (i = 0; i < 2; i++) { for (gpa = start_gpa; gpa < end_gpa; gpa += stride) - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); GUEST_SYNC(i); } for (gpa = start_gpa; gpa < end_gpa; gpa += stride) - *((volatile uint64_t *)gpa); + *((volatile u64 *)gpa); GUEST_SYNC(2); /* @@ -55,7 +55,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) #elif defined(__aarch64__) asm volatile("str %0, [%0]" :: "r" (gpa) : "memory"); #else - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); #endif } while (!READ_ONCE(mprotect_ro_done) || !READ_ONCE(all_vcpus_hit_ro_fault)); @@ -68,7 +68,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) #endif for (gpa = start_gpa; gpa < end_gpa; gpa += stride) - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); GUEST_SYNC(4); GUEST_ASSERT(0); @@ -76,8 +76,8 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) struct vcpu_info 
{ struct kvm_vcpu *vcpu; - uint64_t start_gpa; - uint64_t end_gpa; + u64 start_gpa; + u64 end_gpa; }; static int nr_vcpus; @@ -203,10 +203,10 @@ static void *vcpu_worker(void *data) } static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus, - uint64_t start_gpa, uint64_t end_gpa) + u64 start_gpa, u64 end_gpa) { struct vcpu_info *info; - uint64_t gpa, nr_bytes; + gpa_t gpa, nr_bytes; pthread_t *threads; int i; @@ -217,7 +217,7 @@ static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus, TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges"); nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) & - ~((uint64_t)vm->page_size - 1); + ~((u64)vm->page_size - 1); TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus); for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) { @@ -278,11 +278,11 @@ int main(int argc, char *argv[]) * just below the 4gb boundary. This test could create memory at * 1gb-3gb,but it's simpler to skip straight to 4gb. */ - const uint64_t start_gpa = SZ_4G; + const u64 start_gpa = SZ_4G; const int first_slot = 1; struct timespec time_start, time_run1, time_reset, time_run2, time_ro, time_rw; - uint64_t max_gpa, gpa, slot_size, max_mem, i; + u64 max_gpa, gpa, slot_size, max_mem, i; int max_slots, slot, opt, fd; bool hugepages = false; struct kvm_vcpu **vcpus; @@ -347,7 +347,7 @@ int main(int argc, char *argv[]) /* Pre-fault the memory to avoid taking mmap_sem on guest page faults. 
*/ for (i = 0; i < slot_size; i += vm->page_size) - ((uint8_t *)mem)[i] = 0xaa; + ((u8 *)mem)[i] = 0xaa; gpa = 0; for (slot = first_slot; slot < max_slots; slot++) { diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c index f3de0386ba7b08..fcb57fd034e672 100644 --- a/tools/testing/selftests/kvm/pre_fault_memory_test.c +++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c @@ -17,13 +17,13 @@ #define TEST_NPAGES (TEST_SIZE / PAGE_SIZE) #define TEST_SLOT 10 -static void guest_code(uint64_t base_gva) +static void guest_code(u64 base_gva) { - volatile uint64_t val __used; + volatile u64 val __used; int i; for (i = 0; i < TEST_NPAGES; i++) { - uint64_t *src = (uint64_t *)(base_gva + i * PAGE_SIZE); + u64 *src = (u64 *)(base_gva + i * PAGE_SIZE); val = *src; } @@ -33,8 +33,8 @@ static void guest_code(uint64_t base_gva) struct slot_worker_data { struct kvm_vm *vm; - u64 gpa; - uint32_t flags; + gpa_t gpa; + u32 flags; bool worker_ready; bool prefault_ready; bool recreate_slot; @@ -161,7 +161,7 @@ static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 base_gpa, u64 offset, static void __test_pre_fault_memory(unsigned long vm_type, bool private) { - uint64_t gpa, gva, alignment, guest_page_size; + gpa_t gpa, gva, alignment, guest_page_size; const struct vm_shape shape = { .mode = VM_MODE_DEFAULT, .type = vm_type, diff --git a/tools/testing/selftests/kvm/riscv/arch_timer.c b/tools/testing/selftests/kvm/riscv/arch_timer.c index f962fefc48fadf..d67c918ee3102f 100644 --- a/tools/testing/selftests/kvm/riscv/arch_timer.c +++ b/tools/testing/selftests/kvm/riscv/arch_timer.c @@ -17,9 +17,9 @@ static int timer_irq = IRQ_S_TIMER; static void guest_irq_handler(struct pt_regs *regs) { - uint64_t xcnt, xcnt_diff_us, cmp; + u64 xcnt, xcnt_diff_us, cmp; unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG; - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = 
&vcpu_shared_data[cpu]; timer_irq_disable(); @@ -40,7 +40,7 @@ static void guest_irq_handler(struct pt_regs *regs) static void guest_run(struct test_vcpu_shared_data *shared_data) { - uint32_t irq_iter, config_iter; + u32 irq_iter, config_iter; shared_data->nr_iter = 0; shared_data->guest_stage = 0; @@ -66,7 +66,7 @@ static void guest_run(struct test_vcpu_shared_data *shared_data) static void guest_code(void) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; timer_irq_disable(); diff --git a/tools/testing/selftests/kvm/riscv/ebreak_test.c b/tools/testing/selftests/kvm/riscv/ebreak_test.c index 739d17befb5ac8..3f44b045a22e36 100644 --- a/tools/testing/selftests/kvm/riscv/ebreak_test.c +++ b/tools/testing/selftests/kvm/riscv/ebreak_test.c @@ -8,10 +8,10 @@ #include "kvm_util.h" #include "ucall_common.h" -#define LABEL_ADDRESS(v) ((uint64_t)&(v)) +#define LABEL_ADDRESS(v) ((u64)&(v)) extern unsigned char sw_bp_1, sw_bp_2; -static uint64_t sw_bp_addr; +static u64 sw_bp_addr; static void guest_code(void) { @@ -37,7 +37,7 @@ int main(void) { struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint64_t pc; + u64 pc; struct kvm_guest_debug debug = { .control = KVM_GUESTDBG_ENABLE, }; diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index 8d6b951434eb02..8d6fdb5d38b89e 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -162,7 +162,7 @@ bool check_reject_set(int err) } static int override_vector_reg_size(struct kvm_vcpu *vcpu, struct vcpu_reg_sublist *s, - uint64_t feature) + u64 feature) { unsigned long vlenb_reg = 0; int rc; @@ -197,7 +197,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) { unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 }; struct vcpu_reg_sublist *s; - uint64_t feature; + u64 feature; int rc; for (int i = 0; i < 
KVM_RISCV_ISA_EXT_MAX; i++) diff --git a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c index cec1621ace232c..e56a3dd6a51e53 100644 --- a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c +++ b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c @@ -24,7 +24,7 @@ union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS]; /* Snapshot shared memory data */ #define PMU_SNAPSHOT_GPA_BASE BIT(30) static void *snapshot_gva; -static vm_paddr_t snapshot_gpa; +static gpa_t snapshot_gpa; static int vcpu_shared_irq_count; static int counter_in_use; @@ -86,7 +86,7 @@ unsigned long pmu_csr_read_num(int csr_num) #undef switchcase_csr_read } -static inline void dummy_func_loop(uint64_t iter) +static inline void dummy_func_loop(u64 iter) { int i = 0; @@ -259,7 +259,7 @@ static inline void verify_sbi_requirement_assert(void) __GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot"); } -static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags) +static void snapshot_set_shmem(gpa_t gpa, unsigned long flags) { unsigned long lo = (unsigned long)gpa; #if __riscv_xlen == 32 @@ -610,7 +610,7 @@ static void test_vm_setup_snapshot_mem(struct kvm_vm *vm, struct kvm_vcpu *vcpu) virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1); snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE); - snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva); + snapshot_gpa = addr_gva2gpa(vcpu->vm, (gva_t)snapshot_gva); sync_global_to_guest(vcpu->vm, snapshot_gva); sync_global_to_guest(vcpu->vm, snapshot_gpa); } diff --git a/tools/testing/selftests/kvm/s390/debug_test.c b/tools/testing/selftests/kvm/s390/debug_test.c index ad80959686014b..751c61c0f05611 100644 --- a/tools/testing/selftests/kvm/s390/debug_test.c +++ b/tools/testing/selftests/kvm/s390/debug_test.c @@ -17,7 +17,7 @@ asm("int_handler:\n" "j .\n"); static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code, - size_t new_psw_off, uint64_t 
*new_psw) + size_t new_psw_off, u64 *new_psw) { struct kvm_guest_debug debug = {}; struct kvm_regs regs; @@ -27,7 +27,7 @@ static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code, vm = vm_create_with_one_vcpu(vcpu, guest_code); lowcore = addr_gpa2hva(vm, 0); new_psw[0] = (*vcpu)->run->psw_mask; - new_psw[1] = (uint64_t)int_handler; + new_psw[1] = (u64)int_handler; memcpy(lowcore + new_psw_off, new_psw, 16); vcpu_regs_get(*vcpu, ®s); regs.gprs[2] = -1; @@ -42,7 +42,7 @@ static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code, static void test_step_int(void *guest_code, size_t new_psw_off) { struct kvm_vcpu *vcpu; - uint64_t new_psw[2]; + u64 new_psw[2]; struct kvm_vm *vm; vm = test_step_int_1(&vcpu, guest_code, new_psw_off, new_psw); @@ -79,7 +79,7 @@ static void test_step_pgm_diag(void) .u.pgm.code = PGM_SPECIFICATION, }; struct kvm_vcpu *vcpu; - uint64_t new_psw[2]; + u64 new_psw[2]; struct kvm_vm *vm; vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code, diff --git a/tools/testing/selftests/kvm/s390/irq_routing.c b/tools/testing/selftests/kvm/s390/irq_routing.c index 7819a0af19a8eb..f3839284ac0818 100644 --- a/tools/testing/selftests/kvm/s390/irq_routing.c +++ b/tools/testing/selftests/kvm/s390/irq_routing.c @@ -27,7 +27,7 @@ static void test(void) struct kvm_irq_routing *routing; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_paddr_t mem; + gpa_t mem; int ret; struct kvm_irq_routing_entry ue = { diff --git a/tools/testing/selftests/kvm/s390/memop.c b/tools/testing/selftests/kvm/s390/memop.c index 4374b4cd2a8071..0244848621b349 100644 --- a/tools/testing/selftests/kvm/s390/memop.c +++ b/tools/testing/selftests/kvm/s390/memop.c @@ -34,7 +34,7 @@ enum mop_access_mode { struct mop_desc { uintptr_t gaddr; uintptr_t gaddr_v; - uint64_t set_flags; + u64 set_flags; unsigned int f_check : 1; unsigned int f_inject : 1; unsigned int f_key : 1; @@ -42,19 +42,19 @@ struct mop_desc { unsigned int _set_flags : 1; unsigned int 
_sida_offset : 1; unsigned int _ar : 1; - uint32_t size; + u32 size; enum mop_target target; enum mop_access_mode mode; void *buf; - uint32_t sida_offset; + u32 sida_offset; void *old; - uint8_t old_value[16]; + u8 old_value[16]; bool *cmpxchg_success; - uint8_t ar; - uint8_t key; + u8 ar; + u8 key; }; -const uint8_t NO_KEY = 0xff; +const u8 NO_KEY = 0xff; static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc) { @@ -85,7 +85,7 @@ static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc) ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE; if (desc->mode == CMPXCHG) { ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG; - ksmo.old_addr = (uint64_t)desc->old; + ksmo.old_addr = (u64)desc->old; memcpy(desc->old_value, desc->old, desc->size); } break; @@ -230,8 +230,8 @@ static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo, #define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38)) #define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39)) -static uint8_t __aligned(PAGE_SIZE) mem1[65536]; -static uint8_t __aligned(PAGE_SIZE) mem2[65536]; +static u8 __aligned(PAGE_SIZE) mem1[65536]; +static u8 __aligned(PAGE_SIZE) mem2[65536]; struct test_default { struct kvm_vm *kvm_vm; @@ -296,7 +296,7 @@ static void prepare_mem12(void) TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!") static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu, - enum mop_target mop_target, uint32_t size, uint8_t key) + enum mop_target mop_target, u32 size, u8 key) { prepare_mem12(); CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, @@ -308,7 +308,7 @@ static void default_write_read(struct test_info copy_cpu, struct test_info mop_c } static void default_read(struct test_info copy_cpu, struct test_info mop_cpu, - enum mop_target mop_target, uint32_t size, uint8_t key) + enum mop_target mop_target, u32 size, u8 key) { prepare_mem12(); CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1)); @@ -318,12 +318,12 @@ static 
void default_read(struct test_info copy_cpu, struct test_info mop_cpu, ASSERT_MEM_EQ(mem1, mem2, size); } -static void default_cmpxchg(struct test_default *test, uint8_t key) +static void default_cmpxchg(struct test_default *test, u8 key) { for (int size = 1; size <= 16; size *= 2) { for (int offset = 0; offset < 16; offset += size) { - uint8_t __aligned(16) new[16] = {}; - uint8_t __aligned(16) old[16]; + u8 __aligned(16) new[16] = {}; + u8 __aligned(16) old[16]; bool succ; prepare_mem12(); @@ -400,7 +400,7 @@ static void test_copy_access_register(void) kvm_vm_free(t.kvm_vm); } -static void set_storage_key_range(void *addr, size_t len, uint8_t key) +static void set_storage_key_range(void *addr, size_t len, u8 key) { uintptr_t _addr, abs, i; int not_mapped = 0; @@ -483,13 +483,13 @@ static __uint128_t cut_to_size(int size, __uint128_t val) { switch (size) { case 1: - return (uint8_t)val; + return (u8)val; case 2: - return (uint16_t)val; + return (u16)val; case 4: - return (uint32_t)val; + return (u32)val; case 8: - return (uint64_t)val; + return (u64)val; case 16: return val; } @@ -501,10 +501,10 @@ static bool popcount_eq(__uint128_t a, __uint128_t b) { unsigned int count_a, count_b; - count_a = __builtin_popcountl((uint64_t)(a >> 64)) + - __builtin_popcountl((uint64_t)a); - count_b = __builtin_popcountl((uint64_t)(b >> 64)) + - __builtin_popcountl((uint64_t)b); + count_a = __builtin_popcountl((u64)(a >> 64)) + + __builtin_popcountl((u64)a); + count_b = __builtin_popcountl((u64)(b >> 64)) + + __builtin_popcountl((u64)b); return count_a == count_b; } @@ -553,7 +553,7 @@ static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old) if (swap) { int i, j; __uint128_t new; - uint8_t byte0, byte1; + u8 byte0, byte1; rand = rand * 3 + 1; i = rand % size; @@ -585,28 +585,28 @@ static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t switch (size) { case 4: { - uint32_t old = *old_addr; + u32 old = *old_addr; asm volatile ("cs 
%[old],%[new],%[address]" : [old] "+d" (old), - [address] "+Q" (*(uint32_t *)(target)) - : [new] "d" ((uint32_t)new) + [address] "+Q" (*(u32 *)(target)) + : [new] "d" ((u32)new) : "cc" ); - ret = old == (uint32_t)*old_addr; + ret = old == (u32)*old_addr; *old_addr = old; return ret; } case 8: { - uint64_t old = *old_addr; + u64 old = *old_addr; asm volatile ("csg %[old],%[new],%[address]" : [old] "+d" (old), - [address] "+Q" (*(uint64_t *)(target)) - : [new] "d" ((uint64_t)new) + [address] "+Q" (*(u64 *)(target)) + : [new] "d" ((u64)new) : "cc" ); - ret = old == (uint64_t)*old_addr; + ret = old == (u64)*old_addr; *old_addr = old; return ret; } @@ -811,10 +811,10 @@ static void test_errors_cmpxchg_key(void) static void test_termination(void) { struct test_default t = test_default_init(guest_error_key); - uint64_t prefix; - uint64_t teid; - uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61); - uint64_t psw[2]; + u64 prefix; + u64 teid; + u64 teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61); + u64 psw[2]; HOST_SYNC(t.vcpu, STAGE_INITED); HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); @@ -855,7 +855,7 @@ static void test_errors_key_storage_prot_override(void) kvm_vm_free(t.kvm_vm); } -const uint64_t last_page_addr = -PAGE_SIZE; +const u64 last_page_addr = -PAGE_SIZE; static void guest_copy_key_fetch_prot_override(void) { @@ -878,10 +878,10 @@ static void guest_copy_key_fetch_prot_override(void) static void test_copy_key_fetch_prot_override(void) { struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); - vm_vaddr_t guest_0_page, guest_last_page; + gva_t guest_0_page, guest_last_page; - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); if (guest_0_page != 0 || guest_last_page != last_page_addr) { print_skip("did not allocate guest pages at 
required positions"); goto out; @@ -917,10 +917,10 @@ static void test_copy_key_fetch_prot_override(void) static void test_errors_key_fetch_prot_override_not_enabled(void) { struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); - vm_vaddr_t guest_0_page, guest_last_page; + gva_t guest_0_page, guest_last_page; - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); if (guest_0_page != 0 || guest_last_page != last_page_addr) { print_skip("did not allocate guest pages at required positions"); goto out; @@ -938,10 +938,10 @@ static void test_errors_key_fetch_prot_override_not_enabled(void) static void test_errors_key_fetch_prot_override_enabled(void) { struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); - vm_vaddr_t guest_0_page, guest_last_page; + gva_t guest_0_page, guest_last_page; - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); if (guest_0_page != 0 || guest_last_page != last_page_addr) { print_skip("did not allocate guest pages at required positions"); goto out; diff --git a/tools/testing/selftests/kvm/s390/resets.c b/tools/testing/selftests/kvm/s390/resets.c index b58f75b381e5a7..e3c7a2f148f9ad 100644 --- a/tools/testing/selftests/kvm/s390/resets.c +++ b/tools/testing/selftests/kvm/s390/resets.c @@ -20,7 +20,7 @@ struct kvm_s390_irq buf[ARBITRARY_NON_ZERO_VCPU_ID + LOCAL_IRQS]; -static uint8_t regs_null[512]; +static u8 regs_null[512]; static void guest_code_initial(void) { @@ -57,9 +57,9 @@ static void guest_code_initial(void) ); } -static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value) +static void 
test_one_reg(struct kvm_vcpu *vcpu, u64 id, u64 value) { - uint64_t eval_reg; + u64 eval_reg; eval_reg = vcpu_get_reg(vcpu, id); TEST_ASSERT(eval_reg == value, "value == 0x%lx", value); diff --git a/tools/testing/selftests/kvm/s390/shared_zeropage_test.c b/tools/testing/selftests/kvm/s390/shared_zeropage_test.c index bba0d9a6dcc874..a9e5a01200b8a2 100644 --- a/tools/testing/selftests/kvm/s390/shared_zeropage_test.c +++ b/tools/testing/selftests/kvm/s390/shared_zeropage_test.c @@ -13,7 +13,7 @@ #include "kselftest.h" #include "ucall_common.h" -static void set_storage_key(void *addr, uint8_t skey) +static void set_storage_key(void *addr, u8 skey) { asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); } diff --git a/tools/testing/selftests/kvm/s390/tprot.c b/tools/testing/selftests/kvm/s390/tprot.c index 12d5e1cb62e349..8054d2b178f050 100644 --- a/tools/testing/selftests/kvm/s390/tprot.c +++ b/tools/testing/selftests/kvm/s390/tprot.c @@ -14,12 +14,12 @@ #define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38)) #define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39)) -static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE]; -static uint8_t *const page_store_prot = pages[0]; -static uint8_t *const page_fetch_prot = pages[1]; +static __aligned(PAGE_SIZE) u8 pages[2][PAGE_SIZE]; +static u8 *const page_store_prot = pages[0]; +static u8 *const page_fetch_prot = pages[1]; /* Nonzero return value indicates that address not mapped */ -static int set_storage_key(void *addr, uint8_t key) +static int set_storage_key(void *addr, u8 key) { int not_mapped = 0; @@ -44,9 +44,9 @@ enum permission { TRANSL_UNAVAIL = 3, }; -static enum permission test_protection(void *addr, uint8_t key) +static enum permission test_protection(void *addr, u8 key) { - uint64_t mask; + u64 mask; asm volatile ( "tprot %[addr], 0(%[key])\n" @@ -72,7 +72,7 @@ enum stage { struct test { enum stage stage; void *addr; - uint8_t key; + u8 key; enum permission expected; } tests[] = { /* @@ -146,7 +146,7 @@ 
static enum stage perform_next_stage(int *i, bool mapped_0) /* * Some fetch protection override tests require that page 0 * be mapped, however, when the hosts tries to map that page via - * vm_vaddr_alloc, it may happen that some other page gets mapped + * vm_alloc, it may happen that some other page gets mapped * instead. * In order to skip these tests we detect this inside the guest */ @@ -207,7 +207,7 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct kvm_run *run; - vm_vaddr_t guest_0_page; + gva_t guest_0_page; ksft_print_header(); ksft_set_plan(STAGE_END); @@ -216,10 +216,10 @@ int main(int argc, char *argv[]) run = vcpu->run; HOST_SYNC(vcpu, STAGE_INIT_SIMPLE); - mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ); + mprotect(addr_gva2hva(vm, (gva_t)pages), PAGE_SIZE * 2, PROT_READ); HOST_SYNC(vcpu, TEST_SIMPLE); - guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0); + guest_0_page = vm_alloc(vm, PAGE_SIZE, 0); if (guest_0_page != 0) { /* Use NO_TAP so we don't get a PASS print */ HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE); @@ -229,7 +229,7 @@ int main(int argc, char *argv[]) HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE); } if (guest_0_page == 0) - mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ); + mprotect(addr_gva2hva(vm, (gva_t)0), PAGE_SIZE, PROT_READ); run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE; run->kvm_dirty_regs = KVM_SYNC_CRS; HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE); diff --git a/tools/testing/selftests/kvm/s390/ucontrol_test.c b/tools/testing/selftests/kvm/s390/ucontrol_test.c index 50bc1c38225a63..b8c6f37b53e0dc 100644 --- a/tools/testing/selftests/kvm/s390/ucontrol_test.c +++ b/tools/testing/selftests/kvm/s390/ucontrol_test.c @@ -111,7 +111,7 @@ FIXTURE(uc_kvm) uintptr_t base_hva; uintptr_t code_hva; int kvm_run_size; - vm_paddr_t pgd; + gpa_t pgd; void *vm_mem; int vcpu_fd; int kvm_fd; @@ -269,7 +269,7 @@ TEST(uc_cap_hpage) } /* calculate host virtual addr 
from guest physical addr */ -static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa) +static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, gpa_t gpa) { return (void *)(self->base_hva - self->base_gpa + gpa); } @@ -571,7 +571,7 @@ TEST_F(uc_kvm, uc_skey) { struct kvm_s390_sie_block *sie_block = self->sie_block; struct kvm_sync_regs *sync_regs = &self->run->s.regs; - u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2); + u64 test_gva = VM_MEM_SIZE - (SZ_1M / 2); struct kvm_run *run = self->run; const u8 skeyvalue = 0x34; @@ -583,7 +583,7 @@ TEST_F(uc_kvm, uc_skey) /* set register content for test_skey_asm to access not mapped memory */ sync_regs->gprs[1] = skeyvalue; sync_regs->gprs[5] = self->base_gpa; - sync_regs->gprs[6] = test_vaddr; + sync_regs->gprs[6] = test_gva; run->kvm_dirty_regs |= KVM_SYNC_GPRS; /* DAT disabled + 64 bit mode */ diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c index a398dc3a8c4be1..9b919a231c9370 100644 --- a/tools/testing/selftests/kvm/set_memory_region_test.c +++ b/tools/testing/selftests/kvm/set_memory_region_test.c @@ -30,19 +30,19 @@ #define MEM_REGION_GPA 0xc0000000 #define MEM_REGION_SLOT 10 -static const uint64_t MMIO_VAL = 0xbeefull; +static const u64 MMIO_VAL = 0xbeefull; -extern const uint64_t final_rip_start; -extern const uint64_t final_rip_end; +extern const u64 final_rip_start; +extern const u64 final_rip_end; static sem_t vcpu_ready; -static inline uint64_t guest_spin_on_val(uint64_t spin_val) +static inline u64 guest_spin_on_val(u64 spin_val) { - uint64_t val; + u64 val; do { - val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA)); + val = READ_ONCE(*((u64 *)MEM_REGION_GPA)); } while (val == spin_val); GUEST_SYNC(0); @@ -54,7 +54,7 @@ static void *vcpu_worker(void *data) struct kvm_vcpu *vcpu = data; struct kvm_run *run = vcpu->run; struct ucall uc; - uint64_t cmd; + u64 cmd; /* * Loop until the guest is done. 
Re-enter the guest on all MMIO exits, @@ -111,8 +111,8 @@ static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread, void *guest_code) { struct kvm_vm *vm; - uint64_t *hva; - uint64_t gpa; + u64 *hva; + gpa_t gpa; vm = vm_create_with_one_vcpu(vcpu, guest_code); @@ -144,7 +144,7 @@ static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread, static void guest_code_move_memory_region(void) { - uint64_t val; + u64 val; GUEST_SYNC(0); @@ -180,7 +180,7 @@ static void test_move_memory_region(bool disable_slot_zap_quirk) pthread_t vcpu_thread; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t *hva; + u64 *hva; vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region); @@ -224,7 +224,7 @@ static void test_move_memory_region(bool disable_slot_zap_quirk) static void guest_code_delete_memory_region(void) { struct desc_ptr idt; - uint64_t val; + u64 val; /* * Clobber the IDT so that a #PF due to the memory region being deleted @@ -345,8 +345,8 @@ static void test_zero_memory_regions(void) static void test_invalid_memory_region_flags(void) { - uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES; - const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD; + u32 supported_flags = KVM_MEM_LOG_DIRTY_PAGES; + const u32 v2_only_flags = KVM_MEM_GUEST_MEMFD; struct kvm_vm *vm; int r, i; @@ -410,8 +410,8 @@ static void test_add_max_memory_regions(void) { int ret; struct kvm_vm *vm; - uint32_t max_mem_slots; - uint32_t slot; + u32 max_mem_slots; + u32 slot; void *mem, *mem_aligned, *mem_extra; size_t alignment = 1; @@ -434,16 +434,16 @@ static void test_add_max_memory_regions(void) for (slot = 0; slot < max_mem_slots; slot++) vm_set_user_memory_region(vm, slot, 0, - ((uint64_t)slot * MEM_REGION_SIZE), + ((u64)slot * MEM_REGION_SIZE), MEM_REGION_SIZE, - mem_aligned + (uint64_t)slot * MEM_REGION_SIZE); + mem_aligned + (u64)slot * MEM_REGION_SIZE); /* Check it cannot be added memory slots beyond the limit */ mem_extra = kvm_mmap(MEM_REGION_SIZE, 
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1); ret = __vm_set_user_memory_region(vm, max_mem_slots, 0, - (uint64_t)max_mem_slots * MEM_REGION_SIZE, + (u64)max_mem_slots * MEM_REGION_SIZE, MEM_REGION_SIZE, mem_extra); TEST_ASSERT(ret == -1 && errno == EINVAL, "Adding one more memory slot should fail with EINVAL"); @@ -556,7 +556,7 @@ static void guest_code_mmio_during_vectoring(void) set_idt(&idt_desc); /* Generate a #GP by dereferencing a non-canonical address */ - *((uint8_t *)NONCANONICAL) = 0x1; + *((u8 *)NONCANONICAL) = 0x1; GUEST_ASSERT(0); } diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c index efe56a10d13e83..7df2bc8eec0221 100644 --- a/tools/testing/selftests/kvm/steal_time.c +++ b/tools/testing/selftests/kvm/steal_time.c @@ -25,7 +25,7 @@ #define ST_GPA_BASE (1 << 30) static void *st_gva[NR_VCPUS]; -static uint64_t guest_stolen_time[NR_VCPUS]; +static u64 guest_stolen_time[NR_VCPUS]; #if defined(__x86_64__) @@ -42,9 +42,9 @@ static void check_status(struct kvm_steal_time *st) static void guest_code(int cpu) { struct kvm_steal_time *st = st_gva[cpu]; - uint32_t version; + u32 version; - GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED)); + GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((u64)st_gva[cpu] | KVM_MSR_ENABLED)); memset(st, 0, sizeof(*st)); GUEST_SYNC(0); @@ -67,7 +67,7 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu) return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME); } -static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) { /* ST_GPA_BASE is identity mapped */ st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); @@ -76,7 +76,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED); } -static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +static void steal_time_dump(struct 
kvm_vm *vm, u32 vcpu_idx) { struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); @@ -118,12 +118,12 @@ static void check_steal_time_uapi(void) #define PV_TIME_ST 0xc5000021 struct st_time { - uint32_t rev; - uint32_t attr; - uint64_t st_time; + u32 rev; + u32 attr; + u64 st_time; }; -static int64_t smccc(uint32_t func, uint64_t arg) +static s64 smccc(u32 func, u64 arg) { struct arm_smccc_res res; @@ -140,7 +140,7 @@ static void check_status(struct st_time *st) static void guest_code(int cpu) { struct st_time *st; - int64_t status; + s64 status; status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES); GUEST_ASSERT_EQ(status, 0); @@ -175,15 +175,15 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu) return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); } -static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) { struct kvm_vm *vm = vcpu->vm; - uint64_t st_ipa; + u64 st_ipa; struct kvm_device_attr dev = { .group = KVM_ARM_VCPU_PVTIME_CTRL, .attr = KVM_ARM_VCPU_PVTIME_IPA, - .addr = (uint64_t)&st_ipa, + .addr = (u64)&st_ipa, }; /* ST_GPA_BASE is identity mapped */ @@ -194,7 +194,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev); } -static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) { struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); @@ -208,7 +208,7 @@ static void check_steal_time_uapi(void) { struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint64_t st_ipa; + u64 st_ipa; int ret; vm = vm_create_with_one_vcpu(&vcpu, NULL); @@ -216,7 +216,7 @@ static void check_steal_time_uapi(void) struct kvm_device_attr dev = { .group = KVM_ARM_VCPU_PVTIME_CTRL, .attr = KVM_ARM_VCPU_PVTIME_IPA, - .addr = (uint64_t)&st_ipa, + .addr = (u64)&st_ipa, }; vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); @@ -239,17 +239,17 @@ static void 
check_steal_time_uapi(void) /* SBI STA shmem must have 64-byte alignment */ #define STEAL_TIME_SIZE ((sizeof(struct sta_struct) + 63) & ~63) -static vm_paddr_t st_gpa[NR_VCPUS]; +static gpa_t st_gpa[NR_VCPUS]; struct sta_struct { - uint32_t sequence; - uint32_t flags; - uint64_t steal; - uint8_t preempted; - uint8_t pad[47]; + u32 sequence; + u32 flags; + u64 steal; + u8 preempted; + u8 pad[47]; } __packed; -static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags) +static void sta_set_shmem(gpa_t gpa, unsigned long flags) { unsigned long lo = (unsigned long)gpa; #if __riscv_xlen == 32 @@ -272,7 +272,7 @@ static void check_status(struct sta_struct *st) static void guest_code(int cpu) { struct sta_struct *st = st_gva[cpu]; - uint32_t sequence; + u32 sequence; long out_val = 0; bool probe; @@ -297,7 +297,7 @@ static void guest_code(int cpu) static bool is_steal_time_supported(struct kvm_vcpu *vcpu) { - uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); + u64 id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); unsigned long enabled = vcpu_get_reg(vcpu, id); TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result"); @@ -305,16 +305,16 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu) return enabled; } -static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) { /* ST_GPA_BASE is identity mapped */ st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); - st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]); + st_gpa[i] = addr_gva2gpa(vcpu->vm, (gva_t)st_gva[i]); sync_global_to_guest(vcpu->vm, st_gva[i]); sync_global_to_guest(vcpu->vm, st_gpa[i]); } -static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) { struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); int i; @@ -335,7 +335,7 @@ static void check_steal_time_uapi(void) struct kvm_vm *vm; struct kvm_vcpu *vcpu; struct kvm_one_reg reg; 
- uint64_t shmem; + u64 shmem; int ret; vm = vm_create_with_one_vcpu(&vcpu, NULL); @@ -345,7 +345,7 @@ static void check_steal_time_uapi(void) KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo); - reg.addr = (uint64_t)&shmem; + reg.addr = (u64)&shmem; shmem = ST_GPA_BASE + 1; ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); @@ -388,7 +388,7 @@ static void check_status(struct kvm_steal_time *st) static void guest_code(int cpu) { - uint32_t version; + u32 version; struct kvm_steal_time *st = st_gva[cpu]; memset(st, 0, sizeof(*st)); @@ -410,11 +410,11 @@ static void guest_code(int cpu) static bool is_steal_time_supported(struct kvm_vcpu *vcpu) { int err; - uint64_t val; + u64 val; struct kvm_device_attr attr = { .group = KVM_LOONGARCH_VCPU_CPUCFG, .attr = CPUCFG_KVM_FEATURE, - .addr = (uint64_t)&val, + .addr = (u64)&val, }; err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr); @@ -428,15 +428,15 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu) return val & BIT(KVM_FEATURE_STEAL_TIME); } -static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) { int err; - uint64_t st_gpa; + u64 st_gpa; struct kvm_vm *vm = vcpu->vm; struct kvm_device_attr attr = { .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL, .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA, - .addr = (uint64_t)&st_gpa, + .addr = (u64)&st_gpa, }; /* ST_GPA_BASE is identity mapped */ @@ -451,7 +451,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA"); } -static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) { struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); @@ -461,6 +461,11 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) ksft_print_msg(" version: %d\n", st->version); ksft_print_msg(" preempted: %d\n", st->preempted); } + +static void 
check_steal_time_uapi(void) +{ + +} #endif static void *do_steal_time(void *arg) diff --git a/tools/testing/selftests/kvm/system_counter_offset_test.c b/tools/testing/selftests/kvm/system_counter_offset_test.c index 513d421a9bff85..dc5e30b7b77fa3 100644 --- a/tools/testing/selftests/kvm/system_counter_offset_test.c +++ b/tools/testing/selftests/kvm/system_counter_offset_test.c @@ -17,7 +17,7 @@ #ifdef __x86_64__ struct test_case { - uint64_t tsc_offset; + u64 tsc_offset; }; static struct test_case test_cases[] = { @@ -39,12 +39,12 @@ static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test) &test->tsc_offset); } -static uint64_t guest_read_system_counter(struct test_case *test) +static u64 guest_read_system_counter(struct test_case *test) { return rdtsc(); } -static uint64_t host_read_guest_system_counter(struct test_case *test) +static u64 host_read_guest_system_counter(struct test_case *test) { return rdtsc() + test->tsc_offset; } @@ -69,9 +69,9 @@ static void guest_main(void) } } -static void handle_sync(struct ucall *uc, uint64_t start, uint64_t end) +static void handle_sync(struct ucall *uc, u64 start, u64 end) { - uint64_t obs = uc->args[2]; + u64 obs = uc->args[2]; TEST_ASSERT(start <= obs && obs <= end, "unexpected system counter value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]", @@ -88,7 +88,7 @@ static void handle_abort(struct ucall *uc) static void enter_guest(struct kvm_vcpu *vcpu) { - uint64_t start, end; + u64 start, end; struct ucall uc; int i; diff --git a/tools/testing/selftests/kvm/x86/amx_test.c b/tools/testing/selftests/kvm/x86/amx_test.c index 37b166260ee3f1..4e63da2b1889cb 100644 --- a/tools/testing/selftests/kvm/x86/amx_test.c +++ b/tools/testing/selftests/kvm/x86/amx_test.c @@ -80,10 +80,10 @@ static inline void __tilerelease(void) asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::); } -static inline void __xsavec(struct xstate *xstate, uint64_t rfbm) +static inline void __xsavec(struct xstate *xstate, u64 
rfbm) { - uint32_t rfbm_lo = rfbm; - uint32_t rfbm_hi = rfbm >> 32; + u32 rfbm_lo = rfbm; + u32 rfbm_hi = rfbm >> 32; asm volatile("xsavec (%%rdi)" : : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi) @@ -236,7 +236,7 @@ int main(int argc, char *argv[]) struct kvm_x86_state *state; struct kvm_x86_state *tile_state = NULL; int xsave_restore_size; - vm_vaddr_t amx_cfg, tiledata, xstate; + gva_t amx_cfg, tiledata, xstate; struct ucall uc; int ret; @@ -263,15 +263,15 @@ int main(int argc, char *argv[]) vcpu_regs_get(vcpu, ®s1); /* amx cfg for guest_code */ - amx_cfg = vm_vaddr_alloc_page(vm); + amx_cfg = vm_alloc_page(vm); memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize()); /* amx tiledata for guest_code */ - tiledata = vm_vaddr_alloc_pages(vm, 2); + tiledata = vm_alloc_pages(vm, 2); memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize()); /* XSAVE state for guest_code */ - xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); + xstate = vm_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate); diff --git a/tools/testing/selftests/kvm/x86/aperfmperf_test.c b/tools/testing/selftests/kvm/x86/aperfmperf_test.c index 8b15a13df93949..c91660103137b0 100644 --- a/tools/testing/selftests/kvm/x86/aperfmperf_test.c +++ b/tools/testing/selftests/kvm/x86/aperfmperf_test.c @@ -35,9 +35,9 @@ static int open_dev_msr(int cpu) return open_path_or_exit(path, O_RDONLY); } -static uint64_t read_dev_msr(int msr_fd, uint32_t msr) +static u64 read_dev_msr(int msr_fd, u32 msr) { - uint64_t data; + u64 data; ssize_t rc; rc = pread(msr_fd, &data, sizeof(data), msr); @@ -107,8 +107,8 @@ static void guest_code(void *nested_test_data) static void guest_no_aperfmperf(void) { - uint64_t msr_val; - uint8_t vector; + u64 msr_val; + u8 vector; vector = rdmsr_safe(MSR_IA32_APERF, &msr_val); GUEST_ASSERT(vector == GP_VECTOR); @@ -122,8 +122,8 @@ 
static void guest_no_aperfmperf(void) int main(int argc, char *argv[]) { const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX); - uint64_t host_aperf_before, host_mperf_before; - vm_vaddr_t nested_test_data_gva; + u64 host_aperf_before, host_mperf_before; + gva_t nested_test_data_gva; struct kvm_vcpu *vcpu; struct kvm_vm *vm; int msr_fd, cpu, i; @@ -166,8 +166,8 @@ int main(int argc, char *argv[]) host_mperf_before = read_dev_msr(msr_fd, MSR_IA32_MPERF); for (i = 0; i <= NUM_ITERATIONS * (1 + has_nested); i++) { - uint64_t host_aperf_after, host_mperf_after; - uint64_t guest_aperf, guest_mperf; + u64 host_aperf_after, host_mperf_after; + u64 guest_aperf, guest_mperf; struct ucall uc; vcpu_run(vcpu); diff --git a/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c b/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c index f8916bb3440501..404f0028e110ef 100644 --- a/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c +++ b/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c @@ -19,8 +19,8 @@ * timer frequency. */ static const struct { - const uint32_t tdcr; - const uint32_t divide_count; + const u32 tdcr; + const u32 divide_count; } tdcrs[] = { {0x0, 2}, {0x1, 4}, @@ -42,12 +42,12 @@ static void apic_enable(void) xapic_enable(); } -static uint32_t apic_read_reg(unsigned int reg) +static u32 apic_read_reg(unsigned int reg) { return is_x2apic ? 
x2apic_read_reg(reg) : xapic_read_reg(reg); } -static void apic_write_reg(unsigned int reg, uint32_t val) +static void apic_write_reg(unsigned int reg, u32 val) { if (is_x2apic) x2apic_write_reg(reg, val); @@ -55,12 +55,12 @@ static void apic_write_reg(unsigned int reg, uint32_t val) xapic_write_reg(reg, val); } -static void apic_guest_code(uint64_t apic_hz, uint64_t delay_ms) +static void apic_guest_code(u64 apic_hz, u64 delay_ms) { - uint64_t tsc_hz = guest_tsc_khz * 1000; - const uint32_t tmict = ~0u; - uint64_t tsc0, tsc1, freq; - uint32_t tmcct; + u64 tsc_hz = guest_tsc_khz * 1000; + const u32 tmict = ~0u; + u64 tsc0, tsc1, freq; + u32 tmcct; int i; apic_enable(); @@ -121,7 +121,7 @@ static void test_apic_bus_clock(struct kvm_vcpu *vcpu) } } -static void run_apic_bus_clock_test(uint64_t apic_hz, uint64_t delay_ms, +static void run_apic_bus_clock_test(u64 apic_hz, u64 delay_ms, bool x2apic) { struct kvm_vcpu *vcpu; @@ -168,8 +168,8 @@ int main(int argc, char *argv[]) * Arbitrarilty default to 25MHz for the APIC bus frequency, which is * different enough from the default 1GHz to be interesting. 
*/ - uint64_t apic_hz = 25 * 1000 * 1000; - uint64_t delay_ms = 100; + u64 apic_hz = 25 * 1000 * 1000; + u64 delay_ms = 100; int opt; TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_APIC_BUS_CYCLES_NS)); diff --git a/tools/testing/selftests/kvm/x86/cpuid_test.c b/tools/testing/selftests/kvm/x86/cpuid_test.c index f9ed14996977ab..ef0ddd24088746 100644 --- a/tools/testing/selftests/kvm/x86/cpuid_test.c +++ b/tools/testing/selftests/kvm/x86/cpuid_test.c @@ -140,10 +140,10 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage) } } -struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid) +struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, gva_t *p_gva, struct kvm_cpuid2 *cpuid) { int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]); - vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR); + gva_t gva = vm_alloc(vm, size, KVM_UTIL_MIN_VADDR); struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva); memcpy(guest_cpuids, cpuid, size); @@ -217,7 +217,7 @@ static void test_get_cpuid2(struct kvm_vcpu *vcpu) int main(void) { struct kvm_vcpu *vcpu; - vm_vaddr_t cpuid_gva; + gva_t cpuid_gva; struct kvm_vm *vm; int stage; diff --git a/tools/testing/selftests/kvm/x86/debug_regs.c b/tools/testing/selftests/kvm/x86/debug_regs.c index 2d814c1d1dc442..0dfaf03cd0a026 100644 --- a/tools/testing/selftests/kvm/x86/debug_regs.c +++ b/tools/testing/selftests/kvm/x86/debug_regs.c @@ -16,7 +16,7 @@ #define IRQ_VECTOR 0xAA /* For testing data access debug BP */ -uint32_t guest_value; +u32 guest_value; extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start; @@ -86,7 +86,7 @@ int main(void) struct kvm_run *run; struct kvm_vm *vm; struct ucall uc; - uint64_t cmd; + u64 cmd; int i; /* Instruction lengths starting at ss_start */ int ss_size[6] = { diff --git a/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c index b0d2b04a7ff2be..388ba4101f9736 
100644 --- a/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c +++ b/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c @@ -23,7 +23,7 @@ #define SLOTS 2 #define ITERATIONS 2 -static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS_HUGETLB; @@ -33,10 +33,10 @@ static int iteration; static int vcpu_last_completed_iteration[KVM_MAX_VCPUS]; struct kvm_page_stats { - uint64_t pages_4k; - uint64_t pages_2m; - uint64_t pages_1g; - uint64_t hugepages; + u64 pages_4k; + u64 pages_2m; + u64 pages_1g; + u64 hugepages; }; static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage) @@ -89,9 +89,9 @@ static void run_test(enum vm_guest_mode mode, void *unused) { struct kvm_vm *vm; unsigned long **bitmaps; - uint64_t guest_num_pages; - uint64_t host_num_pages; - uint64_t pages_per_slot; + u64 guest_num_pages; + u64 host_num_pages; + u64 pages_per_slot; int i; struct kvm_page_stats stats_populated; struct kvm_page_stats stats_dirty_logging_enabled; diff --git a/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c b/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c index af7c901033966a..5b3aef109cfc5c 100644 --- a/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c +++ b/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c @@ -29,13 +29,13 @@ * SMI handler: runs in real-address mode. * Reports SMRAM_STAGE via port IO, then does RSM. 
*/ -static uint8_t smi_handler[] = { +static u8 smi_handler[] = { 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 0x0f, 0xaa, /* rsm */ }; -static inline void sync_with_host(uint64_t phase) +static inline void sync_with_host(u64 phase) { asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n" : "+a" (phase)); @@ -73,7 +73,7 @@ static void guest_code(struct vmx_pages *vmx_pages, int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; + gva_t vmx_pages_gva = 0, hv_pages_gva = 0; struct hyperv_test_pages *hv; struct hv_enlightened_vmcs *evmcs; struct kvm_vcpu *vcpu; diff --git a/tools/testing/selftests/kvm/x86/fastops_test.c b/tools/testing/selftests/kvm/x86/fastops_test.c index 8926cfe0e20999..c0d30ccd876746 100644 --- a/tools/testing/selftests/kvm/x86/fastops_test.c +++ b/tools/testing/selftests/kvm/x86/fastops_test.c @@ -15,7 +15,7 @@ "pop %[flags]\n\t" #define flags_constraint(flags_val) [flags]"=r"(flags_val) -#define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val) +#define bt_constraint(__bt_val) [bt_val]"rm"((u32)__bt_val) #define guest_execute_fastop_1(FEP, insn, __val, __flags) \ ({ \ @@ -28,17 +28,17 @@ #define guest_test_fastop_1(insn, type_t, __val) \ ({ \ type_t val = __val, ex_val = __val, input = __val; \ - uint64_t flags, ex_flags; \ + u64 flags, ex_flags; \ \ guest_execute_fastop_1("", insn, ex_val, ex_flags); \ guest_execute_fastop_1(KVM_FEP, insn, val, flags); \ \ __GUEST_ASSERT(val == ex_val, \ "Wanted 0x%lx for '%s 0x%lx', got 0x%lx", \ - (uint64_t)ex_val, insn, (uint64_t)input, (uint64_t)val); \ + (u64)ex_val, insn, (u64)input, (u64)val); \ __GUEST_ASSERT(flags == ex_flags, \ "Wanted flags 0x%lx for '%s 0x%lx', got 0x%lx", \ - ex_flags, insn, (uint64_t)input, flags); \ + ex_flags, insn, (u64)input, flags); \ }) #define guest_execute_fastop_2(FEP, insn, __input, __output, __flags) \ @@ -52,18 +52,18 @@ #define guest_test_fastop_2(insn, type_t, __val1, __val2) \ ({ \ type_t 
input = __val1, input2 = __val2, output = __val2, ex_output = __val2; \ - uint64_t flags, ex_flags; \ + u64 flags, ex_flags; \ \ guest_execute_fastop_2("", insn, input, ex_output, ex_flags); \ guest_execute_fastop_2(KVM_FEP, insn, input, output, flags); \ \ __GUEST_ASSERT(output == ex_output, \ "Wanted 0x%lx for '%s 0x%lx 0x%lx', got 0x%lx", \ - (uint64_t)ex_output, insn, (uint64_t)input, \ - (uint64_t)input2, (uint64_t)output); \ + (u64)ex_output, insn, (u64)input, \ + (u64)input2, (u64)output); \ __GUEST_ASSERT(flags == ex_flags, \ "Wanted flags 0x%lx for '%s 0x%lx, 0x%lx', got 0x%lx", \ - ex_flags, insn, (uint64_t)input, (uint64_t)input2, flags); \ + ex_flags, insn, (u64)input, (u64)input2, flags); \ }) #define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags) \ @@ -77,25 +77,25 @@ #define guest_test_fastop_cl(insn, type_t, __val1, __val2) \ ({ \ type_t output = __val2, ex_output = __val2, input = __val2; \ - uint8_t shift = __val1; \ - uint64_t flags, ex_flags; \ + u8 shift = __val1; \ + u64 flags, ex_flags; \ \ guest_execute_fastop_cl("", insn, shift, ex_output, ex_flags); \ guest_execute_fastop_cl(KVM_FEP, insn, shift, output, flags); \ \ __GUEST_ASSERT(output == ex_output, \ "Wanted 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \ - (uint64_t)ex_output, insn, shift, (uint64_t)input, \ - (uint64_t)output); \ + (u64)ex_output, insn, shift, (u64)input, \ + (u64)output); \ __GUEST_ASSERT(flags == ex_flags, \ "Wanted flags 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \ - ex_flags, insn, shift, (uint64_t)input, flags); \ + ex_flags, insn, shift, (u64)input, flags); \ }) #define guest_execute_fastop_div(__KVM_ASM_SAFE, insn, __a, __d, __rm, __flags) \ ({ \ - uint64_t ign_error_code; \ - uint8_t vector; \ + u64 ign_error_code; \ + u8 vector; \ \ __asm__ __volatile__(fastop(__KVM_ASM_SAFE(insn " %[denom]")) \ : "+a"(__a), "+d"(__d), flags_constraint(__flags), \ @@ -109,8 +109,8 @@ ({ \ type_t _a = __val1, _d = __val1, rm = __val2; \ type_t a = _a, d = _d, ex_a = _a, 
ex_d = _d; \ - uint64_t flags, ex_flags; \ - uint8_t v, ex_v; \ + u64 flags, ex_flags; \ + u8 v, ex_v; \ \ ex_v = guest_execute_fastop_div(KVM_ASM_SAFE, insn, ex_a, ex_d, rm, ex_flags); \ v = guest_execute_fastop_div(KVM_ASM_SAFE_FEP, insn, a, d, rm, flags); \ @@ -118,17 +118,17 @@ GUEST_ASSERT_EQ(v, ex_v); \ __GUEST_ASSERT(v == ex_v, \ "Wanted vector 0x%x for '%s 0x%lx:0x%lx/0x%lx', got 0x%x", \ - ex_v, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, v); \ + ex_v, insn, (u64)_a, (u64)_d, (u64)rm, v); \ __GUEST_ASSERT(a == ex_a && d == ex_d, \ "Wanted 0x%lx:0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx:0x%lx",\ - (uint64_t)ex_a, (uint64_t)ex_d, insn, (uint64_t)_a, \ - (uint64_t)_d, (uint64_t)rm, (uint64_t)a, (uint64_t)d); \ + (u64)ex_a, (u64)ex_d, insn, (u64)_a, \ + (u64)_d, (u64)rm, (u64)a, (u64)d); \ __GUEST_ASSERT(v || ex_v || (flags == ex_flags), \ "Wanted flags 0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx", \ - ex_flags, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, flags);\ + ex_flags, insn, (u64)_a, (u64)_d, (u64)rm, flags);\ }) -static const uint64_t vals[] = { +static const u64 vals[] = { 0, 1, 2, @@ -185,10 +185,10 @@ if (sizeof(type_t) != 1) { \ static void guest_code(void) { - guest_test_fastops(uint8_t, "b"); - guest_test_fastops(uint16_t, "w"); - guest_test_fastops(uint32_t, "l"); - guest_test_fastops(uint64_t, "q"); + guest_test_fastops(u8, "b"); + guest_test_fastops(u16, "w"); + guest_test_fastops(u32, "l"); + guest_test_fastops(u64, "q"); GUEST_DONE(); } diff --git a/tools/testing/selftests/kvm/x86/feature_msrs_test.c b/tools/testing/selftests/kvm/x86/feature_msrs_test.c index a72f13ae2edbb0..15855070177186 100644 --- a/tools/testing/selftests/kvm/x86/feature_msrs_test.c +++ b/tools/testing/selftests/kvm/x86/feature_msrs_test.c @@ -12,7 +12,7 @@ #include "kvm_util.h" #include "processor.h" -static bool is_kvm_controlled_msr(uint32_t msr) +static bool is_kvm_controlled_msr(u32 msr) { return msr == MSR_IA32_VMX_CR0_FIXED1 || msr == 
MSR_IA32_VMX_CR4_FIXED1; } @@ -21,7 +21,7 @@ static bool is_kvm_controlled_msr(uint32_t msr) * For VMX MSRs with a "true" variant, KVM requires userspace to set the "true" * MSR, and doesn't allow setting the hidden version. */ -static bool is_hidden_vmx_msr(uint32_t msr) +static bool is_hidden_vmx_msr(u32 msr) { switch (msr) { case MSR_IA32_VMX_PINBASED_CTLS: @@ -34,15 +34,15 @@ static bool is_hidden_vmx_msr(uint32_t msr) } } -static bool is_quirked_msr(uint32_t msr) +static bool is_quirked_msr(u32 msr) { return msr != MSR_AMD64_DE_CFG; } -static void test_feature_msr(uint32_t msr) +static void test_feature_msr(u32 msr) { - const uint64_t supported_mask = kvm_get_feature_msr(msr); - uint64_t reset_value = is_quirked_msr(msr) ? supported_mask : 0; + const u64 supported_mask = kvm_get_feature_msr(msr); + u64 reset_value = is_quirked_msr(msr) ? supported_mask : 0; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86/fix_hypercall_test.c index 00b6e85735dd01..753a0e730ea8d8 100644 --- a/tools/testing/selftests/kvm/x86/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86/fix_hypercall_test.c @@ -26,18 +26,18 @@ static void guest_ud_handler(struct ex_regs *regs) regs->rip += HYPERCALL_INSN_SIZE; } -static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 }; -static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 }; +static const u8 vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 }; +static const u8 svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 }; -extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE]; -static uint64_t do_sched_yield(uint8_t apic_id) +extern u8 hypercall_insn[HYPERCALL_INSN_SIZE]; +static u64 do_sched_yield(u8 apic_id) { - uint64_t ret; + u64 ret; asm volatile("hypercall_insn:\n\t" ".byte 0xcc,0xcc,0xcc\n\t" : "=a"(ret) - : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id) + : "a"((u64)KVM_HC_SCHED_YIELD), 
"b"((u64)apic_id) : "memory"); return ret; @@ -45,9 +45,9 @@ static uint64_t do_sched_yield(uint8_t apic_id) static void guest_main(void) { - const uint8_t *native_hypercall_insn; - const uint8_t *other_hypercall_insn; - uint64_t ret; + const u8 *native_hypercall_insn; + const u8 *other_hypercall_insn; + u64 ret; if (host_cpu_is_intel) { native_hypercall_insn = vmx_vmcall; @@ -72,7 +72,7 @@ static void guest_main(void) * the "right" hypercall. */ if (quirk_disabled) { - GUEST_ASSERT(ret == (uint64_t)-EFAULT); + GUEST_ASSERT(ret == (u64)-EFAULT); GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE)); } else { diff --git a/tools/testing/selftests/kvm/x86/flds_emulation.h b/tools/testing/selftests/kvm/x86/flds_emulation.h index 37b1a9f5286447..fd6b6c67199a1d 100644 --- a/tools/testing/selftests/kvm/x86/flds_emulation.h +++ b/tools/testing/selftests/kvm/x86/flds_emulation.h @@ -12,7 +12,7 @@ * KVM to emulate the instruction (e.g. by providing an MMIO address) to * exercise emulation failures. 
*/ -static inline void flds(uint64_t address) +static inline void flds(u64 address) { __asm__ __volatile__(FLDS_MEM_EAX :: "a"(address)); } @@ -21,8 +21,8 @@ static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_regs regs; - uint8_t *insn_bytes; - uint64_t flags; + u8 *insn_bytes; + u64 flags; TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); diff --git a/tools/testing/selftests/kvm/x86/hwcr_msr_test.c b/tools/testing/selftests/kvm/x86/hwcr_msr_test.c index 10b1b0ba374e66..8e20a03b332987 100644 --- a/tools/testing/selftests/kvm/x86/hwcr_msr_test.c +++ b/tools/testing/selftests/kvm/x86/hwcr_msr_test.c @@ -10,11 +10,11 @@ void test_hwcr_bit(struct kvm_vcpu *vcpu, unsigned int bit) { - const uint64_t ignored = BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8); - const uint64_t valid = BIT_ULL(18) | BIT_ULL(24); - const uint64_t legal = ignored | valid; - uint64_t val = BIT_ULL(bit); - uint64_t actual; + const u64 ignored = BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8); + const u64 valid = BIT_ULL(18) | BIT_ULL(24); + const u64 legal = ignored | valid; + u64 val = BIT_ULL(bit); + u64 actual; int r; r = _vcpu_set_msr(vcpu, MSR_K7_HWCR, val); diff --git a/tools/testing/selftests/kvm/x86/hyperv_clock.c b/tools/testing/selftests/kvm/x86/hyperv_clock.c index e058bc676cd693..c083cea546dcec 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_clock.c +++ b/tools/testing/selftests/kvm/x86/hyperv_clock.c @@ -98,7 +98,7 @@ static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page) GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000); } -static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa) +static void guest_main(struct ms_hyperv_tsc_page *tsc_page, gpa_t tsc_page_gpa) { u64 tsc_scale, tsc_offset; @@ -208,7 +208,7 @@ int main(void) struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct ucall uc; - vm_vaddr_t tsc_page_gva; + gva_t tsc_page_gva; int stage; 
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME)); @@ -218,7 +218,7 @@ int main(void) vcpu_set_hv_cpuid(vcpu); - tsc_page_gva = vm_vaddr_alloc_page(vm); + tsc_page_gva = vm_alloc_page(vm); memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize()); TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0, "TSC page has to be page aligned"); diff --git a/tools/testing/selftests/kvm/x86/hyperv_evmcs.c b/tools/testing/selftests/kvm/x86/hyperv_evmcs.c index 74cf1966130908..c7fa114aee20fc 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_evmcs.c +++ b/tools/testing/selftests/kvm/x86/hyperv_evmcs.c @@ -30,7 +30,7 @@ static void guest_nmi_handler(struct ex_regs *regs) { } -static inline void rdmsr_from_l2(uint32_t msr) +static inline void rdmsr_from_l2(u32 msr) { /* Currently, L1 doesn't preserve GPRs during vmexits. */ __asm__ __volatile__ ("rdmsr" : : "c"(msr) : @@ -76,7 +76,7 @@ void l2_guest_code(void) } void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages, - vm_vaddr_t hv_hcall_page_gpa) + gpa_t hv_hcall_page_gpa) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; @@ -231,8 +231,8 @@ static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm, int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; - vm_vaddr_t hcall_page; + gva_t vmx_pages_gva = 0, hv_pages_gva = 0; + gva_t hcall_page; struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -246,7 +246,7 @@ int main(int argc, char *argv[]) vm = vm_create_with_one_vcpu(&vcpu, guest_code); - hcall_page = vm_vaddr_alloc_pages(vm, 1); + hcall_page = vm_alloc_pages(vm, 1); memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize()); vcpu_set_hv_cpuid(vcpu); diff --git a/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c index 949e08e98f3158..ae047db7b1be03 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c +++ 
b/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c @@ -15,19 +15,19 @@ /* Any value is fine */ #define EXT_CAPABILITIES 0xbull -static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, - vm_vaddr_t out_pg_gva) +static void guest_code(gpa_t in_pg_gpa, gpa_t out_pg_gpa, + gva_t out_pg_gva) { - uint64_t *output_gva; + u64 *output_gva; wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa); - output_gva = (uint64_t *)out_pg_gva; + output_gva = (u64 *)out_pg_gva; hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa); - /* TLFS states output will be a uint64_t value */ + /* TLFS states output will be a u64 value */ GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES); GUEST_DONE(); @@ -35,12 +35,12 @@ static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, int main(void) { - vm_vaddr_t hcall_out_page; - vm_vaddr_t hcall_in_page; + gva_t hcall_out_page; + gva_t hcall_in_page; struct kvm_vcpu *vcpu; struct kvm_run *run; struct kvm_vm *vm; - uint64_t *outval; + u64 *outval; struct ucall uc; TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID)); @@ -57,11 +57,11 @@ int main(void) vcpu_set_hv_cpuid(vcpu); /* Hypercall input */ - hcall_in_page = vm_vaddr_alloc_pages(vm, 1); + hcall_in_page = vm_alloc_pages(vm, 1); memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size); /* Hypercall output */ - hcall_out_page = vm_vaddr_alloc_pages(vm, 1); + hcall_out_page = vm_alloc_pages(vm, 1); memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size); vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page), diff --git a/tools/testing/selftests/kvm/x86/hyperv_features.c b/tools/testing/selftests/kvm/x86/hyperv_features.c index 130b9ce7e5ddd4..7347f1fe515738 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86/hyperv_features.c @@ -22,27 +22,27 @@ KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0) struct msr_data { - uint32_t idx; + u32 idx; bool 
fault_expected; bool write; u64 write_val; }; struct hcall_data { - uint64_t control; - uint64_t expect; + u64 control; + u64 expect; bool ud_expected; }; -static bool is_write_only_msr(uint32_t msr) +static bool is_write_only_msr(u32 msr) { return msr == HV_X64_MSR_EOI; } static void guest_msr(struct msr_data *msr) { - uint8_t vector = 0; - uint64_t msr_val = 0; + u8 vector = 0; + u64 msr_val = 0; GUEST_ASSERT(msr->idx); @@ -82,10 +82,10 @@ static void guest_msr(struct msr_data *msr) GUEST_DONE(); } -static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) +static void guest_hcall(gpa_t pgs_gpa, struct hcall_data *hcall) { u64 res, input, output; - uint8_t vector; + u8 vector; GUEST_ASSERT_NE(hcall->control, 0); @@ -134,14 +134,14 @@ static void guest_test_msrs_access(void) struct kvm_vm *vm; struct ucall uc; int stage = 0; - vm_vaddr_t msr_gva; + gva_t msr_gva; struct msr_data *msr; bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC); while (true) { vm = vm_create_with_one_vcpu(&vcpu, guest_msr); - msr_gva = vm_vaddr_alloc_page(vm); + msr_gva = vm_alloc_page(vm); memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); msr = addr_gva2hva(vm, msr_gva); @@ -523,17 +523,17 @@ static void guest_test_hcalls_access(void) struct kvm_vm *vm; struct ucall uc; int stage = 0; - vm_vaddr_t hcall_page, hcall_params; + gva_t hcall_page, hcall_params; struct hcall_data *hcall; while (true) { vm = vm_create_with_one_vcpu(&vcpu, guest_hcall); /* Hypercall input/output */ - hcall_page = vm_vaddr_alloc_pages(vm, 2); + hcall_page = vm_alloc_pages(vm, 2); memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); - hcall_params = vm_vaddr_alloc_page(vm); + hcall_params = vm_alloc_page(vm); memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); hcall = addr_gva2hva(vm, hcall_params); diff --git a/tools/testing/selftests/kvm/x86/hyperv_ipi.c b/tools/testing/selftests/kvm/x86/hyperv_ipi.c index ca61836c4e325a..771535f9aad3c3 100644 --- 
a/tools/testing/selftests/kvm/x86/hyperv_ipi.c +++ b/tools/testing/selftests/kvm/x86/hyperv_ipi.c @@ -18,7 +18,7 @@ #define IPI_VECTOR 0xfe -static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1]; +static volatile u64 ipis_rcvd[RECEIVER_VCPU_ID_2 + 1]; struct hv_vpset { u64 format; @@ -45,13 +45,13 @@ struct hv_send_ipi_ex { struct hv_vpset vp_set; }; -static inline void hv_init(vm_vaddr_t pgs_gpa) +static inline void hv_init(gpa_t pgs_gpa) { wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa); } -static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa) +static void receiver_code(void *hcall_page, gpa_t pgs_gpa) { u32 vcpu_id; @@ -85,7 +85,7 @@ static inline void nop_loop(void) asm volatile("nop"); } -static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa) +static void sender_guest_code(void *hcall_page, gpa_t pgs_gpa) { struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page; struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page; @@ -243,7 +243,7 @@ int main(int argc, char *argv[]) { struct kvm_vm *vm; struct kvm_vcpu *vcpu[3]; - vm_vaddr_t hcall_page; + gva_t hcall_page; pthread_t threads[2]; int stage = 1, r; struct ucall uc; @@ -253,7 +253,7 @@ int main(int argc, char *argv[]) vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code); /* Hypercall input/output */ - hcall_page = vm_vaddr_alloc_pages(vm, 2); + hcall_page = vm_alloc_pages(vm, 2); memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); diff --git a/tools/testing/selftests/kvm/x86/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86/hyperv_svm_test.c index 0ddb63229bcbb8..7a62f6a9d606d2 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_svm_test.c +++ b/tools/testing/selftests/kvm/x86/hyperv_svm_test.c @@ -21,7 +21,7 @@ #define L2_GUEST_STACK_SIZE 256 /* Exit to L1 from L2 with RDMSR instruction */ -static inline void rdmsr_from_l2(uint32_t msr) +static inline void rdmsr_from_l2(u32 msr) { /* Currently, L1 doesn't 
preserve GPRs during vmexits. */ __asm__ __volatile__ ("rdmsr" : : "c"(msr) : @@ -67,7 +67,7 @@ void l2_guest_code(void) static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm, struct hyperv_test_pages *hv_pages, - vm_vaddr_t pgs_gpa) + gpa_t pgs_gpa) { unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; struct vmcb *vmcb = svm->vmcb; @@ -149,8 +149,8 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm, int main(int argc, char *argv[]) { - vm_vaddr_t nested_gva = 0, hv_pages_gva = 0; - vm_vaddr_t hcall_page; + gva_t nested_gva = 0, hv_pages_gva = 0; + gva_t hcall_page; struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct ucall uc; @@ -165,7 +165,7 @@ int main(int argc, char *argv[]) vcpu_alloc_svm(vm, &nested_gva); vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva); - hcall_page = vm_vaddr_alloc_pages(vm, 1); + hcall_page = vm_alloc_pages(vm, 1); memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize()); vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page)); diff --git a/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c b/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c index c542cc4762b154..15ee8b7bfc114b 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c +++ b/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c @@ -61,14 +61,14 @@ struct hv_tlb_flush_ex { * - GVAs of the test pages' PTEs */ struct test_data { - vm_vaddr_t hcall_gva; - vm_paddr_t hcall_gpa; - vm_vaddr_t test_pages; - vm_vaddr_t test_pages_pte[NTEST_PAGES]; + gva_t hcall_gva; + gpa_t hcall_gpa; + gva_t test_pages; + gva_t test_pages_pte[NTEST_PAGES]; }; /* 'Worker' vCPU code checking the contents of the test page */ -static void worker_guest_code(vm_vaddr_t test_data) +static void worker_guest_code(gva_t test_data) { struct test_data *data = (struct test_data *)test_data; u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX); @@ -133,12 +133,12 @@ static void set_expected_val(void *addr, u64 val, int vcpu_id) * Update PTEs 
swapping two test pages. * TODO: use swap()/xchg() when these are provided. */ -static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2) +static void swap_two_test_pages(gpa_t pte_gva1, gpa_t pte_gva2) { - uint64_t tmp = *(uint64_t *)pte_gva1; + u64 tmp = *(u64 *)pte_gva1; - *(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2; - *(uint64_t *)pte_gva2 = tmp; + *(u64 *)pte_gva1 = *(u64 *)pte_gva2; + *(u64 *)pte_gva2 = tmp; } /* @@ -196,12 +196,12 @@ static inline void post_test(struct test_data *data, u64 exp1, u64 exp2) #define TESTVAL2 0x0202020202020202 /* Main vCPU doing the test */ -static void sender_guest_code(vm_vaddr_t test_data) +static void sender_guest_code(gva_t test_data) { struct test_data *data = (struct test_data *)test_data; struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva; struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva; - vm_paddr_t hcall_gpa = data->hcall_gpa; + gpa_t hcall_gpa = data->hcall_gpa; int i, stage = 1; wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); @@ -581,9 +581,9 @@ int main(int argc, char *argv[]) struct kvm_vm *vm; struct kvm_vcpu *vcpu[3]; pthread_t threads[2]; - vm_vaddr_t test_data_page, gva; - vm_paddr_t gpa; - uint64_t *pte; + gva_t test_data_page, gva; + gpa_t gpa; + u64 *pte; struct test_data *data; struct ucall uc; int stage = 1, r, i; @@ -593,11 +593,11 @@ int main(int argc, char *argv[]) vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code); /* Test data page */ - test_data_page = vm_vaddr_alloc_page(vm); + test_data_page = vm_alloc_page(vm); data = (struct test_data *)addr_gva2hva(vm, test_data_page); /* Hypercall input/output */ - data->hcall_gva = vm_vaddr_alloc_pages(vm, 2); + data->hcall_gva = vm_alloc_pages(vm, 2); data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva); memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE); @@ -606,7 +606,7 @@ int main(int argc, char *argv[]) * and the test will swap their mappings. 
The third page keeps the indication * about the current state of mappings. */ - data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1); + data->test_pages = vm_alloc_pages(vm, NTEST_PAGES + 1); for (i = 0; i < NTEST_PAGES; i++) memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i), (u8)(i + 1), PAGE_SIZE); @@ -617,7 +617,7 @@ int main(int argc, char *argv[]) * Get PTE pointers for test pages and map them inside the guest. * Use separate page for each PTE for simplicity. */ - gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR); + gva = vm_unused_gva_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR); for (i = 0; i < NTEST_PAGES; i++) { pte = vm_get_pte(vm, data->test_pages + i * PAGE_SIZE); gpa = addr_hva2gpa(vm, pte); diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c index d88500c118eb0c..52014a3210c886 100644 --- a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c +++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c @@ -73,7 +73,7 @@ static void guest_code(void *test_data) int main(int argc, char *argv[]) { const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX); - vm_vaddr_t nested_test_data_gva; + gva_t nested_test_data_gva; struct kvm_vcpu *vcpu; struct kvm_run *run; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/kvm_clock_test.c b/tools/testing/selftests/kvm/x86/kvm_clock_test.c index 5bc12222d87af6..5ad4aeb8e373b6 100644 --- a/tools/testing/selftests/kvm/x86/kvm_clock_test.c +++ b/tools/testing/selftests/kvm/x86/kvm_clock_test.c @@ -17,8 +17,8 @@ #include "processor.h" struct test_case { - uint64_t kvmclock_base; - int64_t realtime_offset; + u64 kvmclock_base; + s64 realtime_offset; }; static struct test_case test_cases[] = { @@ -31,7 +31,7 @@ static struct test_case test_cases[] = { #define GUEST_SYNC_CLOCK(__stage, __val) \ GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0) -static void guest_main(vm_paddr_t pvti_pa, 
struct pvclock_vcpu_time_info *pvti) +static void guest_main(gpa_t pvti_pa, struct pvclock_vcpu_time_info *pvti) { int i; @@ -52,7 +52,7 @@ static inline void assert_flags(struct kvm_clock_data *data) static void handle_sync(struct ucall *uc, struct kvm_clock_data *start, struct kvm_clock_data *end) { - uint64_t obs, exp_lo, exp_hi; + u64 obs, exp_lo, exp_hi; obs = uc->args[2]; exp_lo = start->clock; @@ -135,8 +135,8 @@ static void enter_guest(struct kvm_vcpu *vcpu) int main(void) { struct kvm_vcpu *vcpu; - vm_vaddr_t pvti_gva; - vm_paddr_t pvti_gpa; + gva_t pvti_gva; + gpa_t pvti_gpa; struct kvm_vm *vm; int flags; @@ -147,7 +147,7 @@ int main(void) vm = vm_create_with_one_vcpu(&vcpu, guest_main); - pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000); + pvti_gva = vm_alloc(vm, getpagesize(), 0x10000); pvti_gpa = addr_gva2gpa(vm, pvti_gva); vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva); diff --git a/tools/testing/selftests/kvm/x86/kvm_pv_test.c b/tools/testing/selftests/kvm/x86/kvm_pv_test.c index 1b805cbdb47bf4..8ed5fa6350211d 100644 --- a/tools/testing/selftests/kvm/x86/kvm_pv_test.c +++ b/tools/testing/selftests/kvm/x86/kvm_pv_test.c @@ -13,7 +13,7 @@ #include "processor.h" struct msr_data { - uint32_t idx; + u32 idx; const char *name; }; @@ -40,8 +40,8 @@ static struct msr_data msrs_to_test[] = { static void test_msr(struct msr_data *msr) { - uint64_t ignored; - uint8_t vector; + u64 ignored; + u8 vector; PR_MSR(msr); @@ -53,7 +53,7 @@ static void test_msr(struct msr_data *msr) } struct hcall_data { - uint64_t nr; + u64 nr; const char *name; }; @@ -73,7 +73,7 @@ static struct hcall_data hcalls_to_test[] = { static void test_hcall(struct hcall_data *hc) { - uint64_t r; + u64 r; PR_HCALL(hc); r = kvm_hypercall(hc->nr, 0, 0, 0, 0); diff --git a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c index e45c028d2a7ec0..9c156cf7db0ea3 100644 --- a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c +++ 
b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c @@ -67,7 +67,7 @@ static void guest_monitor_wait(void *arg) int main(int argc, char *argv[]) { - uint64_t disabled_quirks; + u64 disabled_quirks; struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct ucall uc; diff --git a/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c index f001cb836bfa38..761fec29340803 100644 --- a/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c +++ b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c @@ -67,7 +67,7 @@ static void l1_guest_code(void *data) int main(int argc, char *argv[]) { - vm_vaddr_t guest_gva; + gva_t guest_gva; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c b/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c index 619229bbd693b2..0e67cce8357013 100644 --- a/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c +++ b/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c @@ -47,10 +47,10 @@ #define TEST_SYNC_WRITE_FAULT BIT(1) #define TEST_SYNC_NO_FAULT BIT(2) -static void l2_guest_code(vm_vaddr_t base) +static void l2_guest_code(gva_t base) { - vm_vaddr_t page0 = TEST_GUEST_ADDR(base, 0); - vm_vaddr_t page1 = TEST_GUEST_ADDR(base, 1); + gva_t page0 = TEST_GUEST_ADDR(base, 0); + gva_t page1 = TEST_GUEST_ADDR(base, 1); READ_ONCE(*(u64 *)page0); GUEST_SYNC(page0 | TEST_SYNC_READ_FAULT); @@ -143,7 +143,7 @@ static void l1_guest_code(void *data) static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg, unsigned long *bmap) { - vm_vaddr_t gva = arg & ~(PAGE_SIZE - 1); + gva_t gva = arg & ~(PAGE_SIZE - 1); int page_nr, i; /* @@ -198,7 +198,7 @@ static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg, static void test_dirty_log(bool nested_tdp) { - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; unsigned long *bmap; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git 
a/tools/testing/selftests/kvm/x86/nested_emulation_test.c b/tools/testing/selftests/kvm/x86/nested_emulation_test.c index abc824dba04fd4..fb7dcbe53ac73d 100644 --- a/tools/testing/selftests/kvm/x86/nested_emulation_test.c +++ b/tools/testing/selftests/kvm/x86/nested_emulation_test.c @@ -13,8 +13,8 @@ enum { struct emulated_instruction { const char name[32]; - uint8_t opcode[15]; - uint32_t exit_reason[NR_VIRTUALIZATION_FLAVORS]; + u8 opcode[15]; + u32 exit_reason[NR_VIRTUALIZATION_FLAVORS]; }; static struct emulated_instruction instructions[] = { @@ -32,13 +32,13 @@ static struct emulated_instruction instructions[] = { }, }; -static uint8_t kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */ -static uint8_t l2_guest_code[sizeof(kvm_fep) + 15]; -static uint8_t *l2_instruction = &l2_guest_code[sizeof(kvm_fep)]; +static u8 kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */ +static u8 l2_guest_code[sizeof(kvm_fep) + 15]; +static u8 *l2_instruction = &l2_guest_code[sizeof(kvm_fep)]; -static uint32_t get_instruction_length(struct emulated_instruction *insn) +static u32 get_instruction_length(struct emulated_instruction *insn) { - uint32_t i; + u32 i; for (i = 0; i < ARRAY_SIZE(insn->opcode) && insn->opcode[i]; i++) ; @@ -81,8 +81,8 @@ static void guest_code(void *test_data) for (i = 0; i < ARRAY_SIZE(instructions); i++) { struct emulated_instruction *insn = &instructions[i]; - uint32_t insn_len = get_instruction_length(insn); - uint32_t exit_insn_len; + u32 insn_len = get_instruction_length(insn); + u32 exit_insn_len; u32 exit_reason; /* @@ -122,7 +122,7 @@ static void guest_code(void *test_data) int main(int argc, char *argv[]) { - vm_vaddr_t nested_test_data_gva; + gva_t nested_test_data_gva; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c index 3641a42934acb7..186e980aa8eee6 100644 --- 
a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c +++ b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c @@ -72,7 +72,7 @@ static void l2_ss_injected_tf_test(void) } static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector, - uint32_t error_code) + u32 error_code) { struct vmcb *vmcb = svm->vmcb; struct vmcb_control_area *ctrl = &vmcb->control; @@ -111,7 +111,7 @@ static void l1_svm_code(struct svm_test_data *svm) GUEST_DONE(); } -static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code) +static void vmx_run_l2(void *l2_code, int vector, u32 error_code) { GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code)); @@ -216,7 +216,7 @@ static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject) */ int main(int argc, char *argv[]) { - vm_vaddr_t nested_test_data_gva; + gva_t nested_test_data_gva; struct kvm_vcpu_events events; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c b/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c index a6b6da9cf7fe1d..11fd2467d82335 100644 --- a/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c +++ b/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c @@ -78,7 +78,7 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t guest_gva = 0; + gva_t guest_gva = 0; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/nested_set_state_test.c b/tools/testing/selftests/kvm/x86/nested_set_state_test.c index 0f2102b4362915..831380732671a4 100644 --- a/tools/testing/selftests/kvm/x86/nested_set_state_test.c +++ b/tools/testing/selftests/kvm/x86/nested_set_state_test.c @@ -250,14 +250,14 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu) static void vcpu_efer_enable_svm(struct kvm_vcpu *vcpu) { - uint64_t old_efer = vcpu_get_msr(vcpu, MSR_EFER); + u64 old_efer = vcpu_get_msr(vcpu, MSR_EFER); vcpu_set_msr(vcpu, 
MSR_EFER, old_efer | EFER_SVME); } static void vcpu_efer_disable_svm(struct kvm_vcpu *vcpu) { - uint64_t old_efer = vcpu_get_msr(vcpu, MSR_EFER); + u64 old_efer = vcpu_get_msr(vcpu, MSR_EFER); vcpu_set_msr(vcpu, MSR_EFER, old_efer & ~EFER_SVME); } diff --git a/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c index 2839f650e5c9da..f0e4adac47510a 100644 --- a/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c @@ -53,9 +53,9 @@ enum { /* The virtual machine object. */ static struct kvm_vm *vm; -static void check_ia32_tsc_adjust(int64_t max) +static void check_ia32_tsc_adjust(s64 max) { - int64_t adjust; + s64 adjust; adjust = rdmsr(MSR_IA32_TSC_ADJUST); GUEST_SYNC(adjust); @@ -64,7 +64,7 @@ static void check_ia32_tsc_adjust(int64_t max) static void l2_guest_code(void) { - uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; + u64 l1_tsc = rdtsc() - TSC_OFFSET_VALUE; wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); @@ -88,7 +88,7 @@ static void l1_guest_code(void *data) */ if (this_cpu_has(X86_FEATURE_VMX)) { struct vmx_pages *vmx_pages = data; - uint32_t control; + u32 control; GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); GUEST_ASSERT(load_vmcs(vmx_pages)); @@ -117,7 +117,7 @@ static void l1_guest_code(void *data) GUEST_DONE(); } -static void report(int64_t val) +static void report(s64 val) { pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); @@ -125,7 +125,7 @@ static void report(int64_t val) int main(int argc, char *argv[]) { - vm_vaddr_t nested_gva; + gva_t nested_gva; struct kvm_vcpu *vcpu; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || diff --git a/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c index 4260c9e4f48918..190e93af20a140 100644 --- 
a/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c +++ b/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c @@ -19,7 +19,7 @@ /* L2 is scaled up (from L1's perspective) by this factor */ #define L2_SCALE_FACTOR 4ULL -#define TSC_OFFSET_L2 ((uint64_t) -33125236320908) +#define TSC_OFFSET_L2 ((u64)-33125236320908) #define TSC_MULTIPLIER_L2 (L2_SCALE_FACTOR << 48) #define L2_GUEST_STACK_SIZE 64 @@ -35,9 +35,9 @@ enum { USLEEP, UCHECK_L1, UCHECK_L2 }; * measurements, a difference of 1% between the actual and the expected value * is tolerated. */ -static void compare_tsc_freq(uint64_t actual, uint64_t expected) +static void compare_tsc_freq(u64 actual, u64 expected) { - uint64_t tolerance, thresh_low, thresh_high; + u64 tolerance, thresh_low, thresh_high; tolerance = expected / 100; thresh_low = expected - tolerance; @@ -55,7 +55,7 @@ static void compare_tsc_freq(uint64_t actual, uint64_t expected) static void check_tsc_freq(int level) { - uint64_t tsc_start, tsc_end, tsc_freq; + u64 tsc_start, tsc_end, tsc_freq; /* * Reading the TSC twice with about a second's difference should give @@ -106,7 +106,7 @@ static void l1_svm_code(struct svm_test_data *svm) static void l1_vmx_code(struct vmx_pages *vmx_pages) { unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - uint32_t control; + u32 control; /* check that L1's frequency looks alright before launching L2 */ check_tsc_freq(UCHECK_L1); @@ -152,14 +152,14 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t guest_gva = 0; - - uint64_t tsc_start, tsc_end; - uint64_t tsc_khz; - uint64_t l1_scale_factor; - uint64_t l0_tsc_freq = 0; - uint64_t l1_tsc_freq = 0; - uint64_t l2_tsc_freq = 0; + gva_t guest_gva = 0; + + u64 tsc_start, tsc_end; + u64 tsc_khz; + u64 l1_scale_factor; + u64 l0_tsc_freq = 0; + u64 l1_tsc_freq = 0; + u64 l2_tsc_freq = 0; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || kvm_cpu_has(X86_FEATURE_SVM)); diff --git 
a/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c b/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c index 71717118d6924a..85d3f4cc76f395 100644 --- a/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c +++ b/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c @@ -128,7 +128,7 @@ static void l1_guest_code(struct svm_test_data *svm) int main(int argc, char *argv[]) { - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; struct vmcb *test_vmcb[2]; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c index c0d84827f7364e..70950067b989a8 100644 --- a/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c +++ b/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c @@ -32,7 +32,7 @@ #define RETURN_OPCODE 0xC3 /* Call the specified memory address. */ -static void guest_do_CALL(uint64_t target) +static void guest_do_CALL(u64 target) { ((void (*)(void)) target)(); } @@ -46,14 +46,14 @@ static void guest_do_CALL(uint64_t target) */ void guest_code(void) { - uint64_t hpage_1 = HPAGE_GVA; - uint64_t hpage_2 = hpage_1 + (PAGE_SIZE * 512); - uint64_t hpage_3 = hpage_2 + (PAGE_SIZE * 512); + u64 hpage_1 = HPAGE_GVA; + u64 hpage_2 = hpage_1 + (PAGE_SIZE * 512); + u64 hpage_3 = hpage_2 + (PAGE_SIZE * 512); - READ_ONCE(*(uint64_t *)hpage_1); + READ_ONCE(*(u64 *)hpage_1); GUEST_SYNC(1); - READ_ONCE(*(uint64_t *)hpage_2); + READ_ONCE(*(u64 *)hpage_2); GUEST_SYNC(2); guest_do_CALL(hpage_1); @@ -62,10 +62,10 @@ void guest_code(void) guest_do_CALL(hpage_3); GUEST_SYNC(4); - READ_ONCE(*(uint64_t *)hpage_1); + READ_ONCE(*(u64 *)hpage_1); GUEST_SYNC(5); - READ_ONCE(*(uint64_t *)hpage_3); + READ_ONCE(*(u64 *)hpage_3); GUEST_SYNC(6); } @@ -107,7 +107,7 @@ void run_test(int reclaim_period_ms, bool disable_nx_huge_pages, { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t nr_bytes; + u64 nr_bytes; void *hva; int r; diff --git 
a/tools/testing/selftests/kvm/x86/platform_info_test.c b/tools/testing/selftests/kvm/x86/platform_info_test.c index 9cbf283ebc55f4..80bb07e6531cfe 100644 --- a/tools/testing/selftests/kvm/x86/platform_info_test.c +++ b/tools/testing/selftests/kvm/x86/platform_info_test.c @@ -23,8 +23,8 @@ static void guest_code(void) { - uint64_t msr_platform_info; - uint8_t vector; + u64 msr_platform_info; + u8 vector; GUEST_SYNC(true); msr_platform_info = rdmsr(MSR_PLATFORM_INFO); @@ -42,7 +42,7 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t msr_platform_info; + u64 msr_platform_info; struct ucall uc; TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO)); diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c index 3eaa216b96c00b..dc6afac3aa9195 100644 --- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c @@ -30,9 +30,9 @@ #define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS) /* Track which architectural events are supported by hardware. */ -static uint32_t hardware_pmu_arch_events; +static u32 hardware_pmu_arch_events; -static uint8_t kvm_pmu_version; +static u8 kvm_pmu_version; static bool kvm_has_perf_caps; #define X86_PMU_FEATURE_NULL \ @@ -57,7 +57,7 @@ struct kvm_intel_pmu_event { * kvm_x86_pmu_feature use syntax that's only valid in function scope, and the * compiler often thinks the feature definitions aren't compile-time constants. 
*/ -static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx) +static struct kvm_intel_pmu_event intel_event_to_feature(u8 idx) { const struct kvm_intel_pmu_event __intel_event_to_feature[] = { [INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED }, @@ -89,8 +89,8 @@ static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx) static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, void *guest_code, - uint8_t pmu_version, - uint64_t perf_capabilities) + u8 pmu_version, + u64 perf_capabilities) { struct kvm_vm *vm; @@ -132,7 +132,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu) } while (uc.cmd != UCALL_DONE); } -static uint8_t guest_get_pmu_version(void) +static u8 guest_get_pmu_version(void) { /* * Return the effective PMU version, i.e. the minimum between what KVM @@ -141,7 +141,7 @@ static uint8_t guest_get_pmu_version(void) * supported by KVM to verify KVM doesn't freak out and do something * bizarre with an architecturally valid, but unsupported, version. */ - return min_t(uint8_t, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION)); + return min_t(u8, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION)); } /* @@ -153,9 +153,9 @@ static uint8_t guest_get_pmu_version(void) * Sanity check that in all cases, the event doesn't count when it's disabled, * and that KVM correctly emulates the write of an arbitrary value. 
*/ -static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr) +static void guest_assert_event_count(u8 idx, u32 pmc, u32 pmc_msr) { - uint64_t count; + u64 count; count = _rdpmc(pmc); if (!(hardware_pmu_arch_events & BIT(idx))) @@ -236,7 +236,7 @@ do { \ FEP "xor %%eax, %%eax\n\t" \ FEP "xor %%edx, %%edx\n\t" \ "wrmsr\n\t" \ - :: "a"((uint32_t)_value), "d"(_value >> 32), \ + :: "a"((u32)_value), "d"(_value >> 32), \ "c"(_msr), "D"(_msr), [m]"m"(kvm_pmu_version) \ ); \ } while (0) @@ -255,8 +255,8 @@ do { \ guest_assert_event_count(_idx, _pmc, _pmc_msr); \ } while (0) -static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr, - uint32_t ctrl_msr, uint64_t ctrl_msr_value) +static void __guest_test_arch_event(u8 idx, u32 pmc, u32 pmc_msr, + u32 ctrl_msr, u64 ctrl_msr_value) { GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, ""); @@ -264,14 +264,14 @@ static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr, GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP); } -static void guest_test_arch_event(uint8_t idx) +static void guest_test_arch_event(u8 idx) { - uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); - uint32_t pmu_version = guest_get_pmu_version(); + u32 nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); + u32 pmu_version = guest_get_pmu_version(); /* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */ bool guest_has_perf_global_ctrl = pmu_version >= 2; struct kvm_x86_pmu_feature gp_event, fixed_event; - uint32_t base_pmc_msr; + u32 base_pmc_msr; unsigned int i; /* The host side shouldn't invoke this without a guest PMU. 
*/ @@ -289,7 +289,7 @@ static void guest_test_arch_event(uint8_t idx) GUEST_ASSERT(nr_gp_counters); for (i = 0; i < nr_gp_counters; i++) { - uint64_t eventsel = ARCH_PERFMON_EVENTSEL_OS | + u64 eventsel = ARCH_PERFMON_EVENTSEL_OS | ARCH_PERFMON_EVENTSEL_ENABLE | intel_pmu_arch_events[idx]; @@ -320,7 +320,7 @@ static void guest_test_arch_event(uint8_t idx) static void guest_test_arch_events(void) { - uint8_t i; + u8 i; for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) guest_test_arch_event(i); @@ -328,8 +328,8 @@ static void guest_test_arch_events(void) GUEST_DONE(); } -static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities, - uint8_t length, uint32_t unavailable_mask) +static void test_arch_events(u8 pmu_version, u64 perf_capabilities, + u8 length, u32 unavailable_mask) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -373,11 +373,11 @@ __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \ "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx", \ msr, expected, val); -static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success, - uint64_t expected_val) +static void guest_test_rdpmc(u32 rdpmc_idx, bool expect_success, + u64 expected_val) { - uint8_t vector; - uint64_t val; + u8 vector; + u64 val; vector = rdpmc_safe(rdpmc_idx, &val); GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector); @@ -393,19 +393,19 @@ static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success, GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val); } -static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters, - uint8_t nr_counters, uint32_t or_mask) +static void guest_rd_wr_counters(u32 base_msr, u8 nr_possible_counters, + u8 nr_counters, u32 or_mask) { const bool pmu_has_fast_mode = !guest_get_pmu_version(); - uint8_t i; + u8 i; for (i = 0; i < nr_possible_counters; i++) { /* * TODO: Test a value that validates full-width writes and the * width of the counters. 
*/ - const uint64_t test_val = 0xffff; - const uint32_t msr = base_msr + i; + const u64 test_val = 0xffff; + const u32 msr = base_msr + i; /* * Fixed counters are supported if the counter is less than the @@ -418,12 +418,12 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters * KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are * unsupported, i.e. doesn't #GP and reads back '0'. */ - const uint64_t expected_val = expect_success ? test_val : 0; + const u64 expected_val = expect_success ? test_val : 0; const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 && msr != MSR_P6_PERFCTR1; - uint32_t rdpmc_idx; - uint8_t vector; - uint64_t val; + u32 rdpmc_idx; + u8 vector; + u64 val; vector = wrmsr_safe(msr, test_val); GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector); @@ -461,9 +461,9 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters static void guest_test_gp_counters(void) { - uint8_t pmu_version = guest_get_pmu_version(); - uint8_t nr_gp_counters = 0; - uint32_t base_msr; + u8 pmu_version = guest_get_pmu_version(); + u8 nr_gp_counters = 0; + u32 base_msr; if (pmu_version) nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); @@ -477,7 +477,7 @@ static void guest_test_gp_counters(void) * counters, of which there are none. 
*/ if (pmu_version > 1) { - uint64_t global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL); + u64 global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL); if (nr_gp_counters) GUEST_ASSERT_EQ(global_ctrl, GENMASK_ULL(nr_gp_counters - 1, 0)); @@ -495,8 +495,8 @@ static void guest_test_gp_counters(void) GUEST_DONE(); } -static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, - uint8_t nr_gp_counters) +static void test_gp_counters(u8 pmu_version, u64 perf_capabilities, + u8 nr_gp_counters) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -514,9 +514,9 @@ static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, static void guest_test_fixed_counters(void) { - uint64_t supported_bitmask = 0; - uint8_t nr_fixed_counters = 0; - uint8_t i; + u64 supported_bitmask = 0; + u8 nr_fixed_counters = 0; + u8 i; /* Fixed counters require Architectural vPMU Version 2+. */ if (guest_get_pmu_version() >= 2) @@ -533,8 +533,8 @@ static void guest_test_fixed_counters(void) nr_fixed_counters, supported_bitmask); for (i = 0; i < MAX_NR_FIXED_COUNTERS; i++) { - uint8_t vector; - uint64_t val; + u8 vector; + u64 val; if (i >= nr_fixed_counters && !(supported_bitmask & BIT_ULL(i))) { vector = wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL, @@ -561,9 +561,8 @@ static void guest_test_fixed_counters(void) GUEST_DONE(); } -static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities, - uint8_t nr_fixed_counters, - uint32_t supported_bitmask) +static void test_fixed_counters(u8 pmu_version, u64 perf_capabilities, + u8 nr_fixed_counters, u32 supported_bitmask) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -583,14 +582,14 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities, static void test_intel_counters(void) { - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); - uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); - uint8_t pmu_version = 
kvm_cpu_property(X86_PROPERTY_PMU_VERSION); + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + u8 nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); + u8 pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); unsigned int i; - uint8_t v, j; - uint32_t k; + u8 v, j; + u32 k; - const uint64_t perf_caps[] = { + const u64 perf_caps[] = { 0, PMU_CAP_FW_WRITES, }; @@ -602,7 +601,7 @@ static void test_intel_counters(void) * as alternating bit sequencues, e.g. to detect if KVM is checking the * wrong bit(s). */ - const uint32_t unavailable_masks[] = { + const u32 unavailable_masks[] = { 0x0, 0xffffffffu, 0xaaaaaaaau, @@ -620,7 +619,7 @@ static void test_intel_counters(void) * Intel, i.e. is the last version that is guaranteed to be backwards * compatible with KVM's existing behavior. */ - uint8_t max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5); + u8 max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5); /* * Detect the existence of events that aren't supported by selftests. diff --git a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c index 93b61c07799112..c1232344fda89e 100644 --- a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c @@ -53,11 +53,11 @@ static const struct __kvm_pmu_event_filter base_event_filter = { }; struct { - uint64_t loads; - uint64_t stores; - uint64_t loads_stores; - uint64_t branches_retired; - uint64_t instructions_retired; + u64 loads; + u64 stores; + u64 loads_stores; + u64 branches_retired; + u64 instructions_retired; } pmc_results; /* @@ -75,9 +75,9 @@ static void guest_gp_handler(struct ex_regs *regs) * * Return on success. GUEST_SYNC(0) on error. 
*/ -static void check_msr(uint32_t msr, uint64_t bits_to_flip) +static void check_msr(u32 msr, u64 bits_to_flip) { - uint64_t v = rdmsr(msr) ^ bits_to_flip; + u64 v = rdmsr(msr) ^ bits_to_flip; wrmsr(msr, v); if (rdmsr(msr) != v) @@ -89,10 +89,10 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip) GUEST_SYNC(-EIO); } -static void run_and_measure_loop(uint32_t msr_base) +static void run_and_measure_loop(u32 msr_base) { - const uint64_t branches_retired = rdmsr(msr_base + 0); - const uint64_t insn_retired = rdmsr(msr_base + 1); + const u64 branches_retired = rdmsr(msr_base + 0); + const u64 insn_retired = rdmsr(msr_base + 1); __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); @@ -147,7 +147,7 @@ static void amd_guest_code(void) * Run the VM to the next GUEST_SYNC(value), and return the value passed * to the sync. Any other exit from the guest is fatal. */ -static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu) +static u64 run_vcpu_to_sync(struct kvm_vcpu *vcpu) { struct ucall uc; @@ -161,7 +161,7 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu) static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu) { - uint64_t r; + u64 r; memset(&pmc_results, 0, sizeof(pmc_results)); sync_global_to_guest(vcpu->vm, pmc_results); @@ -182,7 +182,7 @@ static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu) */ static bool sanity_check_pmu(struct kvm_vcpu *vcpu) { - uint64_t r; + u64 r; vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler); r = run_vcpu_to_sync(vcpu); @@ -195,7 +195,7 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu) * Remove the first occurrence of 'event' (if any) from the filter's * event list. 
*/ -static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event) +static void remove_event(struct __kvm_pmu_event_filter *f, u64 event) { bool found = false; int i; @@ -212,8 +212,8 @@ static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event) #define ASSERT_PMC_COUNTING_INSTRUCTIONS() \ do { \ - uint64_t br = pmc_results.branches_retired; \ - uint64_t ir = pmc_results.instructions_retired; \ + u64 br = pmc_results.branches_retired; \ + u64 ir = pmc_results.instructions_retired; \ bool br_matched = this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT) ? \ br >= NUM_BRANCHES : br == NUM_BRANCHES; \ \ @@ -228,8 +228,8 @@ do { \ #define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \ do { \ - uint64_t br = pmc_results.branches_retired; \ - uint64_t ir = pmc_results.instructions_retired; \ + u64 br = pmc_results.branches_retired; \ + u64 ir = pmc_results.instructions_retired; \ \ TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \ __func__, br); \ @@ -378,7 +378,7 @@ static bool use_amd_pmu(void) static bool supports_event_mem_inst_retired(void) { - uint32_t eax, ebx, ecx, edx; + u32 eax, ebx, ecx, edx; cpuid(1, &eax, &ebx, &ecx, &edx); if (x86_family(eax) == 0x6) { @@ -415,15 +415,15 @@ static bool supports_event_mem_inst_retired(void) #define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \ KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true) -static void masked_events_guest_test(uint32_t msr_base) +static void masked_events_guest_test(u32 msr_base) { /* * The actual value of the counters don't determine the outcome of * the test. Only that they are zero or non-zero. 
*/ - const uint64_t loads = rdmsr(msr_base + 0); - const uint64_t stores = rdmsr(msr_base + 1); - const uint64_t loads_stores = rdmsr(msr_base + 2); + const u64 loads = rdmsr(msr_base + 0); + const u64 stores = rdmsr(msr_base + 1); + const u64 loads_stores = rdmsr(msr_base + 2); int val; @@ -476,7 +476,7 @@ static void amd_masked_events_guest_code(void) } static void run_masked_events_test(struct kvm_vcpu *vcpu, - const uint64_t masked_events[], + const u64 masked_events[], const int nmasked_events) { struct __kvm_pmu_event_filter f = { @@ -485,7 +485,7 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu, .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS, }; - memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events); + memcpy(f.events, masked_events, sizeof(u64) * nmasked_events); test_with_filter(vcpu, &f); } @@ -494,12 +494,12 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu, #define ALLOW_LOADS_STORES BIT(2) struct masked_events_test { - uint64_t intel_events[MAX_TEST_EVENTS]; - uint64_t intel_event_end; - uint64_t amd_events[MAX_TEST_EVENTS]; - uint64_t amd_event_end; + u64 intel_events[MAX_TEST_EVENTS]; + u64 intel_event_end; + u64 amd_events[MAX_TEST_EVENTS]; + u64 amd_event_end; const char *msg; - uint32_t flags; + u32 flags; }; /* @@ -582,9 +582,9 @@ const struct masked_events_test test_cases[] = { }; static int append_test_events(const struct masked_events_test *test, - uint64_t *events, int nevents) + u64 *events, int nevents) { - const uint64_t *evts; + const u64 *evts; int i; evts = use_intel_pmu() ? 
test->intel_events : test->amd_events; @@ -603,7 +603,7 @@ static bool bool_eq(bool a, bool b) return a == b; } -static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, +static void run_masked_events_tests(struct kvm_vcpu *vcpu, u64 *events, int nevents) { int ntests = ARRAY_SIZE(test_cases); @@ -630,7 +630,7 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, } } -static void add_dummy_events(uint64_t *events, int nevents) +static void add_dummy_events(u64 *events, int nevents) { int i; @@ -650,7 +650,7 @@ static void add_dummy_events(uint64_t *events, int nevents) static void test_masked_events(struct kvm_vcpu *vcpu) { int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS; - uint64_t events[KVM_PMU_EVENT_FILTER_MAX_EVENTS]; + u64 events[KVM_PMU_EVENT_FILTER_MAX_EVENTS]; /* Run the test cases against a sparse PMU event filter. */ run_masked_events_tests(vcpu, events, 0); @@ -668,8 +668,8 @@ static int set_pmu_event_filter(struct kvm_vcpu *vcpu, return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); } -static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event, - uint32_t flags, uint32_t action) +static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, u64 event, + u32 flags, u32 action) { struct __kvm_pmu_event_filter f = { .nevents = 1, @@ -685,9 +685,9 @@ static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event, static void test_filter_ioctl(struct kvm_vcpu *vcpu) { - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); struct __kvm_pmu_event_filter f; - uint64_t e = ~0ul; + u64 e = ~0ul; int r; /* @@ -729,7 +729,7 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu) TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed"); } -static void intel_run_fixed_counter_guest_code(uint8_t idx) +static void 
intel_run_fixed_counter_guest_code(u8 idx) { for (;;) { wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); @@ -745,8 +745,8 @@ static void intel_run_fixed_counter_guest_code(uint8_t idx) } } -static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, - uint32_t action, uint32_t bitmap) +static u64 test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, + u32 action, u32 bitmap) { struct __kvm_pmu_event_filter f = { .action = action, @@ -757,9 +757,9 @@ static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, return run_vcpu_to_sync(vcpu); } -static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, - uint32_t action, - uint32_t bitmap) +static u64 test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, + u32 action, + u32 bitmap) { struct __kvm_pmu_event_filter f = base_event_filter; @@ -770,12 +770,12 @@ static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, return run_vcpu_to_sync(vcpu); } -static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, - uint8_t nr_fixed_counters) +static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, u8 idx, + u8 nr_fixed_counters) { unsigned int i; - uint32_t bitmap; - uint64_t count; + u32 bitmap; + u64 count; TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8, "Invalid nr_fixed_counters"); @@ -815,10 +815,10 @@ static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, static void test_fixed_counter_bitmap(void) { - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint8_t idx; + u8 idx; /* * Check that pmu_event_filter works as expected when it's applied to diff --git a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c index 1969f4ab9b280d..1d2f5d4fd45d7f 100644 --- 
a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c +++ b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c @@ -23,13 +23,13 @@ #include #define BASE_DATA_SLOT 10 -#define BASE_DATA_GPA ((uint64_t)(1ull << 32)) -#define PER_CPU_DATA_SIZE ((uint64_t)(SZ_2M + PAGE_SIZE)) +#define BASE_DATA_GPA ((u64)(1ull << 32)) +#define PER_CPU_DATA_SIZE ((u64)(SZ_2M + PAGE_SIZE)) /* Horrific macro so that the line info is captured accurately :-( */ #define memcmp_g(gpa, pattern, size) \ do { \ - uint8_t *mem = (uint8_t *)gpa; \ + u8 *mem = (u8 *)gpa; \ size_t i; \ \ for (i = 0; i < size; i++) \ @@ -38,7 +38,7 @@ do { \ pattern, i, gpa + i, mem[i]); \ } while (0) -static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size) +static void memcmp_h(u8 *mem, gpa_t gpa, u8 pattern, size_t size) { size_t i; @@ -70,13 +70,13 @@ enum ucall_syncs { SYNC_PRIVATE, }; -static void guest_sync_shared(uint64_t gpa, uint64_t size, - uint8_t current_pattern, uint8_t new_pattern) +static void guest_sync_shared(gpa_t gpa, u64 size, + u8 current_pattern, u8 new_pattern) { GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern); } -static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern) +static void guest_sync_private(gpa_t gpa, u64 size, u8 pattern) { GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern); } @@ -86,10 +86,10 @@ static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern) #define MAP_GPA_SHARED BIT(1) #define MAP_GPA_DO_FALLOCATE BIT(2) -static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared, +static void guest_map_mem(gpa_t gpa, u64 size, bool map_shared, bool do_fallocate) { - uint64_t flags = MAP_GPA_SET_ATTRIBUTES; + u64 flags = MAP_GPA_SET_ATTRIBUTES; if (map_shared) flags |= MAP_GPA_SHARED; @@ -98,19 +98,19 @@ static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared, kvm_hypercall_map_gpa_range(gpa, size, flags); } -static void guest_map_shared(uint64_t gpa, uint64_t 
size, bool do_fallocate) +static void guest_map_shared(gpa_t gpa, u64 size, bool do_fallocate) { guest_map_mem(gpa, size, true, do_fallocate); } -static void guest_map_private(uint64_t gpa, uint64_t size, bool do_fallocate) +static void guest_map_private(gpa_t gpa, u64 size, bool do_fallocate) { guest_map_mem(gpa, size, false, do_fallocate); } struct { - uint64_t offset; - uint64_t size; + u64 offset; + u64 size; } static const test_ranges[] = { GUEST_STAGE(0, PAGE_SIZE), GUEST_STAGE(0, SZ_2M), @@ -119,11 +119,11 @@ struct { GUEST_STAGE(SZ_2M, PAGE_SIZE), }; -static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate) +static void guest_test_explicit_conversion(u64 base_gpa, bool do_fallocate) { - const uint8_t def_p = 0xaa; - const uint8_t init_p = 0xcc; - uint64_t j; + const u8 def_p = 0xaa; + const u8 init_p = 0xcc; + u64 j; int i; /* Memory should be shared by default. */ @@ -134,12 +134,12 @@ static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate) memcmp_g(base_gpa, init_p, PER_CPU_DATA_SIZE); for (i = 0; i < ARRAY_SIZE(test_ranges); i++) { - uint64_t gpa = base_gpa + test_ranges[i].offset; - uint64_t size = test_ranges[i].size; - uint8_t p1 = 0x11; - uint8_t p2 = 0x22; - uint8_t p3 = 0x33; - uint8_t p4 = 0x44; + gpa_t gpa = base_gpa + test_ranges[i].offset; + u64 size = test_ranges[i].size; + u8 p1 = 0x11; + u8 p2 = 0x22; + u8 p3 = 0x33; + u8 p4 = 0x44; /* * Set the test region to pattern one to differentiate it from @@ -214,10 +214,10 @@ static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate) } } -static void guest_punch_hole(uint64_t gpa, uint64_t size) +static void guest_punch_hole(gpa_t gpa, u64 size) { /* "Mapping" memory shared via fallocate() is done via PUNCH_HOLE. 
*/ - uint64_t flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE; + u64 flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE; kvm_hypercall_map_gpa_range(gpa, size, flags); } @@ -227,9 +227,9 @@ static void guest_punch_hole(uint64_t gpa, uint64_t size) * proper conversion. Freeing (PUNCH_HOLE) should zap SPTEs, and reallocating * (subsequent fault) should zero memory. */ -static void guest_test_punch_hole(uint64_t base_gpa, bool precise) +static void guest_test_punch_hole(u64 base_gpa, bool precise) { - const uint8_t init_p = 0xcc; + const u8 init_p = 0xcc; int i; /* @@ -239,8 +239,8 @@ static void guest_test_punch_hole(uint64_t base_gpa, bool precise) guest_map_private(base_gpa, PER_CPU_DATA_SIZE, false); for (i = 0; i < ARRAY_SIZE(test_ranges); i++) { - uint64_t gpa = base_gpa + test_ranges[i].offset; - uint64_t size = test_ranges[i].size; + gpa_t gpa = base_gpa + test_ranges[i].offset; + u64 size = test_ranges[i].size; /* * Free all memory before each iteration, even for the !precise @@ -268,7 +268,7 @@ static void guest_test_punch_hole(uint64_t base_gpa, bool precise) } } -static void guest_code(uint64_t base_gpa) +static void guest_code(u64 base_gpa) { /* * Run the conversion test twice, with and without doing fallocate() on @@ -289,8 +289,8 @@ static void guest_code(uint64_t base_gpa) static void handle_exit_hypercall(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint64_t gpa = run->hypercall.args[0]; - uint64_t size = run->hypercall.args[1] * PAGE_SIZE; + gpa_t gpa = run->hypercall.args[0]; + u64 size = run->hypercall.args[1] * PAGE_SIZE; bool set_attributes = run->hypercall.args[2] & MAP_GPA_SET_ATTRIBUTES; bool map_shared = run->hypercall.args[2] & MAP_GPA_SHARED; bool do_fallocate = run->hypercall.args[2] & MAP_GPA_DO_FALLOCATE; @@ -337,7 +337,7 @@ static void *__test_mem_conversions(void *__vcpu) case UCALL_ABORT: REPORT_GUEST_ASSERT(uc); case UCALL_SYNC: { - uint64_t gpa = uc.args[1]; + gpa_t gpa = uc.args[1]; size_t size = uc.args[2]; size_t i; @@ 
-347,7 +347,7 @@ static void *__test_mem_conversions(void *__vcpu) for (i = 0; i < size; i += vm->page_size) { size_t nr_bytes = min_t(size_t, vm->page_size, size - i); - uint8_t *hva = addr_gpa2hva(vm, gpa + i); + u8 *hva = addr_gpa2hva(vm, gpa + i); /* In all cases, the host should observe the shared data. */ memcmp_h(hva, gpa + i, uc.args[3], nr_bytes); @@ -366,8 +366,8 @@ static void *__test_mem_conversions(void *__vcpu) } } -static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t nr_vcpus, - uint32_t nr_memslots) +static void test_mem_conversions(enum vm_mem_backing_src_type src_type, u32 nr_vcpus, + u32 nr_memslots) { /* * Allocate enough memory so that each vCPU's chunk of memory can be @@ -402,7 +402,7 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t KVM_MEM_GUEST_MEMFD, memfd, slot_size * i); for (i = 0; i < nr_vcpus; i++) { - uint64_t gpa = BASE_DATA_GPA + i * per_cpu_size; + gpa_t gpa = BASE_DATA_GPA + i * per_cpu_size; vcpu_args_set(vcpus[i], 1, gpa); @@ -450,8 +450,8 @@ static void usage(const char *cmd) int main(int argc, char *argv[]) { enum vm_mem_backing_src_type src_type = DEFAULT_VM_MEM_SRC; - uint32_t nr_memslots = 1; - uint32_t nr_vcpus = 1; + u32 nr_memslots = 1; + u32 nr_vcpus = 1; int opt; TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM)); diff --git a/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c b/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c index 13e72fcec8dd2f..10db9fe6d90630 100644 --- a/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c +++ b/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c @@ -17,17 +17,17 @@ #define EXITS_TEST_SIZE (EXITS_TEST_NPAGES * PAGE_SIZE) #define EXITS_TEST_SLOT 10 -static uint64_t guest_repeatedly_read(void) +static u64 guest_repeatedly_read(void) { - volatile uint64_t value; + volatile u64 value; while (true) - value = *((uint64_t *) EXITS_TEST_GVA); + value = *((u64 
*)EXITS_TEST_GVA); return value; } -static uint32_t run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu) +static u32 run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu) { int r; @@ -50,7 +50,7 @@ static void test_private_access_memslot_deleted(void) struct kvm_vcpu *vcpu; pthread_t vm_thread; void *thread_return; - uint32_t exit_reason; + u32 exit_reason; vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu, guest_repeatedly_read); @@ -72,7 +72,7 @@ static void test_private_access_memslot_deleted(void) vm_mem_region_delete(vm, EXITS_TEST_SLOT); pthread_join(vm_thread, &thread_return); - exit_reason = (uint32_t)(uint64_t)thread_return; + exit_reason = (u32)(u64)thread_return; TEST_ASSERT_EQ(exit_reason, KVM_EXIT_MEMORY_FAULT); TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE); @@ -86,7 +86,7 @@ static void test_private_access_memslot_not_private(void) { struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint32_t exit_reason; + u32 exit_reason; vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu, guest_repeatedly_read); diff --git a/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c index 49913784bc82a0..8e3898646c69d7 100644 --- a/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c +++ b/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c @@ -86,11 +86,11 @@ static void run_vcpu(struct kvm_vcpu *vcpu) } } -static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id, +static struct kvm_vm *create_vm(u32 nr_vcpus, u32 bsp_vcpu_id, struct kvm_vcpu *vcpus[]) { struct kvm_vm *vm; - uint32_t i; + u32 i; vm = vm_create(nr_vcpus); @@ -104,7 +104,7 @@ static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id, return vm; } -static void run_vm_bsp(uint32_t bsp_vcpu_id) +static void run_vm_bsp(u32 bsp_vcpu_id) { struct kvm_vcpu *vcpus[2]; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/set_sregs_test.c b/tools/testing/selftests/kvm/x86/set_sregs_test.c index 
f4095a3d12786a..8e654cc9ab1687 100644 --- a/tools/testing/selftests/kvm/x86/set_sregs_test.c +++ b/tools/testing/selftests/kvm/x86/set_sregs_test.c @@ -46,9 +46,9 @@ do { \ X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \ X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT) -static uint64_t calc_supported_cr4_feature_bits(void) +static u64 calc_supported_cr4_feature_bits(void) { - uint64_t cr4 = KVM_ALWAYS_ALLOWED_CR4; + u64 cr4 = KVM_ALWAYS_ALLOWED_CR4; if (kvm_cpu_has(X86_FEATURE_UMIP)) cr4 |= X86_CR4_UMIP; @@ -74,7 +74,7 @@ static uint64_t calc_supported_cr4_feature_bits(void) return cr4; } -static void test_cr_bits(struct kvm_vcpu *vcpu, uint64_t cr4) +static void test_cr_bits(struct kvm_vcpu *vcpu, u64 cr4) { struct kvm_sregs sregs; int rc, i; diff --git a/tools/testing/selftests/kvm/x86/sev_init2_tests.c b/tools/testing/selftests/kvm/x86/sev_init2_tests.c index b238615196ade4..8eeba2327c7cd0 100644 --- a/tools/testing/selftests/kvm/x86/sev_init2_tests.c +++ b/tools/testing/selftests/kvm/x86/sev_init2_tests.c @@ -34,7 +34,7 @@ static int __sev_ioctl(int vm_fd, int cmd_id, void *data) { struct kvm_sev_cmd cmd = { .id = cmd_id, - .data = (uint64_t)data, + .data = (u64)data, .sev_fd = open_sev_dev_path_or_exit(), }; int ret; @@ -94,7 +94,7 @@ void test_vm_types(void) "VM type is KVM_X86_SW_PROTECTED_VM"); } -void test_flags(uint32_t vm_type) +void test_flags(u32 vm_type) { int i; @@ -104,7 +104,7 @@ void test_flags(uint32_t vm_type) "invalid flag"); } -void test_features(uint32_t vm_type, uint64_t supported_features) +void test_features(u32 vm_type, u64 supported_features) { int i; diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c index 8bd37a476f1595..1a49ee3915864e 100644 --- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c +++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c @@ -13,9 +13,9 @@ #include "linux/psp-sev.h" #include "sev.h" -static void guest_sev_test_msr(uint32_t msr) +static void 
guest_sev_test_msr(u32 msr) { - uint64_t val = rdmsr(msr); + u64 val = rdmsr(msr); wrmsr(msr, val); GUEST_ASSERT(val == rdmsr(msr)); @@ -23,7 +23,7 @@ static void guest_sev_test_msr(uint32_t msr) #define guest_sev_test_reg(reg) \ do { \ - uint64_t val = get_##reg(); \ + u64 val = get_##reg(); \ \ set_##reg(val); \ GUEST_ASSERT(val == get_##reg()); \ @@ -42,7 +42,7 @@ static void guest_sev_test_regs(void) static void guest_snp_code(void) { - uint64_t sev_msr = rdmsr(MSR_AMD64_SEV); + u64 sev_msr = rdmsr(MSR_AMD64_SEV); GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ENABLED); GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED); @@ -104,19 +104,19 @@ static void compare_xsave(u8 *from_host, u8 *from_guest) abort(); } -static void test_sync_vmsa(uint32_t type, uint64_t policy) +static void test_sync_vmsa(u32 type, u64 policy) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t gva; + gva_t gva; void *hva; double x87val = M_PI; struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 }; vm = vm_sev_create_with_one_vcpu(type, guest_code_xsave, &vcpu); - gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR, - MEM_REGION_TEST_DATA); + gva = vm_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR, + MEM_REGION_TEST_DATA); hva = addr_gva2hva(vm, gva); vcpu_args_set(vcpu, 1, gva); @@ -150,7 +150,7 @@ static void test_sync_vmsa(uint32_t type, uint64_t policy) kvm_vm_free(vm); } -static void test_sev(void *guest_code, uint32_t type, uint64_t policy) +static void test_sev(void *guest_code, u32 type, u64 policy) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -201,7 +201,7 @@ static void guest_shutdown_code(void) __asm__ __volatile__("ud2"); } -static void test_sev_shutdown(uint32_t type, uint64_t policy) +static void test_sev_shutdown(u32 type, u64 policy) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -218,7 +218,7 @@ static void test_sev_shutdown(uint32_t type, uint64_t policy) kvm_vm_free(vm); } -static void test_sev_smoke(void *guest, uint32_t type, uint64_t policy) +static void 
test_sev_smoke(void *guest, u32 type, u64 policy) { const u64 xf_mask = XFEATURE_MASK_X87_AVX; diff --git a/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c b/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c index 0e8aec5680103d..3dca85e954781d 100644 --- a/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c +++ b/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c @@ -20,8 +20,8 @@ static void guest_code(bool tdp_enabled) { - uint64_t error_code; - uint64_t vector; + u64 error_code; + u64 vector; vector = kvm_asm_safe_ec(FLDS_MEM_EAX, error_code, "a"(MEM_REGION_GVA)); @@ -47,8 +47,8 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct ucall uc; - uint64_t *hva; - uint64_t gpa; + u64 *hva; + gpa_t gpa; int rc; TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR)); diff --git a/tools/testing/selftests/kvm/x86/smm_test.c b/tools/testing/selftests/kvm/x86/smm_test.c index ade8412bf94aac..740051167dbd44 100644 --- a/tools/testing/selftests/kvm/x86/smm_test.c +++ b/tools/testing/selftests/kvm/x86/smm_test.c @@ -34,13 +34,13 @@ * independent subset of asm here. * SMI handler always report back fixed stage SMRAM_STAGE. 
*/ -uint8_t smi_handler[] = { +u8 smi_handler[] = { 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 0x0f, 0xaa, /* rsm */ }; -static inline void sync_with_host(uint64_t phase) +static inline void sync_with_host(u64 phase) { asm volatile("in $" XSTR(SYNC_PORT)", %%al \n" : "+a" (phase)); @@ -65,7 +65,7 @@ static void guest_code(void *arg) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - uint64_t apicbase = rdmsr(MSR_IA32_APICBASE); + u64 apicbase = rdmsr(MSR_IA32_APICBASE); struct svm_test_data *svm = arg; struct vmx_pages *vmx_pages = arg; @@ -113,7 +113,7 @@ static void guest_code(void *arg) int main(int argc, char *argv[]) { - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; struct kvm_vcpu *vcpu; struct kvm_regs regs; diff --git a/tools/testing/selftests/kvm/x86/state_test.c b/tools/testing/selftests/kvm/x86/state_test.c index 992a52504a4ab5..409c6cc9f9214f 100644 --- a/tools/testing/selftests/kvm/x86/state_test.c +++ b/tools/testing/selftests/kvm/x86/state_test.c @@ -144,8 +144,8 @@ static void __attribute__((__flatten__)) guest_code(void *arg) GUEST_SYNC(1); if (this_cpu_has(X86_FEATURE_XSAVE)) { - uint64_t supported_xcr0 = this_cpu_supported_xcr0(); - uint8_t buffer[PAGE_SIZE]; + u64 supported_xcr0 = this_cpu_supported_xcr0(); + u8 buffer[PAGE_SIZE]; memset(buffer, 0xcc, sizeof(buffer)); @@ -172,8 +172,8 @@ static void __attribute__((__flatten__)) guest_code(void *arg) } if (this_cpu_has(X86_FEATURE_MPX)) { - uint64_t bounds[2] = { 10, 0xffffffffull }; - uint64_t output[2] = { }; + u64 bounds[2] = { 10, 0xffffffffull }; + u64 output[2] = { }; GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDREGS); GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDCSR); @@ -257,8 +257,8 @@ void check_nested_state(int stage, struct kvm_x86_state *state) int main(int argc, char *argv[]) { - uint64_t *xstate_bv, saved_xstate_bv; - vm_vaddr_t nested_gva = 0; + u64 *xstate_bv, saved_xstate_bv; + gva_t 
nested_gva = 0; struct kvm_cpuid2 empty_cpuid = {}; struct kvm_regs regs1, regs2; struct kvm_vcpu *vcpu, *vcpuN; @@ -331,7 +331,7 @@ int main(int argc, char *argv[]) * supported features, even if something goes awry in saving * the original snapshot. */ - xstate_bv = (void *)&((uint8_t *)state->xsave->region)[512]; + xstate_bv = (void *)&((u8 *)state->xsave->region)[512]; saved_xstate_bv = *xstate_bv; vcpuN = __vm_vcpu_add(vm, vcpu->id + 1); diff --git a/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c index 917b6066cfc123..d3cc5e4f788310 100644 --- a/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c +++ b/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c @@ -82,7 +82,7 @@ static void l1_guest_code(struct svm_test_data *svm) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - vm_vaddr_t svm_gva; + gva_t svm_gva; struct kvm_vm *vm; struct ucall uc; diff --git a/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c b/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c index ff99438824d3a3..7fbfaa054c952b 100644 --- a/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c +++ b/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c @@ -97,9 +97,9 @@ void test_lbrv_nested_state(bool nested_lbrv) { struct kvm_x86_state *state = NULL; struct kvm_vcpu *vcpu; - vm_vaddr_t svm_gva; struct kvm_vm *vm; struct ucall uc; + gva_t svm_gva; pr_info("Testing with nested LBRV %s\n", nested_lbrv ? 
"enabled" : "disabled"); diff --git a/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c b/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c index a521a9eed06138..6a89eaffc65780 100644 --- a/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c +++ b/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c @@ -38,7 +38,7 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c b/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c index 00135cbba35eac..c6ea3d609a6298 100644 --- a/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c +++ b/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c @@ -42,7 +42,7 @@ static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - vm_vaddr_t svm_gva; + gva_t svm_gva; struct kvm_vm *vm; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c index 4bd1655f9e6d0d..f72f11d4c4f837 100644 --- a/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c +++ b/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c @@ -76,7 +76,7 @@ static void l2_guest_code_nmi(void) ud2(); } -static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt) +static void l1_guest_code(struct svm_test_data *svm, u64 is_nmi, u64 idt_alt) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; @@ -144,8 +144,8 @@ static void run_test(bool is_nmi) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t svm_gva; - vm_vaddr_t idt_alt_vm; + gva_t svm_gva; + gva_t idt_alt_vm; struct kvm_guest_debug debug; pr_info("Running %s test\n", is_nmi ? 
"NMI" : "soft int"); @@ -161,14 +161,14 @@ static void run_test(bool is_nmi) if (!is_nmi) { void *idt, *idt_alt; - idt_alt_vm = vm_vaddr_alloc_page(vm); + idt_alt_vm = vm_alloc_page(vm); idt_alt = addr_gva2hva(vm, idt_alt_vm); idt = addr_gva2hva(vm, vm->arch.idt); memcpy(idt_alt, idt, getpagesize()); } else { idt_alt_vm = 0; } - vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm); + vcpu_args_set(vcpu, 3, svm_gva, (u64)is_nmi, (u64)idt_alt_vm); memset(&debug, 0, sizeof(debug)); vcpu_guest_debug_set(vcpu, &debug); diff --git a/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c b/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c index 569869bed20b5c..a4935ce2fb9987 100644 --- a/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c +++ b/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c @@ -28,28 +28,28 @@ static void l2_code(void) vmcall(); } -static void l1_vmrun(struct svm_test_data *svm, u64 gpa) +static void l1_vmrun(struct svm_test_data *svm, gpa_t gpa) { generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); asm volatile ("vmrun %[gpa]" : : [gpa] "a" (gpa) : "memory"); } -static void l1_vmload(struct svm_test_data *svm, u64 gpa) +static void l1_vmload(struct svm_test_data *svm, gpa_t gpa) { generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); asm volatile ("vmload %[gpa]" : : [gpa] "a" (gpa) : "memory"); } -static void l1_vmsave(struct svm_test_data *svm, u64 gpa) +static void l1_vmsave(struct svm_test_data *svm, gpa_t gpa) { generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); asm volatile ("vmsave %[gpa]" : : [gpa] "a" (gpa) : "memory"); } -static void l1_vmexit(struct svm_test_data *svm, u64 gpa) +static void l1_vmexit(struct svm_test_data *svm, gpa_t gpa) { generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); @@ -74,7 +74,7 @@ static u64 unmappable_gpa(struct kvm_vcpu *vcpu) static void test_invalid_vmcb12(struct kvm_vcpu *vcpu) { - vm_vaddr_t 
nested_gva = 0; + gva_t nested_gva = 0; struct ucall uc; @@ -90,7 +90,7 @@ static void test_invalid_vmcb12(struct kvm_vcpu *vcpu) static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu) { - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; vcpu_alloc_svm(vcpu->vm, &nested_gva); vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu)); @@ -103,7 +103,7 @@ static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu) static void test_unmappable_vmcb12_vmexit(struct kvm_vcpu *vcpu) { struct kvm_x86_state *state; - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; struct ucall uc; /* diff --git a/tools/testing/selftests/kvm/x86/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86/svm_vmcall_test.c index 8a62cca28cfbb3..b1887242f3b8e4 100644 --- a/tools/testing/selftests/kvm/x86/svm_vmcall_test.c +++ b/tools/testing/selftests/kvm/x86/svm_vmcall_test.c @@ -36,7 +36,7 @@ static void l1_guest_code(struct svm_test_data *svm) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - vm_vaddr_t svm_gva; + gva_t svm_gva; struct kvm_vm *vm; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/sync_regs_test.c b/tools/testing/selftests/kvm/x86/sync_regs_test.c index 8fa3948b0170e1..e0c52321f87c43 100644 --- a/tools/testing/selftests/kvm/x86/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86/sync_regs_test.c @@ -20,7 +20,7 @@ #include "kvm_util.h" #include "processor.h" -#define UCALL_PIO_PORT ((uint16_t)0x1000) +#define UCALL_PIO_PORT ((u16)0x1000) struct ucall uc_none = { .cmd = UCALL_NONE, diff --git a/tools/testing/selftests/kvm/x86/triple_fault_event_test.c b/tools/testing/selftests/kvm/x86/triple_fault_event_test.c index 56306a19144a7d..f1c488e0d4975f 100644 --- a/tools/testing/selftests/kvm/x86/triple_fault_event_test.c +++ b/tools/testing/selftests/kvm/x86/triple_fault_event_test.c @@ -72,13 +72,13 @@ int main(void) if (has_vmx) { - vm_vaddr_t vmx_pages_gva; + gva_t vmx_pages_gva; vm = vm_create_with_one_vcpu(&vcpu, 
l1_guest_code_vmx); vcpu_alloc_vmx(vm, &vmx_pages_gva); vcpu_args_set(vcpu, 1, vmx_pages_gva); } else { - vm_vaddr_t svm_gva; + gva_t svm_gva; vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm); vcpu_alloc_svm(vm, &svm_gva); diff --git a/tools/testing/selftests/kvm/x86/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86/tsc_msrs_test.c index 12b0964f4f1319..91583969a14fbb 100644 --- a/tools/testing/selftests/kvm/x86/tsc_msrs_test.c +++ b/tools/testing/selftests/kvm/x86/tsc_msrs_test.c @@ -95,7 +95,7 @@ int main(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t val; + u64 val; ksft_print_header(); ksft_set_plan(5); diff --git a/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c index 59c7304f805efd..59da8d4da6079c 100644 --- a/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c +++ b/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c @@ -21,10 +21,10 @@ pthread_spinlock_t create_lock; #define TEST_TSC_KHZ 2345678UL #define TEST_TSC_OFFSET 200000000 -uint64_t tsc_sync; +u64 tsc_sync; static void guest_code(void) { - uint64_t start_tsc, local_tsc, tmp; + u64 start_tsc, local_tsc, tmp; start_tsc = rdtsc(); do { diff --git a/tools/testing/selftests/kvm/x86/ucna_injection_test.c b/tools/testing/selftests/kvm/x86/ucna_injection_test.c index 1e5e564523b345..df1ec8209c7693 100644 --- a/tools/testing/selftests/kvm/x86/ucna_injection_test.c +++ b/tools/testing/selftests/kvm/x86/ucna_injection_test.c @@ -45,7 +45,7 @@ #define MCI_CTL2_RESERVED_BIT BIT_ULL(29) -static uint64_t supported_mcg_caps; +static u64 supported_mcg_caps; /* * Record states about the injected UCNA. @@ -53,30 +53,30 @@ static uint64_t supported_mcg_caps; * handler. Variables without the 'i_' prefixes are recorded in guest main * execution thread. 
*/ -static volatile uint64_t i_ucna_rcvd; -static volatile uint64_t i_ucna_addr; -static volatile uint64_t ucna_addr; -static volatile uint64_t ucna_addr2; +static volatile u64 i_ucna_rcvd; +static volatile u64 i_ucna_addr; +static volatile u64 ucna_addr; +static volatile u64 ucna_addr2; struct thread_params { struct kvm_vcpu *vcpu; - uint64_t *p_i_ucna_rcvd; - uint64_t *p_i_ucna_addr; - uint64_t *p_ucna_addr; - uint64_t *p_ucna_addr2; + u64 *p_i_ucna_rcvd; + u64 *p_i_ucna_addr; + u64 *p_ucna_addr; + u64 *p_ucna_addr2; }; static void verify_apic_base_addr(void) { - uint64_t msr = rdmsr(MSR_IA32_APICBASE); - uint64_t base = GET_APIC_BASE(msr); + u64 msr = rdmsr(MSR_IA32_APICBASE); + u64 base = GET_APIC_BASE(msr); GUEST_ASSERT(base == APIC_DEFAULT_GPA); } static void ucna_injection_guest_code(void) { - uint64_t ctl2; + u64 ctl2; verify_apic_base_addr(); xapic_enable(); @@ -106,7 +106,7 @@ static void ucna_injection_guest_code(void) static void cmci_disabled_guest_code(void) { - uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); + u64 ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN); GUEST_DONE(); @@ -114,7 +114,7 @@ static void cmci_disabled_guest_code(void) static void cmci_enabled_guest_code(void) { - uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); + u64 ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_RESERVED_BIT); GUEST_DONE(); @@ -145,14 +145,15 @@ static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu) printf("vCPU received GP in guest.\n"); } -static void inject_ucna(struct kvm_vcpu *vcpu, uint64_t addr) { +static void inject_ucna(struct kvm_vcpu *vcpu, u64 addr) +{ /* * A UCNA error is indicated with VAL=1, UC=1, PCC=0, S=0 and AR=0 in * the IA32_MCi_STATUS register. * MSCOD=1 (BIT[16] - MscodDataRdErr). 
* MCACOD=0x0090 (Memory controller error format, channel 0) */ - uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | + u64 status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | 0x10090; struct kvm_x86_mce mce = {}; mce.status = status; @@ -216,10 +217,10 @@ static void test_ucna_injection(struct kvm_vcpu *vcpu, struct thread_params *par { struct kvm_vm *vm = vcpu->vm; params->vcpu = vcpu; - params->p_i_ucna_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_rcvd); - params->p_i_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_addr); - params->p_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr); - params->p_ucna_addr2 = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr2); + params->p_i_ucna_rcvd = (u64 *)addr_gva2hva(vm, (u64)&i_ucna_rcvd); + params->p_i_ucna_addr = (u64 *)addr_gva2hva(vm, (u64)&i_ucna_addr); + params->p_ucna_addr = (u64 *)addr_gva2hva(vm, (u64)&ucna_addr); + params->p_ucna_addr2 = (u64 *)addr_gva2hva(vm, (u64)&ucna_addr2); run_ucna_injection(params); @@ -242,7 +243,7 @@ static void test_ucna_injection(struct kvm_vcpu *vcpu, struct thread_params *par static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p) { - uint64_t mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS; + u64 mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS; if (enable_cmci_p) mcg_caps |= MCG_CMCI_P; @@ -250,7 +251,7 @@ static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p) vcpu_ioctl(vcpu, KVM_X86_SETUP_MCE, &mcg_caps); } -static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, uint32_t vcpuid, +static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, u32 vcpuid, bool enable_cmci_p, void *guest_code) { struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpuid, guest_code); diff --git a/tools/testing/selftests/kvm/x86/userspace_io_test.c b/tools/testing/selftests/kvm/x86/userspace_io_test.c index be7d72f3c029ff..9c5a87576c2e95 
100644 --- a/tools/testing/selftests/kvm/x86/userspace_io_test.c +++ b/tools/testing/selftests/kvm/x86/userspace_io_test.c @@ -10,7 +10,7 @@ #include "kvm_util.h" #include "processor.h" -static void guest_ins_port80(uint8_t *buffer, unsigned int count) +static void guest_ins_port80(u8 *buffer, unsigned int count) { unsigned long end; @@ -26,7 +26,7 @@ static void guest_ins_port80(uint8_t *buffer, unsigned int count) static void guest_code(void) { - uint8_t buffer[8192]; + u8 buffer[8192]; int i; /* diff --git a/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c index 8463a995641051..2808ce727e5f59 100644 --- a/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c +++ b/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c @@ -23,21 +23,21 @@ struct kvm_msr_filter filter_allow = { .nmsrs = 1, /* Test an MSR the kernel knows about. */ .base = MSR_IA32_XSS, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, { .flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE, .nmsrs = 1, /* Test an MSR the kernel doesn't know about. */ .base = MSR_IA32_FLUSH_CMD, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, { .flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE, .nmsrs = 1, /* Test a fabricated MSR that no one knows about. 
*/ .base = MSR_NON_EXISTENT, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, }, }; @@ -49,7 +49,7 @@ struct kvm_msr_filter filter_fs = { .flags = KVM_MSR_FILTER_READ, .nmsrs = 1, .base = MSR_FS_BASE, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, }, }; @@ -61,12 +61,12 @@ struct kvm_msr_filter filter_gs = { .flags = KVM_MSR_FILTER_READ, .nmsrs = 1, .base = MSR_GS_BASE, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, }, }; -static uint64_t msr_non_existent_data; +static u64 msr_non_existent_data; static int guest_exception_count; static u32 msr_reads, msr_writes; @@ -77,7 +77,7 @@ static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE]; static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE]; static u8 bitmap_deadbeef[1] = { 0x1 }; -static void deny_msr(uint8_t *bitmap, u32 msr) +static void deny_msr(u8 *bitmap, u32 msr) { u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1); @@ -142,26 +142,26 @@ struct kvm_msr_filter no_filter_deny = { * Note: Force test_rdmsr() to not be inlined to prevent the labels, * rdmsr_start and rdmsr_end, from being defined multiple times. */ -static noinline uint64_t test_rdmsr(uint32_t msr) +static noinline u64 test_rdmsr(u32 msr) { - uint32_t a, d; + u32 a, d; guest_exception_count = 0; __asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" : "=a"(a), "=d"(d) : "c"(msr) : "memory"); - return a | ((uint64_t) d << 32); + return a | ((u64)d << 32); } /* * Note: Force test_wrmsr() to not be inlined to prevent the labels, * wrmsr_start and wrmsr_end, from being defined multiple times. 
*/ -static noinline void test_wrmsr(uint32_t msr, uint64_t value) +static noinline void test_wrmsr(u32 msr, u64 value) { - uint32_t a = value; - uint32_t d = value >> 32; + u32 a = value; + u32 d = value >> 32; guest_exception_count = 0; @@ -176,26 +176,26 @@ extern char wrmsr_start, wrmsr_end; * Note: Force test_em_rdmsr() to not be inlined to prevent the labels, * rdmsr_start and rdmsr_end, from being defined multiple times. */ -static noinline uint64_t test_em_rdmsr(uint32_t msr) +static noinline u64 test_em_rdmsr(u32 msr) { - uint32_t a, d; + u32 a, d; guest_exception_count = 0; __asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" : "=a"(a), "=d"(d) : "c"(msr) : "memory"); - return a | ((uint64_t) d << 32); + return a | ((u64)d << 32); } /* * Note: Force test_em_wrmsr() to not be inlined to prevent the labels, * wrmsr_start and wrmsr_end, from being defined multiple times. */ -static noinline void test_em_wrmsr(uint32_t msr, uint64_t value) +static noinline void test_em_wrmsr(u32 msr, u64 value) { - uint32_t a = value; - uint32_t d = value >> 32; + u32 a = value; + u32 d = value >> 32; guest_exception_count = 0; @@ -208,7 +208,7 @@ extern char em_wrmsr_start, em_wrmsr_end; static void guest_code_filter_allow(void) { - uint64_t data; + u64 data; /* * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS. 
@@ -328,7 +328,7 @@ static void guest_code_filter_deny(void) static void guest_code_permission_bitmap(void) { - uint64_t data; + u64 data; data = test_rdmsr(MSR_FS_BASE); GUEST_ASSERT(data == MSR_FS_BASE); @@ -391,7 +391,7 @@ static void check_for_guest_assert(struct kvm_vcpu *vcpu) } } -static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) +static void process_rdmsr(struct kvm_vcpu *vcpu, u32 msr_index) { struct kvm_run *run = vcpu->run; @@ -423,7 +423,7 @@ static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) } } -static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) +static void process_wrmsr(struct kvm_vcpu *vcpu, u32 msr_index) { struct kvm_run *run = vcpu->run; @@ -464,7 +464,7 @@ static void process_ucall_done(struct kvm_vcpu *vcpu) uc.cmd, UCALL_DONE); } -static uint64_t process_ucall(struct kvm_vcpu *vcpu) +static u64 process_ucall(struct kvm_vcpu *vcpu) { struct ucall uc = {}; @@ -489,20 +489,20 @@ static uint64_t process_ucall(struct kvm_vcpu *vcpu) } static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu, - uint32_t msr_index) + u32 msr_index) { vcpu_run(vcpu); process_rdmsr(vcpu, msr_index); } static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu, - uint32_t msr_index) + u32 msr_index) { vcpu_run(vcpu); process_wrmsr(vcpu, msr_index); } -static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu) +static u64 run_guest_then_process_ucall(struct kvm_vcpu *vcpu) { vcpu_run(vcpu); return process_ucall(vcpu); @@ -519,7 +519,7 @@ KVM_ONE_VCPU_TEST_SUITE(user_msr); KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow) { struct kvm_vm *vm = vcpu->vm; - uint64_t cmd; + u64 cmd; int rc; rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); @@ -732,7 +732,7 @@ static void run_msr_filter_flag_test(struct kvm_vm *vm) .flags = KVM_MSR_FILTER_READ, .nmsrs = 1, .base = 0, - .bitmap = (uint8_t *)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, }, }; diff --git 
a/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c index a81a24761aac07..1720113eae7990 100644 --- a/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c @@ -38,7 +38,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - uint32_t control; + u32 control; GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); GUEST_ASSERT(load_vmcs(vmx_pages)); @@ -72,7 +72,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa) int main(int argc, char *argv[]) { unsigned long apic_access_addr = ~0ul; - vm_vaddr_t vmx_pages_gva; + gva_t vmx_pages_gva; unsigned long high_gpa; struct vmx_pages *vmx; bool done = false; diff --git a/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c b/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c index 337c53fddeff2e..80a4fd1e5bbbeb 100644 --- a/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c @@ -33,7 +33,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - uint32_t control; + u32 control; GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); GUEST_ASSERT(load_vmcs(vmx_pages)); @@ -110,7 +110,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva; + gva_t vmx_pages_gva; struct vmx_pages *vmx; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c index a100ee5f000936..a2eaceed9ad520 100644 --- a/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c +++ 
b/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c @@ -52,7 +52,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva; + gva_t vmx_pages_gva; struct kvm_sregs sregs; struct kvm_vcpu *vcpu; struct kvm_run *run; diff --git a/tools/testing/selftests/kvm/x86/vmx_msrs_test.c b/tools/testing/selftests/kvm/x86/vmx_msrs_test.c index 90720b6205f4ec..c1e8632a1bb6b5 100644 --- a/tools/testing/selftests/kvm/x86/vmx_msrs_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_msrs_test.c @@ -12,11 +12,10 @@ #include "kvm_util.h" #include "vmx.h" -static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, - uint64_t mask) +static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask) { - uint64_t val = vcpu_get_msr(vcpu, msr_index); - uint64_t bit; + u64 val = vcpu_get_msr(vcpu, msr_index); + u64 bit; mask &= val; @@ -26,11 +25,10 @@ static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, } } -static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, - uint64_t mask) +static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask) { - uint64_t val = vcpu_get_msr(vcpu, msr_index); - uint64_t bit; + u64 val = vcpu_get_msr(vcpu, msr_index); + u64 bit; mask = ~mask | val; @@ -40,7 +38,7 @@ static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, } } -static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index) +static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index) { vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0)); vmx_fixed1_msr_test(vcpu, msr_index, GENMASK_ULL(63, 32)); @@ -68,10 +66,10 @@ static void vmx_save_restore_msrs_test(struct kvm_vcpu *vcpu) } static void __ia32_feature_control_msr_test(struct kvm_vcpu *vcpu, - uint64_t msr_bit, + u64 msr_bit, struct kvm_x86_cpu_feature feature) { - uint64_t val; + u64 val; vcpu_clear_cpuid_feature(vcpu, feature); 
@@ -90,7 +88,7 @@ static void __ia32_feature_control_msr_test(struct kvm_vcpu *vcpu, static void ia32_feature_control_msr_test(struct kvm_vcpu *vcpu) { - uint64_t supported_bits = FEAT_CTL_LOCKED | + u64 supported_bits = FEAT_CTL_LOCKED | FEAT_CTL_VMX_ENABLED_INSIDE_SMX | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | FEAT_CTL_SGX_LC_ENABLED | diff --git a/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c b/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c index 915c42001dbaa2..f13dee31738378 100644 --- a/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c @@ -30,7 +30,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; u64 guest_cr4; - vm_paddr_t pml5_pa, pml4_pa; + gpa_t pml5_pa, pml4_pa; u64 *pml5; u64 exit_reason; @@ -73,7 +73,7 @@ void guest_code(struct vmx_pages *vmx_pages) int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva = 0; + gva_t vmx_pages_gva = 0; struct kvm_vm *vm; struct kvm_vcpu *vcpu; struct kvm_x86_state *state; diff --git a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c index 7ff6f62e20a360..d004108dbdc638 100644 --- a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c @@ -52,16 +52,16 @@ static const union perf_capabilities format_caps = { .pebs_format = -1, }; -static void guest_test_perf_capabilities_gp(uint64_t val) +static void guest_test_perf_capabilities_gp(u64 val) { - uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); + u8 vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); __GUEST_ASSERT(vector == GP_VECTOR, "Expected #GP for value '0x%lx', got %s", val, ex_str(vector)); } -static void guest_code(uint64_t current_val) +static void guest_code(u64 current_val) { int i; @@ -129,7 +129,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, 
basic_perf_capabilities, guest_code) KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code) { - const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; + const u64 fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; int bit; for_each_set_bit(bit, &fungible_caps, 64) { @@ -148,7 +148,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code) */ KVM_ONE_VCPU_TEST(vmx_pmu_caps, immutable_perf_capabilities, guest_code) { - const uint64_t reserved_caps = (~host_cap.capabilities | + const u64 reserved_caps = (~host_cap.capabilities | immutable_caps.capabilities) & ~format_caps.capabilities; union perf_capabilities val = host_cap; @@ -210,7 +210,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, lbr_perf_capabilities, guest_code) KVM_ONE_VCPU_TEST(vmx_pmu_caps, perf_capabilities_unsupported, guest_code) { - uint64_t val; + u64 val; int i, r; vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); diff --git a/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c index 00dd2ac07a61e7..1b7b6ba23de762 100644 --- a/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c @@ -152,7 +152,7 @@ void guest_code(struct vmx_pages *vmx_pages) int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva = 0; + gva_t vmx_pages_gva = 0; struct kvm_regs regs1, regs2; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c index ae4a4b6c05cad7..39ce9a9369f5b3 100644 --- a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c +++ b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c @@ -48,20 +48,20 @@ * Incremented in the IPI handler. 
Provides evidence to the sender that the IPI * arrived at the destination */ -static volatile uint64_t ipis_rcvd; +static volatile u64 ipis_rcvd; /* Data struct shared between host main thread and vCPUs */ struct test_data_page { - uint32_t halter_apic_id; - volatile uint64_t hlt_count; - volatile uint64_t wake_count; - uint64_t ipis_sent; - uint64_t migrations_attempted; - uint64_t migrations_completed; - uint32_t icr; - uint32_t icr2; - uint32_t halter_tpr; - uint32_t halter_ppr; + u32 halter_apic_id; + volatile u64 hlt_count; + volatile u64 wake_count; + u64 ipis_sent; + u64 migrations_attempted; + u64 migrations_completed; + u32 icr; + u32 icr2; + u32 halter_tpr; + u32 halter_ppr; /* * Record local version register as a cross-check that APIC access @@ -69,19 +69,19 @@ struct test_data_page { * arch/x86/kvm/lapic.c). If test is failing, check that values match * to determine whether APIC access exits are working. */ - uint32_t halter_lvr; + u32 halter_lvr; }; struct thread_params { struct test_data_page *data; struct kvm_vcpu *vcpu; - uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */ + u64 *pipis_rcvd; /* host address of ipis_rcvd global */ }; void verify_apic_base_addr(void) { - uint64_t msr = rdmsr(MSR_IA32_APICBASE); - uint64_t base = GET_APIC_BASE(msr); + u64 msr = rdmsr(MSR_IA32_APICBASE); + u64 base = GET_APIC_BASE(msr); GUEST_ASSERT(base == APIC_DEFAULT_GPA); } @@ -125,12 +125,12 @@ static void guest_ipi_handler(struct ex_regs *regs) static void sender_guest_code(struct test_data_page *data) { - uint64_t last_wake_count; - uint64_t last_hlt_count; - uint64_t last_ipis_rcvd_count; - uint32_t icr_val; - uint32_t icr2_val; - uint64_t tsc_start; + u64 last_wake_count; + u64 last_hlt_count; + u64 last_ipis_rcvd_count; + u32 icr_val; + u32 icr2_val; + u64 tsc_start; verify_apic_base_addr(); xapic_enable(); @@ -248,7 +248,7 @@ static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu) } void do_migrations(struct test_data_page *data, 
int run_secs, int delay_usecs, - uint64_t *pipis_rcvd) + u64 *pipis_rcvd) { long pages_not_moved; unsigned long nodemask = 0; @@ -259,9 +259,9 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs, int i; int from, to; unsigned long bit; - uint64_t hlt_count; - uint64_t wake_count; - uint64_t ipis_sent; + u64 hlt_count; + u64 wake_count; + u64 ipis_sent; fprintf(stderr, "Calling migrate_pages every %d microseconds\n", delay_usecs); @@ -393,12 +393,12 @@ int main(int argc, char *argv[]) int run_secs = 0; int delay_usecs = 0; struct test_data_page *data; - vm_vaddr_t test_data_page_vaddr; + gva_t test_data_page_gva; bool migrate = false; pthread_t threads[2]; struct thread_params params[2]; struct kvm_vm *vm; - uint64_t *pipis_rcvd; + u64 *pipis_rcvd; get_cmdline_args(argc, argv, &run_secs, &migrate, &delay_usecs); if (run_secs <= 0) @@ -414,16 +414,16 @@ int main(int argc, char *argv[]) params[1].vcpu = vm_vcpu_add(vm, 1, sender_guest_code); - test_data_page_vaddr = vm_vaddr_alloc_page(vm); - data = addr_gva2hva(vm, test_data_page_vaddr); + test_data_page_gva = vm_alloc_page(vm); + data = addr_gva2hva(vm, test_data_page_gva); memset(data, 0, sizeof(*data)); params[0].data = data; params[1].data = data; - vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr); - vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr); + vcpu_args_set(params[0].vcpu, 1, test_data_page_gva); + vcpu_args_set(params[1].vcpu, 1, test_data_page_gva); - pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd); + pipis_rcvd = (u64 *)addr_gva2hva(vm, (u64)&ipis_rcvd); params[0].pipis_rcvd = pipis_rcvd; params[1].pipis_rcvd = pipis_rcvd; diff --git a/tools/testing/selftests/kvm/x86/xapic_state_test.c b/tools/testing/selftests/kvm/x86/xapic_state_test.c index 0c5e12f5f14ed5..637bb90c1d9395 100644 --- a/tools/testing/selftests/kvm/x86/xapic_state_test.c +++ b/tools/testing/selftests/kvm/x86/xapic_state_test.c @@ -23,7 +23,7 @@ static void xapic_guest_code(void) 
xapic_enable(); while (1) { - uint64_t val = (u64)xapic_read_reg(APIC_IRR) | + u64 val = (u64)xapic_read_reg(APIC_IRR) | (u64)xapic_read_reg(APIC_IRR + 0x10) << 32; xapic_write_reg(APIC_ICR2, val >> 32); @@ -43,7 +43,7 @@ static void x2apic_guest_code(void) x2apic_enable(); do { - uint64_t val = x2apic_read_reg(APIC_IRR) | + u64 val = x2apic_read_reg(APIC_IRR) | x2apic_read_reg(APIC_IRR + 0x10) << 32; if (val & X2APIC_RSVD_BITS_MASK) { @@ -56,12 +56,12 @@ static void x2apic_guest_code(void) } while (1); } -static void ____test_icr(struct xapic_vcpu *x, uint64_t val) +static void ____test_icr(struct xapic_vcpu *x, u64 val) { struct kvm_vcpu *vcpu = x->vcpu; struct kvm_lapic_state xapic; struct ucall uc; - uint64_t icr; + u64 icr; /* * Tell the guest what ICR value to write. Use the IRR to pass info, @@ -93,7 +93,7 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val) TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY); } -static void __test_icr(struct xapic_vcpu *x, uint64_t val) +static void __test_icr(struct xapic_vcpu *x, u64 val) { /* * The BUSY bit is reserved on both AMD and Intel, but only AMD treats @@ -109,7 +109,7 @@ static void __test_icr(struct xapic_vcpu *x, uint64_t val) static void test_icr(struct xapic_vcpu *x) { struct kvm_vcpu *vcpu = x->vcpu; - uint64_t icr, i, j; + u64 icr, i, j; icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED; for (i = 0; i <= 0xff; i++) @@ -142,9 +142,9 @@ static void test_icr(struct xapic_vcpu *x) __test_icr(x, -1ull & ~APIC_DM_FIXED_MASK); } -static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base) +static void __test_apic_id(struct kvm_vcpu *vcpu, u64 apic_base) { - uint32_t apic_id, expected; + u32 apic_id, expected; struct kvm_lapic_state xapic; vcpu_set_msr(vcpu, MSR_IA32_APICBASE, apic_base); @@ -170,9 +170,9 @@ static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base) */ static void test_apic_id(void) { - const uint32_t NR_VCPUS = 3; + const u32 NR_VCPUS = 3; struct kvm_vcpu 
*vcpus[NR_VCPUS]; - uint64_t apic_base; + u64 apic_base; struct kvm_vm *vm; int i; diff --git a/tools/testing/selftests/kvm/x86/xapic_tpr_test.c b/tools/testing/selftests/kvm/x86/xapic_tpr_test.c index 3862134d9d40da..ab25db2235d538 100644 --- a/tools/testing/selftests/kvm/x86/xapic_tpr_test.c +++ b/tools/testing/selftests/kvm/x86/xapic_tpr_test.c @@ -58,7 +58,7 @@ static void tpr_guest_irq_queue(void) if (is_x2apic) { x2apic_write_reg(APIC_SELF_IPI, IRQ_VECTOR); } else { - uint32_t icr, icr2; + u32 icr, icr2; icr = APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | IRQ_VECTOR; @@ -69,9 +69,9 @@ static void tpr_guest_irq_queue(void) } } -static uint8_t tpr_guest_tpr_get(void) +static u8 tpr_guest_tpr_get(void) { - uint32_t taskpri; + u32 taskpri; if (is_x2apic) taskpri = x2apic_read_reg(APIC_TASKPRI); @@ -81,9 +81,9 @@ static uint8_t tpr_guest_tpr_get(void) return GET_APIC_PRI(taskpri); } -static uint8_t tpr_guest_ppr_get(void) +static u8 tpr_guest_ppr_get(void) { - uint32_t procpri; + u32 procpri; if (is_x2apic) procpri = x2apic_read_reg(APIC_PROCPRI); @@ -93,9 +93,9 @@ static uint8_t tpr_guest_ppr_get(void) return GET_APIC_PRI(procpri); } -static uint8_t tpr_guest_cr8_get(void) +static u8 tpr_guest_cr8_get(void) { - uint64_t cr8; + u64 cr8; asm volatile ("mov %%cr8, %[cr8]\n\t" : [cr8] "=r"(cr8)); @@ -104,7 +104,7 @@ static uint8_t tpr_guest_cr8_get(void) static void tpr_guest_check_tpr_ppr_cr8_equal(void) { - uint8_t tpr; + u8 tpr; tpr = tpr_guest_tpr_get(); @@ -157,19 +157,19 @@ static void tpr_guest_code(void) GUEST_DONE(); } -static uint8_t lapic_tpr_get(struct kvm_lapic_state *xapic) +static u8 lapic_tpr_get(struct kvm_lapic_state *xapic) { return GET_APIC_PRI(*((u32 *)&xapic->regs[APIC_TASKPRI])); } -static void lapic_tpr_set(struct kvm_lapic_state *xapic, uint8_t val) +static void lapic_tpr_set(struct kvm_lapic_state *xapic, u8 val) { u32 *taskpri = (u32 *)&xapic->regs[APIC_TASKPRI]; *taskpri = SET_APIC_PRI(*taskpri, val); } -static uint8_t 
sregs_tpr(struct kvm_sregs *sregs) +static u8 sregs_tpr(struct kvm_sregs *sregs) { return sregs->cr8 & GENMASK(3, 0); } @@ -197,7 +197,7 @@ static void test_tpr_check_tpr_cr8_equal(struct kvm_vcpu *vcpu) static void test_tpr_set_tpr_for_irq(struct kvm_vcpu *vcpu, bool mask) { struct kvm_lapic_state xapic; - uint8_t tpr; + u8 tpr; static_assert(IRQ_VECTOR >= 16, "invalid IRQ vector number"); tpr = IRQ_VECTOR / 16; diff --git a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c index d038c1571729c9..40dc9e6b3fad3a 100644 --- a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c +++ b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c @@ -21,7 +21,7 @@ */ #define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \ do { \ - uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \ + u64 __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \ \ __GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \ __supported == ((xfeatures) | (dependencies)), \ @@ -39,7 +39,7 @@ do { \ */ #define ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0, xfeatures) \ do { \ - uint64_t __supported = (supported_xcr0) & (xfeatures); \ + u64 __supported = (supported_xcr0) & (xfeatures); \ \ __GUEST_ASSERT(!__supported || __supported == (xfeatures), \ "supported = 0x%lx, xfeatures = 0x%llx", \ @@ -48,8 +48,8 @@ do { \ static void guest_code(void) { - uint64_t initial_xcr0; - uint64_t supported_xcr0; + u64 initial_xcr0; + u64 supported_xcr0; int i, vector; set_cr4(get_cr4() | X86_CR4_OSXSAVE); diff --git a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c index 23909b501ac27c..5076f6a7545575 100644 --- a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c @@ -116,15 +116,15 @@ struct pvclock_wall_clock { } __attribute__((__packed__)); struct vcpu_runstate_info { - uint32_t state; - uint64_t 
state_entry_time; - uint64_t time[5]; /* Extra field for overrun check */ + u32 state; + u64 state_entry_time; + u64 time[5]; /* Extra field for overrun check */ }; struct compat_vcpu_runstate_info { - uint32_t state; - uint64_t state_entry_time; - uint64_t time[5]; + u32 state; + u64 state_entry_time; + u64 time[5]; } __attribute__((__packed__)); struct arch_vcpu_info { @@ -133,8 +133,8 @@ struct arch_vcpu_info { }; struct vcpu_info { - uint8_t evtchn_upcall_pending; - uint8_t evtchn_upcall_mask; + u8 evtchn_upcall_pending; + u8 evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct pvclock_vcpu_time_info time; @@ -145,7 +145,7 @@ struct shared_info { unsigned long evtchn_pending[64]; unsigned long evtchn_mask[64]; struct pvclock_wall_clock wc; - uint32_t wc_sec_hi; + u32 wc_sec_hi; /* arch_shared_info here */ }; @@ -658,7 +658,7 @@ int main(int argc, char *argv[]) printf("Testing RUNSTATE_ADJUST\n"); rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST; memset(&rst.u, 0, sizeof(rst.u)); - rst.u.runstate.state = (uint64_t)-1; + rst.u.runstate.state = (u64)-1; rst.u.runstate.time_blocked = 0x5a - rs->time[RUNSTATE_blocked]; rst.u.runstate.time_offline = @@ -1113,7 +1113,7 @@ int main(int argc, char *argv[]) /* Don't change the address, just trigger a write */ struct kvm_xen_vcpu_attr adj = { .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST, - .u.runstate.state = (uint64_t)-1 + .u.runstate.state = (u64)-1 }; vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &adj); diff --git a/tools/testing/selftests/kvm/x86/xss_msr_test.c b/tools/testing/selftests/kvm/x86/xss_msr_test.c index f331a4e9bae3b4..12c63df6bbcebd 100644 --- a/tools/testing/selftests/kvm/x86/xss_msr_test.c +++ b/tools/testing/selftests/kvm/x86/xss_msr_test.c @@ -17,7 +17,7 @@ int main(int argc, char *argv[]) bool xss_in_msr_list; struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint64_t xss_val; + u64 xss_val; int i, r; /* Create VM */ diff --git a/tools/testing/selftests/mm/config 
b/tools/testing/selftests/mm/config index 1dbe2b4558ab9d..06f78bd232e2e3 100644 --- a/tools/testing/selftests/mm/config +++ b/tools/testing/selftests/mm/config @@ -13,3 +13,4 @@ CONFIG_PROFILING=y CONFIG_UPROBES=y CONFIG_MEMORY_FAILURE=y CONFIG_HWPOISON_INJECT=m +CONFIG_PROC_MEM_ALWAYS_FORCE=y diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile index 789037be44c7f3..5d2dffca0e918e 100644 --- a/tools/testing/selftests/sched_ext/Makefile +++ b/tools/testing/selftests/sched_ext/Makefile @@ -175,6 +175,7 @@ auto-test-targets := \ maximal \ maybe_null \ minimal \ + non_scx_kfunc_deny \ numa \ allowed_cpus \ peek_dsq \ diff --git a/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c new file mode 100644 index 00000000000000..9f16d39255e734 --- /dev/null +++ b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Verify that context-sensitive SCX kfuncs (even "unlocked" ones) are + * restricted to only SCX struct_ops programs. Non-SCX struct_ops programs, + * such as TCP congestion control programs, should be rejected by the BPF + * verifier when attempting to call these kfuncs. + * + * Copyright (C) 2026 Ching-Chun (Jim) Huang + * Copyright (C) 2026 Cheng-Yang Chou + */ + +#include +#include +#include + +/* SCX kfunc from scx_kfunc_ids_any set */ +void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym; + +SEC("struct_ops/ssthresh") +__u32 BPF_PROG(tcp_ca_ssthresh, struct sock *sk) +{ + /* + * This call should be rejected by the verifier because this is a + * TCP congestion control program (non-SCX struct_ops). 
+ */ + scx_bpf_kick_cpu(0, 0); + return 2; +} + +SEC("struct_ops/cong_avoid") +void BPF_PROG(tcp_ca_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) {} + +SEC("struct_ops/undo_cwnd") +__u32 BPF_PROG(tcp_ca_undo_cwnd, struct sock *sk) { return 2; } + +SEC(".struct_ops") +struct tcp_congestion_ops tcp_non_scx_ca = { + .ssthresh = (void *)tcp_ca_ssthresh, + .cong_avoid = (void *)tcp_ca_cong_avoid, + .undo_cwnd = (void *)tcp_ca_undo_cwnd, + .name = "tcp_kfunc_deny", +}; + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c new file mode 100644 index 00000000000000..1c031575fb87be --- /dev/null +++ b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Verify that context-sensitive SCX kfuncs (even "unlocked" ones) are + * restricted to only SCX struct_ops programs. Non-SCX struct_ops programs, + * such as TCP congestion control programs, should be rejected by the BPF + * verifier when attempting to call these kfuncs. 
+ * + * Copyright (C) 2026 Ching-Chun (Jim) Huang + * Copyright (C) 2026 Cheng-Yang Chou + */ + +#include +#include +#include +#include +#include +#include "non_scx_kfunc_deny.bpf.skel.h" +#include "scx_test.h" + +static enum scx_test_status run(void *ctx) +{ + struct non_scx_kfunc_deny *skel; + int err; + + skel = non_scx_kfunc_deny__open(); + if (!skel) { + SCX_ERR("Failed to open skel"); + return SCX_TEST_FAIL; + } + + err = non_scx_kfunc_deny__load(skel); + non_scx_kfunc_deny__destroy(skel); + + if (err == 0) { + SCX_ERR("non-SCX BPF program loaded when it should have been rejected"); + return SCX_TEST_FAIL; + } + + return SCX_TEST_PASS; +} + +struct scx_test non_scx_kfunc_deny = { + .name = "non_scx_kfunc_deny", + .description = "Verify that non-SCX struct_ops programs cannot call SCX kfuncs", + .run = run, +}; +REGISTER_SCX_TEST(&non_scx_kfunc_deny) diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json index 557fb074acf0ca..cd19d05925e40f 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json @@ -302,5 +302,31 @@ "$TC qdisc del dev $ETH root", "echo \"1\" > /sys/bus/netdevsim/del_device" ] + }, + { + "id": "c7e1", + "name": "Class dump after graft and delete of explicit child qdisc", + "category": [ + "qdisc", + "taprio" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "echo \"1 1 8\" > /sys/bus/netdevsim/new_device", + "$TC qdisc replace dev $ETH handle 8001: parent root taprio num_tc 8 map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 base-time 0 sched-entry S ff 20000000 clockid CLOCK_TAI", + "$TC qdisc add dev $ETH parent 8001:1 handle 8002: pfifo", + "$TC qdisc del dev $ETH parent 8001:1 handle 8002:" + ], + "cmdUnderTest": "$TC class show dev $ETH", + "expExitCode": "0", + "verifyCmd": "$TC class show dev $ETH", + "matchPattern": "class taprio 8001:[0-9]+ 
root", + "matchCount": "8", + "teardown": [ + "$TC qdisc del dev $ETH root", + "echo \"1\" > /sys/bus/netdevsim/del_device" + ] } ] diff --git a/tools/testing/vma/include/dup.h b/tools/testing/vma/include/dup.h index b4864aad2db0f8..9e0dfd3a85b0e2 100644 --- a/tools/testing/vma/include/dup.h +++ b/tools/testing/vma/include/dup.h @@ -1330,7 +1330,7 @@ static inline int __compat_vma_mmap(struct vm_area_desc *desc, /* Update the VMA from the descriptor. */ compat_set_vma_from_desc(vma, desc); /* Complete any specified mmap actions. */ - return mmap_action_complete(vma, &desc->action); + return mmap_action_complete(vma, &desc->action, /*is_compat=*/true); } static inline int compat_vma_mmap(struct file *file, struct vm_area_struct *vma) diff --git a/tools/testing/vma/include/stubs.h b/tools/testing/vma/include/stubs.h index a30b8bc8495570..64164e25658fa6 100644 --- a/tools/testing/vma/include/stubs.h +++ b/tools/testing/vma/include/stubs.h @@ -87,7 +87,8 @@ static inline int mmap_action_prepare(struct vm_area_desc *desc) } static inline int mmap_action_complete(struct vm_area_struct *vma, - struct mmap_action *action) + struct mmap_action *action, + bool is_compat) { return 0; }