diff --git a/.mailmap b/.mailmap index 428d721ffbb168..da4afd2b241528 100644 --- a/.mailmap +++ b/.mailmap @@ -34,6 +34,7 @@ Alexander Lobakin Alexander Lobakin Alexander Mikhalitsyn Alexander Mikhalitsyn +Alexander Mikhalitsyn Alexander Sverdlin Alexander Sverdlin Alexander Sverdlin @@ -786,7 +787,8 @@ Subash Abhinov Kasiviswanathan Subhash Jadavani Sudarshan Rajagopalan -Sudeep Holla Sudeep KarkadaNagesha +Sudeep Holla Sudeep KarkadaNagesha +Sudeep Holla Sumit Garg Sumit Semwal Surabhi Vishnoi @@ -851,6 +853,7 @@ Valentin Schneider Veera Sundaram Sankaran Veerabhadrarao Badiganti Venkateswara Naralasetty +Viacheslav Bocharov Vikash Garodia Vikash Garodia Vincent Mailhol diff --git a/Documentation/ABI/testing/sysfs-class-tsm b/Documentation/ABI/testing/sysfs-class-tsm index 6fc1a5ac6da1af..2949468deaf72c 100644 --- a/Documentation/ABI/testing/sysfs-class-tsm +++ b/Documentation/ABI/testing/sysfs-class-tsm @@ -7,13 +7,3 @@ Description: signals when the PCI layer is able to support establishment of link encryption and other device-security features coordinated through a platform tsm. - -What: /sys/class/tsm/tsmN/streamH.R.E -Contact: linux-pci@vger.kernel.org -Description: - (RO) When a host bridge has established a secure connection via - the platform TSM, symlink appears. The primary function of this - is have a system global review of TSM resource consumption - across host bridges. The link points to the endpoint PCI device - and matches the same link published by the host bridge. See - Documentation/ABI/testing/sysfs-devices-pci-host-bridge. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 1058f2a6d6a8c2..aa0031108bc1da 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3472,6 +3472,11 @@ Kernel parameters If there are multiple matching configurations changing the same attribute, the last one is used. + liveupdate= [KNL,EARLY] + Format: + Enable Live Update Orchestrator (LUO). + Default: off. + load_ramdisk= [RAM] [Deprecated] lockd.nlm_grace_period=P [NFS] Assign grace period. diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index 245bf63949356c..06d0ebddeefa21 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -231,6 +231,8 @@ eventually gets pushed out to disk. This tunable is used to define when dirty inode is old enough to be eligible for writeback by the kernel flusher threads. And, it is also used as the interval to wakeup dirtytime_writeback thread. +Setting this to zero disables periodic dirtytime writeback. 
+ dirty_writeback_centisecs ========================= diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada3710-xb-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/marvell,armada3710-xb-pinctrl.yaml index 51bad2e8d6f1f7..4f9013d3687499 100644 --- a/Documentation/devicetree/bindings/pinctrl/marvell,armada3710-xb-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada3710-xb-pinctrl.yaml @@ -88,7 +88,7 @@ patternProperties: pcie1_clkreq, pcie1_wakeup, pmic0, pmic1, ptp, ptp_clk, ptp_trig, pwm0, pwm1, pwm2, pwm3, rgmii, sdio0, sdio_sb, smi, spi_cs1, spi_cs2, spi_cs3, spi_quad, uart1, uart2, - usb2_drvvbus1, usb32_drvvbus ] + usb2_drvvbus1, usb32_drvvbus0 ] function: enum: [ drvbus, emmc, gpio, i2c, jtag, led, mii, mii_err, onewire, diff --git a/Documentation/devicetree/bindings/sound/awinic,aw87390.yaml b/Documentation/devicetree/bindings/sound/awinic,aw87390.yaml index ba9d8767c5d597..9c1baae767c4d1 100644 --- a/Documentation/devicetree/bindings/sound/awinic,aw87390.yaml +++ b/Documentation/devicetree/bindings/sound/awinic,aw87390.yaml @@ -15,12 +15,15 @@ description: sound quallity, which is a new high efficiency, low noise, constant large volume, 6th Smart K audio amplifier. -allOf: - - $ref: dai-common.yaml# - properties: compatible: - const: awinic,aw87390 + oneOf: + - enum: + - awinic,aw87390 + - items: + - enum: + - anbernic,rgds-amp + - const: awinic,aw87391 reg: maxItems: 1 @@ -40,10 +43,31 @@ required: - compatible - reg - "#sound-dai-cells" - - awinic,audio-channel unevaluatedProperties: false +allOf: + - $ref: dai-common.yaml# + - if: + properties: + compatible: + contains: + enum: + - awinic,aw87390 + then: + required: + - awinic,audio-channel + + - if: + properties: + compatible: + contains: + enum: + - anbernic,rgds-amp + then: + properties: + vdd-supply: true + examples: - | i2c { diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml index beef193aaaeba0..87559d0d079a7d 100644 --- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml +++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml @@ -40,11 +40,33 @@ properties: tdm-slots: $ref: /schemas/types.yaml#/definitions/uint32 description: - number of channels over one serializer - the property is ignored in DIT mode + Number of channels over one serializer. This property + specifies the TX playback TDM slot count, along with default RX slot count + if tdm-slots-rx is not specified. + The property is ignored in DIT mode. minimum: 2 maximum: 32 + tdm-slots-rx: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Number of RX capture channels over one serializer. If specified, + allows independent RX TDM slot count separate from TX. Requires + ti,async-mode to be enabled for independent TX/RX clock rates. + The property is ignored in DIT mode. + minimum: 2 + maximum: 32 + + ti,async-mode: + description: + Specify to allow independent TX & RX clocking, + to enable audio playback & recording with different sampling rates, + and different numbers of bits per frame. + If the property is omitted, TX and RX will share the same bit clock and frame clock signals, + so RX must use the same bits per frame and sampling rate as TX in synchronous mode.
+ The property is ignored in DIT mode (as DIT is TX-only). + type: boolean + serial-dir: description: A list of serializer configuration @@ -125,7 +147,21 @@ properties: auxclk-fs-ratio: $ref: /schemas/types.yaml#/definitions/uint32 - description: ratio of AUCLK and FS rate if applicable + description: + Ratio of AUCLK and FS rate if applicable. This property specifies + the TX ratio, along with default RX ratio if auxclk-fs-ratio-rx + is not specified. + When not specified, the system clock frequency provided via the set_sysclk + callback by the machine driver is used for the divider calculation. + + auxclk-fs-ratio-rx: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Ratio of AUCLK and FS rate for RX. If specified, allows + for a different RX ratio. Requires ti,async-mode to be + enabled when the ratio differs from auxclk-fs-ratio. + When not specified, it defaults to the value of auxclk-fs-ratio. + The property is ignored in DIT mode. gpio-controller: true @@ -170,14 +206,38 @@ allOf: - $ref: dai-common.yaml# - if: properties: - opmode: + op-mode: enum: - 0 - then: required: - tdm-slots + - if: + properties: + op-mode: + const: 1 + then: + properties: + tdm-slots: false + tdm-slots-rx: false + ti,async-mode: false + auxclk-fs-ratio-rx: false + + - if: + required: + - tdm-slots-rx + then: + required: + - ti,async-mode + + - if: + required: + - auxclk-fs-ratio-rx + then: + required: + - ti,async-mode + unevaluatedProperties: false examples: @@ -190,6 +250,7 @@ examples: interrupt-names = "tx", "rx"; op-mode = <0>; /* MCASP_IIS_MODE */ tdm-slots = <2>; + ti,async-mode; dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>; dma-names = "tx", "rx"; serial-dir = < diff --git a/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml b/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml index c9152bac742185..608defc93c1e9f 100644 --- a/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml +++ b/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml @@ -25,6 +25,7 @@ properties: - fsl,imx53-asrc - fsl,imx8qm-asrc - fsl,imx8qxp-asrc + - fsl,imx952-asrc - items: - enum: - fsl,imx6sx-asrc diff --git a/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml b/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml index 3d5d435c765b49..3a32f7517d0cb9 100644 --- a/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml +++ b/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml @@ -22,14 +22,20 @@ allOf: properties: compatible: - enum: - - fsl,imx7ulp-rpmsg-audio - - fsl,imx8mn-rpmsg-audio - - fsl,imx8mm-rpmsg-audio - - fsl,imx8mp-rpmsg-audio - - fsl,imx8ulp-rpmsg-audio - - fsl,imx93-rpmsg-audio - - fsl,imx95-rpmsg-audio + oneOf: + - enum: + - fsl,imx7ulp-rpmsg-audio + - fsl,imx8mn-rpmsg-audio + - fsl,imx8mm-rpmsg-audio + - fsl,imx8mp-rpmsg-audio + - fsl,imx8ulp-rpmsg-audio + - fsl,imx93-rpmsg-audio + - fsl,imx95-rpmsg-audio + - items: + - enum: + - fsl,imx94-rpmsg-audio + - fsl,imx952-rpmsg-audio + - const: fsl,imx95-rpmsg-audio clocks: items: diff --git a/Documentation/devicetree/bindings/sound/ti,tlv320aic3x.yaml b/Documentation/devicetree/bindings/sound/ti,tlv320aic3x.yaml index 206f6d61e362e3..50088698adac98 100644 --- a/Documentation/devicetree/bindings/sound/ti,tlv320aic3x.yaml +++ b/Documentation/devicetree/bindings/sound/ti,tlv320aic3x.yaml @@ -46,6 +46,7 @@ properties: compatible: enum: + - ti,tlv320aic23 - ti,tlv320aic3x - ti,tlv320aic33 - ti,tlv320aic3007 diff --git a/MAINTAINERS b/MAINTAINERS index 77e237b31753bd..071729f46487e6 100644 --- a/MAINTAINERS +++
b/MAINTAINERS @@ -335,7 +335,7 @@ F: tools/power/acpi/ ACPI FOR ARM64 (ACPI/arm64) M: Lorenzo Pieralisi M: Hanjun Guo -M: Sudeep Holla +M: Sudeep Holla L: linux-acpi@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -351,7 +351,7 @@ F: drivers/acpi/riscv/ F: include/linux/acpi_rimt.h ACPI PCC(Platform Communication Channel) MAILBOX DRIVER -M: Sudeep Holla +M: Sudeep Holla L: linux-acpi@vger.kernel.org S: Supported F: drivers/mailbox/pcc.c @@ -1030,6 +1030,13 @@ L: dmaengine@vger.kernel.org S: Supported F: drivers/dma/amd/ae4dma/ +AMD ASoC DRIVERS +M: Vijendar Mukunda +R: Venkata Prasad Potturu +L: linux-sound@vger.kernel.org +S: Supported +F: sound/soc/amd/ + AMD AXI W1 DRIVER M: Kris Chaplin R: Thomas Delev @@ -2747,14 +2754,14 @@ F: arch/arm/include/asm/hardware/dec21285.h F: arch/arm/mach-footbridge/ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE -M: Shawn Guo +M: Frank Li M: Sascha Hauer R: Pengutronix Kernel Team R: Fabio Estevam L: imx@lists.linux.dev L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git F: Documentation/devicetree/bindings/firmware/fsl* F: Documentation/devicetree/bindings/firmware/nxp* F: arch/arm/boot/dts/nxp/imx/ @@ -2769,22 +2776,22 @@ N: mxs N: \bmxc[^\d] ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE -M: Shawn Guo +M: Frank Li L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git F: arch/arm/boot/dts/nxp/ls/ F: arch/arm64/boot/dts/freescale/fsl-* F: arch/arm64/boot/dts/freescale/qoriq-* ARM/FREESCALE VYBRID ARM ARCHITECTURE -M: Shawn Guo +M: Frank Li M: Sascha Hauer R: Pengutronix Kernel Team R: Stefan Agner L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git F: arch/arm/boot/dts/nxp/vf/ F: arch/arm/mach-imx/*vf610* @@ -3681,7 +3688,7 @@ N: uniphier ARM/VERSATILE EXPRESS PLATFORM M: Liviu Dudau -M: Sudeep Holla +M: Sudeep Holla M: Lorenzo Pieralisi L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -6513,7 +6520,7 @@ F: drivers/i2c/busses/i2c-cp2615.c CPU FREQUENCY DRIVERS - VEXPRESS SPC ARM BIG LITTLE M: Viresh Kumar -M: Sudeep Holla +M: Sudeep Holla L: linux-pm@vger.kernel.org S: Maintained W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php @@ -6609,7 +6616,7 @@ F: include/linux/platform_data/cpuidle-exynos.h CPUIDLE DRIVER - ARM PSCI M: Lorenzo Pieralisi -M: Sudeep Holla +M: Sudeep Holla M: Ulf Hansson L: linux-pm@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -9260,7 +9267,6 @@ F: drivers/scsi/be2iscsi/ EMULEX 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net) M: Ajit Khaparde M: Sriharsha Basavapatna -M: Somnath Kotur L: netdev@vger.kernel.org S: Maintained W: http://www.emulex.com @@ -9816,7 +9822,7 @@ F: include/uapi/linux/firewire*.h F: tools/firewire/ FIRMWARE FRAMEWORK FOR ARMV8-A -M: Sudeep Holla +M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/firmware/arm_ffa/ @@ -10514,7 +10520,7 @@ S: Maintained F: 
scripts/gendwarfksyms/ GENERIC ARCHITECTURE TOPOLOGY -M: Sudeep Holla +M: Sudeep Holla L: linux-kernel@vger.kernel.org S: Maintained F: drivers/base/arch_topology.c @@ -11376,6 +11382,11 @@ F: Documentation/ABI/testing/sysfs-devices-platform-kunpeng_hccs F: drivers/soc/hisilicon/kunpeng_hccs.c F: drivers/soc/hisilicon/kunpeng_hccs.h +HISILICON SOC HHA DRIVER +M: Yushan Wang +S: Maintained +F: drivers/cache/hisi_soc_hha.c + HISILICON LPC BUS DRIVER M: Jay Fang S: Maintained @@ -15101,7 +15112,7 @@ F: drivers/mailbox/arm_mhuv2.c F: include/linux/mailbox/arm_mhuv2_message.h MAILBOX ARM MHUv3 -M: Sudeep Holla +M: Sudeep Holla M: Cristian Marussi L: linux-kernel@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -20578,7 +20589,7 @@ F: drivers/pinctrl/pinctrl-amd.c PIN CONTROLLER - FREESCALE M: Dong Aisheng M: Fabio Estevam -M: Shawn Guo +M: Frank Li M: Jacky Bai R: Pengutronix Kernel Team R: NXP S32 Linux Team @@ -20974,6 +20985,18 @@ F: Documentation/devicetree/bindings/net/pse-pd/ F: drivers/net/pse-pd/ F: net/ethtool/pse-pd.c +PSP SECURITY PROTOCOL +M: Daniel Zahka +M: Jakub Kicinski +M: Willem de Bruijn +F: Documentation/netlink/specs/psp.yaml +F: Documentation/networking/psp.rst +F: include/net/psp/ +F: include/net/psp.h +F: include/uapi/linux/psp.h +F: net/psp/ +K: struct\ psp(_assoc|_dev|hdr)\b + PSTORE FILESYSTEM M: Kees Cook R: Tony Luck @@ -23631,7 +23654,7 @@ F: include/uapi/linux/sed* SECURE MONITOR CALL(SMC) CALLING CONVENTION (SMCCC) M: Mark Rutland M: Lorenzo Pieralisi -M: Sudeep Holla +M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/firmware/smccc/ @@ -25395,7 +25418,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git F: drivers/mfd/syscon.c SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers -M: Sudeep Holla +M: Sudeep Holla R: Cristian Marussi L: arm-scmi@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -26547,7 +26570,7 @@ F: samples/tsm-mr/ TRUSTED SERVICES TEE DRIVER M: Balint Dobszay -M: Sudeep Holla +M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: trusted-services@lists.trustedfirmware.org S: Maintained diff --git a/Makefile b/Makefile index 3373308d2217ce..d3a8482bdbd0de 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = NAME = Baby Opossum Posse # *DOCUMENTATION* @@ -1624,7 +1624,8 @@ MRPROPER_FILES += include/config include/generated \ certs/x509.genkey \ vmlinux-gdb.py \ rpmbuild \ - rust/libmacros.so rust/libmacros.dylib + rust/libmacros.so rust/libmacros.dylib \ + rust/libpin_init_internal.so rust/libpin_init_internal.dylib # clean - Delete most, but leave enough to build external modules # diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h index 6c607c68f3ad75..c35250c4991bc7 100644 --- a/arch/arm/include/asm/string.h +++ b/arch/arm/include/asm/string.h @@ -42,7 +42,10 @@ static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n) extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi); static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n) { - return __memset64(p, v, n * 8, v >> 32); + if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) + return __memset64(p, v, n * 8, v >> 32); + else + return __memset64(p, v >> 32, n * 8, v); } /* diff --git a/arch/arm64/configs/defconfig 
b/arch/arm64/configs/defconfig index 45288ec9eaf736..35e9eb180c9a1c 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -670,7 +670,6 @@ CONFIG_PINCTRL_LPASS_LPI=m CONFIG_PINCTRL_SC7280_LPASS_LPI=m CONFIG_PINCTRL_SM6115_LPASS_LPI=m CONFIG_PINCTRL_SM8250_LPASS_LPI=m -CONFIG_PINCTRL_SM8350_LPASS_LPI=m CONFIG_PINCTRL_SM8450_LPASS_LPI=m CONFIG_PINCTRL_SC8280XP_LPASS_LPI=m CONFIG_PINCTRL_SM8550_LPASS_LPI=m diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index e5000bef90f2ae..7cf9310de0ec1f 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -723,7 +723,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) dpage = pfn_to_page(uvmem_pfn); dpage->zone_device_data = pvt; - zone_device_page_init(dpage, 0); + zone_device_page_init(dpage, &kvmppc_uvmem_pgmap, 0); return dpage; out_clear: spin_lock(&kvmppc_uvmem_bitmap_lock); diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c index 38aac2c47845a0..d0c61f86cba337 100644 --- a/arch/riscv/errata/sifive/errata.c +++ b/arch/riscv/errata/sifive/errata.c @@ -75,26 +75,12 @@ static u32 __init_or_module sifive_errata_probe(unsigned long archid, return cpu_req_errata; } -static void __init_or_module warn_miss_errata(u32 miss_errata) -{ - int i; - - pr_warn("----------------------------------------------------------------\n"); - pr_warn("WARNING: Missing the following errata may cause potential issues\n"); - for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++) - if (miss_errata & 0x1 << i) - pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name); - pr_warn("Please enable the corresponding Kconfig to apply them\n"); - pr_warn("----------------------------------------------------------------\n"); -} - void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, unsigned long archid, unsigned long impid, unsigned int stage) { struct alt_entry *alt; u32 cpu_req_errata; - u32 cpu_apply_errata = 0; u32 tmp; BUILD_BUG_ON(ERRATA_SIFIVE_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); @@ -118,10 +104,6 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt), alt->alt_len); mutex_unlock(&text_mutex); - cpu_apply_errata |= tmp; } } - if (stage != RISCV_ALTERNATIVES_MODULE && - cpu_apply_errata != cpu_req_errata) - warn_miss_errata(cpu_req_errata - cpu_apply_errata); } diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index 6081327e55f5b6..28e115eed21805 100644 --- a/arch/riscv/include/asm/compat.h +++ b/arch/riscv/include/asm/compat.h @@ -2,7 +2,7 @@ #ifndef __ASM_COMPAT_H #define __ASM_COMPAT_H -#define COMPAT_UTS_MACHINE "riscv\0\0" +#define COMPAT_UTS_MACHINE "riscv32\0\0" /* * Architecture specific compatibility types diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 34313387f9774c..8067e666a4ca68 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -20,7 +20,7 @@ extern void * const sys_call_table[]; extern void * const compat_sys_call_table[]; /* - * Only the low 32 bits of orig_r0 are meaningful, so we return int. + * Only the low 32 bits of orig_a0 are meaningful, so we return int. * This importantly ignores the high bits on 64-bit, so comparisons * sign-extend the low 32 bits. 
*/ diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 5a956108b1eaf5..dbb067e345f05b 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -145,14 +145,14 @@ struct arch_ext_priv { long (*save)(struct pt_regs *regs, void __user *sc_vec); }; -struct arch_ext_priv arch_ext_list[] = { +static struct arch_ext_priv arch_ext_list[] = { { .magic = RISCV_V_MAGIC, .save = &save_v_state, }, }; -const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list); +static const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list); static long restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) @@ -297,7 +297,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame, } else { err |= __put_user(arch_ext->magic, &sc_ext_ptr->magic); err |= __put_user(ext_size, &sc_ext_ptr->size); - sc_ext_ptr = (void *)sc_ext_ptr + ext_size; + sc_ext_ptr = (void __user *)sc_ext_ptr + ext_size; } } /* Write zero to fp-reserved space and check it on restore_sigcontext */ diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h index acf9ffa1a17183..dfd5c74ba41a2f 100644 --- a/arch/x86/include/asm/kfence.h +++ b/arch/x86/include/asm/kfence.h @@ -42,7 +42,7 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) { unsigned int level; pte_t *pte = lookup_address(addr, &level); - pteval_t val; + pteval_t val, new; if (WARN_ON(!pte || level != PG_LEVEL_4K)) return false; @@ -57,11 +57,12 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; /* - * Otherwise, invert the entire PTE. This avoids writing out an + * Otherwise, flip the Present bit, taking care to avoid writing an * L1TF-vulnerable PTE (not present, without the high address bits * set). */ - set_pte(pte, __pte(~val)); + new = val ^ _PAGE_PRESENT; + set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK))); /* * If the page was protected (non-present) and we're making it diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h index c9cf43d5ef238c..4220dae14a2d9f 100644 --- a/arch/x86/include/asm/vmware.h +++ b/arch/x86/include/asm/vmware.h @@ -140,7 +140,7 @@ unsigned long vmware_hypercall3(unsigned long cmd, unsigned long in1, "b" (in1), "c" (cmd), "d" (0) - : "cc", "memory"); + : "di", "si", "cc", "memory"); return out0; } @@ -165,7 +165,7 @@ unsigned long vmware_hypercall4(unsigned long cmd, unsigned long in1, "b" (in1), "c" (cmd), "d" (0) - : "cc", "memory"); + : "di", "si", "cc", "memory"); return out0; } diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 7cc8950005b6ec..4c7688670c2d8c 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -514,7 +514,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, */ spin_lock_irq(&kvm->irqfds.lock); - if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) { + if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI || + WARN_ON_ONCE(irqfd->irq_bypass_vcpu)) { ret = kvm_pi_update_irte(irqfd, NULL); if (ret) pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n", diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 6b77b2033208f1..0f6c8596719b83 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -376,6 +376,7 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb) static int avic_init_backing_page(struct kvm_vcpu *vcpu) { + u32 max_id = x2avic_enabled ? 
x2avic_max_physical_id : AVIC_MAX_PHYSICAL_ID; struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); struct vcpu_svm *svm = to_svm(vcpu); u32 id = vcpu->vcpu_id; @@ -388,8 +389,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) * avic_vcpu_load() expects to be called if and only if the vCPU has * fully initialized AVIC. */ - if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) || - (id > x2avic_max_physical_id)) { + if (id > max_id) { kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG); vcpu->arch.apic->apicv_active = false; return 0; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 24d59ccfa40d9a..4394be40fe78d7 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5284,6 +5284,8 @@ static __init void svm_set_cpu_caps(void) */ kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT); kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM); + + kvm_setup_xss_caps(); } static __init int svm_hardware_setup(void) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 6b96f7aea20bd3..8c94241fbcca70 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -8051,6 +8051,8 @@ static __init void vmx_set_cpu_caps(void) kvm_cpu_cap_clear(X86_FEATURE_SHSTK); kvm_cpu_cap_clear(X86_FEATURE_IBT); } + + kvm_setup_xss_caps(); } static bool vmx_is_io_intercepted(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 63afdb6bb078b2..72d37c8930ad78 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9953,6 +9953,23 @@ static struct notifier_block pvclock_gtod_notifier = { }; #endif +void kvm_setup_xss_caps(void) +{ + if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) + kvm_caps.supported_xss = 0; + + if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) && + !kvm_cpu_cap_has(X86_FEATURE_IBT)) + kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL; + + if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) { + kvm_cpu_cap_clear(X86_FEATURE_SHSTK); + kvm_cpu_cap_clear(X86_FEATURE_IBT); + kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL; + } +} +EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_setup_xss_caps); + static inline void kvm_ops_update(struct kvm_x86_init_ops *ops) { memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); @@ -10125,19 +10142,6 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) if (!tdp_enabled) kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT; - if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) - kvm_caps.supported_xss = 0; - - if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) && - !kvm_cpu_cap_has(X86_FEATURE_IBT)) - kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL; - - if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) { - kvm_cpu_cap_clear(X86_FEATURE_SHSTK); - kvm_cpu_cap_clear(X86_FEATURE_IBT); - kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL; - } - if (kvm_caps.has_tsc_control) { /* * Make sure the user can only configure tsc_khz values that diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index fdab0ad490988e..00de24f55b1fe9 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -471,6 +471,8 @@ extern struct kvm_host_values kvm_host; extern bool enable_pmu; +void kvm_setup_xss_caps(void); + /* * Get a filtered version of KVM's supported XCR0 that strips out dynamic * features for which the current process doesn't (yet) have permission to use. 
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 535fc881c8daa7..b356c9b882544f 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2991,6 +2991,10 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id, * @t: the binder transaction that failed * @data_size: the user provided data size for the transaction * @error: enum binder_driver_return_protocol returned to sender + * + * Note that t->buffer is not safe to access here, as it may have been + * released (or not yet allocated). Callers should guarantee all the + * transaction items used here are safe to access. */ static void binder_netlink_report(struct binder_proc *proc, struct binder_transaction *t, @@ -3780,6 +3784,14 @@ static void binder_transaction(struct binder_proc *proc, goto err_dead_proc_or_thread; } } else { + /* + * Make a transaction copy. It is not safe to access 't' after + * binder_proc_transaction() reported a pending frozen. The + * target could thaw and consume the transaction at any point. + * Instead, use a safe 't_copy' for binder_netlink_report(). + */ + struct binder_transaction t_copy = *t; + BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); return_error = binder_proc_transaction(t, target_proc, NULL); @@ -3790,7 +3802,7 @@ static void binder_transaction(struct binder_proc *proc, */ if (return_error == BR_TRANSACTION_PENDING_FROZEN) { tcomplete->type = BINDER_WORK_TRANSACTION_PENDING; - binder_netlink_report(proc, t, tr->data_size, + binder_netlink_report(proc, &t_copy, tr->data_size, return_error); } binder_enqueue_thread_work(thread, tcomplete); @@ -3812,8 +3824,9 @@ static void binder_transaction(struct binder_proc *proc, return; err_dead_proc_or_thread: - binder_txn_error("%d:%d dead process or thread\n", - thread->pid, proc->pid); + binder_txn_error("%d:%d %s process or thread\n", + proc->pid, thread->pid, + return_error == BR_FROZEN_REPLY ? "frozen" : "dead"); return_error_line = __LINE__; binder_dequeue_work(proc, tcomplete); err_translate_failed: diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c index c69026df775c25..5c1319d8003614 100644 --- a/drivers/android/binder/rust_binderfs.c +++ b/drivers/android/binder/rust_binderfs.c @@ -132,8 +132,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode, mutex_lock(&binderfs_minors_mutex); if (++info->device_count <= info->mount_opts.max) minor = ida_alloc_max(&binderfs_minors, - use_reserve ? BINDERFS_MAX_MINOR : - BINDERFS_MAX_MINOR_CAPPED, + use_reserve ? BINDERFS_MAX_MINOR - 1 : + BINDERFS_MAX_MINOR_CAPPED - 1, GFP_KERNEL); else minor = -ENOSPC; @@ -391,12 +391,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb) if (!device) return -ENOMEM; - /* If we have already created a binder-control node, return. */ - if (info->control_dentry) { - ret = 0; - goto out; - } - ret = -ENOMEM; inode = new_inode(sb); if (!inode) @@ -405,8 +399,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb) /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); minor = ida_alloc_max(&binderfs_minors, - use_reserve ? BINDERFS_MAX_MINOR : - BINDERFS_MAX_MINOR_CAPPED, + use_reserve ? 
BINDERFS_MAX_MINOR - 1 : + BINDERFS_MAX_MINOR_CAPPED - 1, GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { @@ -431,7 +425,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb) inode->i_private = device; info->control_dentry = dentry; - d_add(dentry, inode); + d_make_persistent(dentry, inode); + dput(dentry); return 0; diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs index 1a8e6fdc0dc423..e0ea33ccfe58bd 100644 --- a/drivers/android/binder/thread.rs +++ b/drivers/android/binder/thread.rs @@ -39,6 +39,10 @@ use core::{ sync::atomic::{AtomicU32, Ordering}, }; +fn is_aligned(value: usize, to: usize) -> bool { + value % to == 0 +} + /// Stores the layout of the scatter-gather entries. This is used during the `translate_objects` /// call and is discarded when it returns. struct ScatterGatherState { @@ -69,17 +73,24 @@ struct ScatterGatherEntry { } /// This entry specifies that a fixup should happen at `target_offset` of the -/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object` -/// and is applied later. Otherwise if `skip` is zero, then the size of the -/// fixup is `sizeof::()` and `pointer_value` is written to the buffer. -struct PointerFixupEntry { - /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup. - skip: usize, - /// The translated pointer to write when `skip` is zero. - pointer_value: u64, - /// The offset at which the value should be written. The offset is relative - /// to the original buffer. - target_offset: usize, +/// buffer. +enum PointerFixupEntry { + /// A fixup for a `binder_buffer_object`. + Fixup { + /// The translated pointer to write. + pointer_value: u64, + /// The offset at which the value should be written. The offset is relative + /// to the original buffer. + target_offset: usize, + }, + /// A skip for a `binder_fd_array_object`. + Skip { + /// The number of bytes to skip. + skip: usize, + /// The offset at which the skip should happen. The offset is relative + /// to the original buffer. + target_offset: usize, + }, } /// Return type of `apply_and_validate_fixup_in_parent`. 
@@ -762,8 +773,7 @@ impl Thread { parent_entry.fixup_min_offset = info.new_min_offset; parent_entry.pointer_fixups.push( - PointerFixupEntry { - skip: 0, + PointerFixupEntry::Fixup { pointer_value: buffer_ptr_in_user_space, target_offset: info.target_offset, }, @@ -789,6 +799,10 @@ impl Thread { let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?; let fds_len = num_fds.checked_mul(size_of::()).ok_or(EINVAL)?; + if !is_aligned(parent_offset, size_of::()) { + return Err(EINVAL.into()); + } + let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?; view.alloc.info_add_fd_reserve(num_fds)?; @@ -803,13 +817,16 @@ impl Thread { } }; + if !is_aligned(parent_entry.sender_uaddr, size_of::()) { + return Err(EINVAL.into()); + } + parent_entry.fixup_min_offset = info.new_min_offset; parent_entry .pointer_fixups .push( - PointerFixupEntry { + PointerFixupEntry::Skip { skip: fds_len, - pointer_value: 0, target_offset: info.target_offset, }, GFP_KERNEL, @@ -820,6 +837,7 @@ impl Thread { .sender_uaddr .checked_add(parent_offset) .ok_or(EINVAL)?; + let mut fda_bytes = KVec::new(); UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len) .read_all(&mut fda_bytes, GFP_KERNEL)?; @@ -871,17 +889,21 @@ impl Thread { let mut reader = UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader(); for fixup in &mut sg_entry.pointer_fixups { - let fixup_len = if fixup.skip == 0 { - size_of::() - } else { - fixup.skip + let (fixup_len, fixup_offset) = match fixup { + PointerFixupEntry::Fixup { target_offset, .. } => { + (size_of::(), *target_offset) + } + PointerFixupEntry::Skip { + skip, + target_offset, + } => (*skip, *target_offset), }; - let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?; - if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end { + let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?; + if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end { pr_warn!( "Fixups oob {} {} {} {}", - fixup.target_offset, + fixup_offset, end_of_previous_fixup, offset_end, target_offset_end @@ -890,13 +912,13 @@ impl Thread { } let copy_off = end_of_previous_fixup; - let copy_len = fixup.target_offset - end_of_previous_fixup; + let copy_len = fixup_offset - end_of_previous_fixup; if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) { pr_warn!("Failed copying into alloc: {:?}", err); return Err(err.into()); } - if fixup.skip == 0 { - let res = alloc.write::(fixup.target_offset, &fixup.pointer_value); + if let PointerFixupEntry::Fixup { pointer_value, .. 
} = fixup { + let res = alloc.write::(fixup_offset, pointer_value); if let Err(err) = res { pr_warn!("Failed copying ptr into alloc: {:?}", err); return Err(err.into()); @@ -949,25 +971,30 @@ impl Thread { let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?; let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?; - let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?; - let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?; - let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?; - let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?; + let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?; + let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?; let aligned_secctx_size = match secctx.as_ref() { Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?, None => 0, }; + if !is_aligned(offsets_size, size_of::()) { + return Err(EINVAL.into()); + } + if !is_aligned(buffers_size, size_of::()) { + return Err(EINVAL.into()); + } + // This guarantees that at least `sizeof(usize)` bytes will be allocated. let len = usize::max( aligned_data_size - .checked_add(aligned_offsets_size) - .and_then(|sum| sum.checked_add(aligned_buffers_size)) + .checked_add(offsets_size) + .and_then(|sum| sum.checked_add(buffers_size)) .and_then(|sum| sum.checked_add(aligned_secctx_size)) .ok_or(ENOMEM)?, - size_of::(), + size_of::(), ); - let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size; + let secctx_off = aligned_data_size + offsets_size + buffers_size; let mut alloc = match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) { Ok(alloc) => alloc, @@ -999,13 +1026,13 @@ impl Thread { } let offsets_start = aligned_data_size; - let offsets_end = aligned_data_size + aligned_offsets_size; + let offsets_end = aligned_data_size + offsets_size; // This state is used for BINDER_TYPE_PTR objects. let sg_state = sg_state.insert(ScatterGatherState { unused_buffer_space: UnusedBufferSpace { offset: offsets_end, - limit: len, + limit: offsets_end + buffers_size, }, sg_entries: KVec::new(), ancestors: KVec::new(), @@ -1014,12 +1041,16 @@ impl Thread { // Traverse the objects specified. let mut view = AllocationView::new(&mut alloc, data_size); for (index, index_offset) in (offsets_start..offsets_end) - .step_by(size_of::()) + .step_by(size_of::()) .enumerate() { - let offset = view.alloc.read(index_offset)?; + let offset: usize = view + .alloc + .read::(index_offset)? + .try_into() + .map_err(|_| EINVAL)?; - if offset < end_of_previous_object { + if offset < end_of_previous_object || !is_aligned(offset, size_of::()) { pr_warn!("Got transaction with invalid offset."); return Err(EINVAL.into()); } @@ -1051,7 +1082,7 @@ impl Thread { } // Update the indexes containing objects to clean up. - let offset_after_object = index_offset + size_of::(); + let offset_after_object = index_offset + size_of::(); view.alloc .set_info_offsets(offsets_start..offset_after_object); } diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index b46bcb91072de2..9f8a18c88d66ef 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -132,8 +132,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode, mutex_lock(&binderfs_minors_mutex); if (++info->device_count <= info->mount_opts.max) minor = ida_alloc_max(&binderfs_minors, - use_reserve ? BINDERFS_MAX_MINOR : - BINDERFS_MAX_MINOR_CAPPED, + use_reserve ? 
BINDERFS_MAX_MINOR - 1 : + BINDERFS_MAX_MINOR_CAPPED - 1, GFP_KERNEL); else minor = -ENOSPC; @@ -408,8 +408,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb) /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); minor = ida_alloc_max(&binderfs_minors, - use_reserve ? BINDERFS_MAX_MINOR : - BINDERFS_MAX_MINOR_CAPPED, + use_reserve ? BINDERFS_MAX_MINOR - 1 : + BINDERFS_MAX_MINOR_CAPPED - 1, GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index bd59c0e9508b72..32a3a5b138029e 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1225,28 +1225,16 @@ static int loop_clr_fd(struct loop_device *lo) } static int -loop_set_status(struct loop_device *lo, blk_mode_t mode, - struct block_device *bdev, const struct loop_info64 *info) +loop_set_status(struct loop_device *lo, const struct loop_info64 *info) { int err; bool partscan = false; bool size_changed = false; unsigned int memflags; - /* - * If we don't hold exclusive handle for the device, upgrade to it - * here to avoid changing device under exclusive owner. - */ - if (!(mode & BLK_OPEN_EXCL)) { - err = bd_prepare_to_claim(bdev, loop_set_status, NULL); - if (err) - goto out_reread_partitions; - } - err = mutex_lock_killable(&lo->lo_mutex); if (err) - goto out_abort_claiming; - + return err; if (lo->lo_state != Lo_bound) { err = -ENXIO; goto out_unlock; @@ -1285,10 +1273,6 @@ loop_set_status(struct loop_device *lo, blk_mode_t mode, } out_unlock: mutex_unlock(&lo->lo_mutex); -out_abort_claiming: - if (!(mode & BLK_OPEN_EXCL)) - bd_abort_claiming(bdev, loop_set_status); -out_reread_partitions: if (partscan) loop_reread_partitions(lo); @@ -1368,9 +1352,7 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) } static int -loop_set_status_old(struct loop_device *lo, blk_mode_t mode, - struct block_device *bdev, - const struct loop_info __user *arg) +loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg) { struct loop_info info; struct loop_info64 info64; @@ -1378,19 +1360,17 @@ loop_set_status_old(struct loop_device *lo, blk_mode_t mode, if (copy_from_user(&info, arg, sizeof (struct loop_info))) return -EFAULT; loop_info64_from_old(&info, &info64); - return loop_set_status(lo, mode, bdev, &info64); + return loop_set_status(lo, &info64); } static int -loop_set_status64(struct loop_device *lo, blk_mode_t mode, - struct block_device *bdev, - const struct loop_info64 __user *arg) +loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg) { struct loop_info64 info64; if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) return -EFAULT; - return loop_set_status(lo, mode, bdev, &info64); + return loop_set_status(lo, &info64); } static int @@ -1569,14 +1549,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, case LOOP_SET_STATUS: err = -EPERM; if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) - err = loop_set_status_old(lo, mode, bdev, argp); + err = loop_set_status_old(lo, argp); break; case LOOP_GET_STATUS: return loop_get_status_old(lo, argp); case LOOP_SET_STATUS64: err = -EPERM; if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) - err = loop_set_status64(lo, mode, bdev, argp); + err = loop_set_status64(lo, argp); break; case LOOP_GET_STATUS64: return loop_get_status64(lo, argp); @@ -1670,9 +1650,8 @@ loop_info64_to_compat(const struct loop_info64 *info64, } static int -loop_set_status_compat(struct loop_device 
*lo, blk_mode_t mode, - struct block_device *bdev, - const struct compat_loop_info __user *arg) +loop_set_status_compat(struct loop_device *lo, + const struct compat_loop_info __user *arg) { struct loop_info64 info64; int ret; @@ -1680,7 +1659,7 @@ loop_set_status_compat(struct loop_device *lo, blk_mode_t mode, ret = loop_info64_from_compat(arg, &info64); if (ret < 0) return ret; - return loop_set_status(lo, mode, bdev, &info64); + return loop_set_status(lo, &info64); } static int @@ -1706,7 +1685,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode, switch(cmd) { case LOOP_SET_STATUS: - err = loop_set_status_compat(lo, mode, bdev, + err = loop_set_status_compat(lo, (const struct compat_loop_info __user *)arg); break; case LOOP_GET_STATUS: diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index af0e21149dbc9e..8f441eb8b19265 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3495,11 +3495,29 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req) rbd_assert(!need_exclusive_lock(img_req) || __rbd_is_lock_owner(rbd_dev)); - if (rbd_img_is_write(img_req)) { - rbd_assert(!img_req->snapc); + if (test_bit(IMG_REQ_CHILD, &img_req->flags)) { + rbd_assert(!rbd_img_is_write(img_req)); + } else { + struct request *rq = blk_mq_rq_from_pdu(img_req); + u64 off = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; + u64 len = blk_rq_bytes(rq); + u64 mapping_size; + down_read(&rbd_dev->header_rwsem); - img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); + mapping_size = rbd_dev->mapping.size; + if (rbd_img_is_write(img_req)) { + rbd_assert(!img_req->snapc); + img_req->snapc = + ceph_get_snap_context(rbd_dev->header.snapc); + } up_read(&rbd_dev->header_rwsem); + + if (unlikely(off + len > mapping_size)) { + rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", + off, len, mapping_size); + img_req->pending.result = -EIO; + return; + } } for_each_obj_request(img_req, obj_req) { @@ -4725,7 +4743,6 @@ static void rbd_queue_workfn(struct work_struct *work) struct request *rq = blk_mq_rq_from_pdu(img_request); u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; u64 length = blk_rq_bytes(rq); - u64 mapping_size; int result; /* Ignore/skip any zero-length requests */ @@ -4738,17 +4755,9 @@ static void rbd_queue_workfn(struct work_struct *work) blk_mq_start_request(rq); down_read(&rbd_dev->header_rwsem); - mapping_size = rbd_dev->mapping.size; rbd_img_capture_header(img_request); up_read(&rbd_dev->header_rwsem); - if (offset + length > mapping_size) { - rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset, - length, mapping_size); - result = -EIO; - goto err_img_request; - } - dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev, img_request, obj_op_name(op_type), offset, length); diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index 8194a970f002ab..d1c354636315d2 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1662,6 +1662,7 @@ static void destroy_sysfs(struct rnbd_clt_dev *dev, /* To avoid deadlock firstly remove itself */ sysfs_remove_file_self(&dev->kobj, sysfs_self); kobject_del(&dev->kobj); + kobject_put(&dev->kobj); } } diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index d0adae3267b41e..2b28515de92c40 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -685,6 +685,8 @@ static int hci_uart_register_dev(struct hci_uart *hu) return err; } + set_bit(HCI_UART_PROTO_INIT, &hu->flags); + if (test_bit(HCI_UART_INIT_PENDING, 
&hu->hdev_flags)) return 0; @@ -712,8 +714,6 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) hu->proto = p; - set_bit(HCI_UART_PROTO_INIT, &hu->flags); - err = hci_uart_register_dev(hu); if (err) { return err; diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c index d8e029e7e53f72..3f00d953fb9a0e 100644 --- a/drivers/bus/simple-pm-bus.c +++ b/drivers/bus/simple-pm-bus.c @@ -142,6 +142,12 @@ static const struct of_device_id simple_pm_bus_of_match[] = { { .compatible = "simple-mfd", .data = ONLY_BUS }, { .compatible = "isa", .data = ONLY_BUS }, { .compatible = "arm,amba-bus", .data = ONLY_BUS }, + { .compatible = "fsl,ls1021a-scfg", }, + { .compatible = "fsl,ls1043a-scfg", }, + { .compatible = "fsl,ls1046a-scfg", }, + { .compatible = "fsl,ls1088a-isc", }, + { .compatible = "fsl,ls2080a-isc", }, + { .compatible = "fsl,lx2160a-isc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match); diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 81e16b5a0245ab..b8081acba928f5 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -263,6 +263,7 @@ static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unuse { .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 }, { .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 }, { .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 }, + { /* sentinel */ } }; static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev, diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c index ea29cd5d0ff9fa..40d02adaf3f6da 100644 --- a/drivers/crypto/ccp/sev-dev-tsm.c +++ b/drivers/crypto/ccp/sev-dev-tsm.c @@ -19,12 +19,6 @@ MODULE_IMPORT_NS("PCI_IDE"); -#define TIO_DEFAULT_NR_IDE_STREAMS 1 - -static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS; -module_param_named(ide_nr, nr_ide_streams, uint, 0644); -MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB"); - #define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev)) #define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data)) #define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data)) @@ -193,7 +187,6 @@ static void streams_teardown(struct pci_ide **ide) static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide, unsigned int tc) { - struct pci_dev *rp = pcie_find_root_port(pdev); struct pci_ide *ide1; if (ide[tc]) { @@ -201,17 +194,11 @@ static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide, return -EBUSY; } - /* FIXME: find a better way */ - if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS) - pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams); - pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams); - ide1 = pci_ide_stream_alloc(pdev); if (!ide1) return -EFAULT; - /* Blindly assign streamid=0 to TC=0, and so on */ - ide1->stream_id = tc; + ide1->stream_id = ide1->host_bridge_stream; ide[tc] = ide1; diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 7fea11a5e359ca..22ae387ae03ca6 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -173,20 +173,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer) } } -static void start_split_transaction_timeout(struct fw_transaction *t, - struct fw_card *card) +// card->transactions.lock should be acquired in advance for the linked 
list. +static void start_split_transaction_timeout(struct fw_transaction *t, unsigned int delta) { - unsigned long delta; - if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) return; t->is_split_transaction = true; - // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for - // local destination never runs in any type of IRQ context. - scoped_guard(spinlock_irqsave, &card->split_timeout.lock) - delta = card->split_timeout.jiffies; mod_timer(&t->split_timeout_timer, jiffies + delta); } @@ -207,13 +201,20 @@ static void transmit_complete_callback(struct fw_packet *packet, break; case ACK_PENDING: { + unsigned int delta; + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for // local destination never runs in any type of IRQ context. scoped_guard(spinlock_irqsave, &card->split_timeout.lock) { t->split_timeout_cycle = compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff; + delta = card->split_timeout.jiffies; } - start_split_transaction_timeout(t, card); + + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. + scoped_guard(spinlock_irqsave, &card->transactions.lock) + start_split_transaction_timeout(t, delta); break; } case ACK_BUSY_X: diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index aa6e740f9cd74b..9fdb50f3fec604 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -9,6 +9,7 @@ * Cirrus Logic International Semiconductor Ltd. */ +#include #include #include #include @@ -24,6 +25,41 @@ #include #include +#include "cs_dsp.h" + +/* + * When the KUnit test is running the error-case tests will cause a lot + * of messages. Rate-limit to prevent overflowing the kernel log buffer + * during KUnit test runs. + */ +#if IS_ENABLED(CONFIG_FW_CS_DSP_KUNIT_TEST) +bool cs_dsp_suppress_err_messages; +EXPORT_SYMBOL_IF_KUNIT(cs_dsp_suppress_err_messages); + +bool cs_dsp_suppress_warn_messages; +EXPORT_SYMBOL_IF_KUNIT(cs_dsp_suppress_warn_messages); + +bool cs_dsp_suppress_info_messages; +EXPORT_SYMBOL_IF_KUNIT(cs_dsp_suppress_info_messages); + +#define cs_dsp_err(_dsp, fmt, ...) \ + do { \ + if (!cs_dsp_suppress_err_messages) \ + dev_err_ratelimited(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__); \ + } while (false) +#define cs_dsp_warn(_dsp, fmt, ...) \ + do { \ + if (!cs_dsp_suppress_warn_messages) \ + dev_warn_ratelimited(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__); \ + } while (false) +#define cs_dsp_info(_dsp, fmt, ...) \ + do { \ + if (!cs_dsp_suppress_info_messages) \ + dev_info_ratelimited(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__); \ + } while (false) +#define cs_dsp_dbg(_dsp, fmt, ...) \ + dev_dbg_ratelimited(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__) +#else #define cs_dsp_err(_dsp, fmt, ...) \ dev_err(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__) #define cs_dsp_warn(_dsp, fmt, ...) \ @@ -32,6 +68,7 @@ dev_info(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__) #define cs_dsp_dbg(_dsp, fmt, ...) 
\ dev_dbg(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__) +#endif #define ADSP1_CONTROL_1 0x00 #define ADSP1_CONTROL_2 0x02 diff --git a/drivers/firmware/cirrus/cs_dsp.h b/drivers/firmware/cirrus/cs_dsp.h new file mode 100644 index 00000000000000..adf543004aea31 --- /dev/null +++ b/drivers/firmware/cirrus/cs_dsp.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cs_dsp.h -- Private header for cs_dsp driver. + * + * Copyright (C) 2026 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. + */ + +#ifndef FW_CS_DSP_H +#define FW_CS_DSP_H + +#if IS_ENABLED(CONFIG_KUNIT) +extern bool cs_dsp_suppress_err_messages; +extern bool cs_dsp_suppress_warn_messages; +extern bool cs_dsp_suppress_info_messages; +#endif + +#endif /* ifndef FW_CS_DSP_H */ diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c index 99148ea22df3ea..66140caeebb5e2 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c +++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c @@ -17,6 +17,8 @@ #include #include +#include "../cs_dsp.h" + /* * Test method is: * @@ -2229,7 +2231,22 @@ static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp, return ret; /* Automatically call cs_dsp_remove() when test case ends */ - return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); + ret = kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); + if (ret) + return ret; + + /* + * The large number of test cases will cause an unusually large amount + * of dev_info() messages from cs_dsp, so suppress these. + */ + cs_dsp_suppress_info_messages = true; + + return 0; +} + +static void cs_dsp_bin_test_exit(struct kunit *test) +{ + cs_dsp_suppress_info_messages = false; } static int cs_dsp_bin_test_halo_init_common(struct kunit *test, int wmdr_ver) @@ -2816,6 +2833,7 @@ static struct kunit_case cs_dsp_bin_test_cases_adsp2[] = { static struct kunit_suite cs_dsp_bin_test_halo = { .name = "cs_dsp_bin_halo", .init = cs_dsp_bin_test_halo_init, + .exit = cs_dsp_bin_test_exit, .test_cases = cs_dsp_bin_test_cases_halo, }; @@ -2828,12 +2846,14 @@ static struct kunit_suite cs_dsp_bin_test_halo_wmdr3 = { static struct kunit_suite cs_dsp_bin_test_adsp2_32bit = { .name = "cs_dsp_bin_adsp2_32bit", .init = cs_dsp_bin_test_adsp2_32bit_init, + .exit = cs_dsp_bin_test_exit, .test_cases = cs_dsp_bin_test_cases_adsp2, }; static struct kunit_suite cs_dsp_bin_test_adsp2_16bit = { .name = "cs_dsp_bin_adsp2_16bit", .init = cs_dsp_bin_test_adsp2_16bit_init, + .exit = cs_dsp_bin_test_exit, .test_cases = cs_dsp_bin_test_cases_adsp2, }; diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c index fe0112dc307772..9b2763b369702b 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c +++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c @@ -18,6 +18,8 @@ #include #include +#include "../cs_dsp.h" + KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *); KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *); @@ -380,11 +382,9 @@ static void bin_block_payload_len_garbage(struct kunit *test) static void cs_dsp_bin_err_test_exit(struct kunit *test) { - /* - * Testing error conditions can produce a lot of log output - * from cs_dsp error messages, so rate limit the test cases. 
- */ - usleep_range(200, 500); + cs_dsp_suppress_err_messages = false; + cs_dsp_suppress_warn_messages = false; + cs_dsp_suppress_info_messages = false; } static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *dsp, @@ -474,7 +474,19 @@ static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *ds return ret; /* Automatically call cs_dsp_remove() when test case ends */ - return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); + ret = kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); + if (ret) + return ret; + + /* + * Testing error conditions can produce a lot of log output + * from cs_dsp error messages, so suppress messages. + */ + cs_dsp_suppress_err_messages = true; + cs_dsp_suppress_warn_messages = true; + cs_dsp_suppress_info_messages = true; + + return 0; } static int cs_dsp_bin_err_test_halo_init(struct kunit *test) diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c index 9e997c4ee2d67c..f02cb6cf763868 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c +++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c @@ -18,6 +18,8 @@ #include #include +#include "../cs_dsp.h" + /* * Test method is: * @@ -1853,7 +1855,22 @@ static int cs_dsp_wmfw_test_common_init(struct kunit *test, struct cs_dsp *dsp, return ret; /* Automatically call cs_dsp_remove() when test case ends */ - return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); + ret = kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); + if (ret) + return ret; + + /* + * The large number of test cases will cause an unusually large amount + * of dev_info() messages from cs_dsp, so suppress these. + */ + cs_dsp_suppress_info_messages = true; + + return 0; +} + +static void cs_dsp_wmfw_test_exit(struct kunit *test) +{ + cs_dsp_suppress_info_messages = false; } static int cs_dsp_wmfw_test_halo_init(struct kunit *test) @@ -2163,42 +2180,49 @@ static struct kunit_case cs_dsp_wmfw_test_cases_adsp2[] = { static struct kunit_suite cs_dsp_wmfw_test_halo = { .name = "cs_dsp_wmfwV3_halo", .init = cs_dsp_wmfw_test_halo_init, + .exit = cs_dsp_wmfw_test_exit, .test_cases = cs_dsp_wmfw_test_cases_halo, }; static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw0 = { .name = "cs_dsp_wmfwV0_adsp2_32bit", .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init, + .exit = cs_dsp_wmfw_test_exit, .test_cases = cs_dsp_wmfw_test_cases_adsp2, }; static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw1 = { .name = "cs_dsp_wmfwV1_adsp2_32bit", .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init, + .exit = cs_dsp_wmfw_test_exit, .test_cases = cs_dsp_wmfw_test_cases_adsp2, }; static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw2 = { .name = "cs_dsp_wmfwV2_adsp2_32bit", .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init, + .exit = cs_dsp_wmfw_test_exit, .test_cases = cs_dsp_wmfw_test_cases_adsp2, }; static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw0 = { .name = "cs_dsp_wmfwV0_adsp2_16bit", .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init, + .exit = cs_dsp_wmfw_test_exit, .test_cases = cs_dsp_wmfw_test_cases_adsp2, }; static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw1 = { .name = "cs_dsp_wmfwV1_adsp2_16bit", .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init, + .exit = cs_dsp_wmfw_test_exit, .test_cases = cs_dsp_wmfw_test_cases_adsp2, }; static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw2 = { .name = "cs_dsp_wmfwV2_adsp2_16bit", .init = 
cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init, + .exit = cs_dsp_wmfw_test_exit, .test_cases = cs_dsp_wmfw_test_cases_adsp2, }; diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c index c309843261d724..37162d12e2fa78 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c +++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c @@ -18,6 +18,8 @@ #include #include +#include "../cs_dsp.h" + KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *); KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *); @@ -989,11 +991,9 @@ static void wmfw_v2_coeff_description_exceeds_block(struct kunit *test) static void cs_dsp_wmfw_err_test_exit(struct kunit *test) { - /* - * Testing error conditions can produce a lot of log output - * from cs_dsp error messages, so rate limit the test cases. - */ - usleep_range(200, 500); + cs_dsp_suppress_err_messages = false; + cs_dsp_suppress_warn_messages = false; + cs_dsp_suppress_info_messages = false; } static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *dsp, @@ -1072,7 +1072,19 @@ static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *d return ret; /* Automatically call cs_dsp_remove() when test case ends */ - return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); + ret = kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); + if (ret) + return ret; + + /* + * Testing error conditions can produce a lot of log output + * from cs_dsp error messages, so suppress messages. + */ + cs_dsp_suppress_err_messages = true; + cs_dsp_suppress_warn_messages = true; + cs_dsp_suppress_info_messages = true; + + return 0; } static int cs_dsp_wmfw_err_test_halo_init(struct kunit *test) diff --git a/drivers/firmware/cirrus/test/cs_dsp_tests.c b/drivers/firmware/cirrus/test/cs_dsp_tests.c index 7b829a03ca5291..288675fdbdc534 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_tests.c +++ b/drivers/firmware/cirrus/test/cs_dsp_tests.c @@ -12,3 +12,4 @@ MODULE_AUTHOR("Richard Fitzgerald "); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("FW_CS_DSP"); MODULE_IMPORT_NS("FW_CS_DSP_KUNIT_TEST_UTILS"); +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index af9287ff5dc43e..2352d099709c2b 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -301,12 +301,10 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank( struct brcmstb_gpio_priv *priv, irq_hw_number_t hwirq) { struct brcmstb_gpio_bank *bank; - int i = 0; - /* banks are in descending order */ - list_for_each_entry_reverse(bank, &priv->bank_list, node) { - i += bank->chip.gc.ngpio; - if (hwirq < i) + list_for_each_entry(bank, &priv->bank_list, node) { + if (hwirq >= bank->chip.gc.offset && + hwirq < (bank->chip.gc.offset + bank->chip.gc.ngpio)) return bank; } return NULL; diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c index 77d07e31366fa4..0fdf15faa344d2 100644 --- a/drivers/gpio/gpio-loongson-64bit.c +++ b/drivers/gpio/gpio-loongson-64bit.c @@ -263,7 +263,7 @@ static int loongson_gpio_init_irqchip(struct platform_device *pdev, chip->irq.num_parents = data->intr_num; chip->irq.parents = devm_kcalloc(&pdev->dev, data->intr_num, sizeof(*chip->irq.parents), GFP_KERNEL); - if (!chip->parent) + if (!chip->irq.parents) return -ENOMEM; for (i = 0; i < data->intr_num; i++) { diff --git 
a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index e136e81794dfb3..e39723b5901bac 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -799,10 +799,13 @@ static struct platform_device omap_mpuio_device = { static inline void omap_mpuio_init(struct gpio_bank *bank) { - platform_set_drvdata(&omap_mpuio_device, bank); + static bool registered; - if (platform_driver_register(&omap_mpuio_driver) == 0) - (void) platform_device_register(&omap_mpuio_device); + platform_set_drvdata(&omap_mpuio_device, bank); + if (!registered) { + (void)platform_device_register(&omap_mpuio_device); + registered = true; + } } /*---------------------------------------------------------------------*/ @@ -1575,13 +1578,24 @@ static struct platform_driver omap_gpio_driver = { */ static int __init omap_gpio_drv_reg(void) { - return platform_driver_register(&omap_gpio_driver); + int ret; + + ret = platform_driver_register(&omap_mpuio_driver); + if (ret) + return ret; + + ret = platform_driver_register(&omap_gpio_driver); + if (ret) + platform_driver_unregister(&omap_mpuio_driver); + + return ret; } postcore_initcall(omap_gpio_drv_reg); static void __exit omap_gpio_exit(void) { platform_driver_unregister(&omap_gpio_driver); + platform_driver_unregister(&omap_mpuio_driver); } module_exit(omap_gpio_exit); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 8727ae54bc578b..f93a3dbb2daaf3 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -914,6 +914,8 @@ static void pca953x_irq_shutdown(struct irq_data *d) clear_bit(hwirq, chip->irq_trig_fall); clear_bit(hwirq, chip->irq_trig_level_low); clear_bit(hwirq, chip->irq_trig_level_high); + + pca953x_irq_mask(d); } static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p) diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index bae2061f15fc47..0fff4a699f12d1 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -164,12 +163,6 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip, unsigned long flags; u32 data = input ? 
0 : 1; - - if (input) - pinctrl_gpio_direction_input(chip, offset); - else - pinctrl_gpio_direction_output(chip, offset); - raw_spin_lock_irqsave(&bank->slock, flags); rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr); raw_spin_unlock_irqrestore(&bank->slock, flags); @@ -593,7 +586,6 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank) gc->ngpio = bank->nr_pins; gc->label = bank->name; gc->parent = bank->dev; - gc->can_sleep = true; ret = gpiochip_add_data(gc, bank); if (ret) { diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c index 413bcd0a424050..2cc8abe705cdb7 100644 --- a/drivers/gpio/gpio-sprd.c +++ b/drivers/gpio/gpio-sprd.c @@ -35,7 +35,7 @@ struct sprd_gpio { struct gpio_chip chip; void __iomem *base; - spinlock_t lock; + raw_spinlock_t lock; int irq; }; @@ -54,7 +54,7 @@ static void sprd_gpio_update(struct gpio_chip *chip, unsigned int offset, unsigned long flags; u32 tmp; - spin_lock_irqsave(&sprd_gpio->lock, flags); + raw_spin_lock_irqsave(&sprd_gpio->lock, flags); tmp = readl_relaxed(base + reg); if (val) @@ -63,7 +63,7 @@ static void sprd_gpio_update(struct gpio_chip *chip, unsigned int offset, tmp &= ~BIT(SPRD_GPIO_BIT(offset)); writel_relaxed(tmp, base + reg); - spin_unlock_irqrestore(&sprd_gpio->lock, flags); + raw_spin_unlock_irqrestore(&sprd_gpio->lock, flags); } static int sprd_gpio_read(struct gpio_chip *chip, unsigned int offset, u16 reg) @@ -236,7 +236,7 @@ static int sprd_gpio_probe(struct platform_device *pdev) if (IS_ERR(sprd_gpio->base)) return PTR_ERR(sprd_gpio->base); - spin_lock_init(&sprd_gpio->lock); + raw_spin_lock_init(&sprd_gpio->lock); sprd_gpio->chip.label = dev_name(&pdev->dev); sprd_gpio->chip.ngpio = SPRD_GPIO_NR; diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c index 37f2ce20f1ae72..098e67d70ffa56 100644 --- a/drivers/gpio/gpio-virtuser.c +++ b/drivers/gpio/gpio-virtuser.c @@ -1682,10 +1682,10 @@ static void gpio_virtuser_device_config_group_release(struct config_item *item) { struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item); - guard(mutex)(&dev->lock); - - if (gpio_virtuser_device_is_live(dev)) - gpio_virtuser_device_deactivate(dev); + scoped_guard(mutex, &dev->lock) { + if (gpio_virtuser_device_is_live(dev)) + gpio_virtuser_device_deactivate(dev); + } mutex_destroy(&dev->lock); ida_free(&gpio_virtuser_ida, dev->id); diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c index 83dd227dbbecc7..5e709ba35ec26a 100644 --- a/drivers/gpio/gpiolib-acpi-core.c +++ b/drivers/gpio/gpiolib-acpi-core.c @@ -1104,6 +1104,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, unsigned int pin = agpio->pin_table[i]; struct acpi_gpio_connection *conn; struct gpio_desc *desc; + u16 word, shift; bool found; mutex_lock(&achip->conn_lock); @@ -1158,10 +1159,22 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, mutex_unlock(&achip->conn_lock); - if (function == ACPI_WRITE) - gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i))); - else - *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i; + /* + * For the cases when OperationRegion() consists of more than + * 64 bits calculate the word and bit shift to use that one to + * access the value. 
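A quick worked example of the indexing introduced just below (reference only, not part of the patch): when the OperationRegion covers more than 64 pins, pin index i selects u64 word i / 64 of the value buffer and bit i % 64 within it.

	/* Reference sketch: pin index 70 in a >64-bit OperationRegion. */
	unsigned int i = 70;
	u16 word  = i / 64;	/* = 1 -> value[1]                   */
	u16 shift = i % 64;	/* = 6 -> BIT_ULL(6) within value[1] */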
+ */ + word = i / 64; + shift = i % 64; + + if (function == ACPI_WRITE) { + gpiod_set_raw_value_cansleep(desc, value[word] & BIT_ULL(shift)); + } else { + if (gpiod_get_raw_value_cansleep(desc)) + value[word] |= BIT_ULL(shift); + else + value[word] &= ~BIT_ULL(shift); + } } out: @@ -1346,6 +1359,7 @@ static int acpi_gpio_package_count(const union acpi_object *obj) while (element < end) { switch (element->type) { case ACPI_TYPE_LOCAL_REFERENCE: + case ACPI_TYPE_STRING: element += 3; fallthrough; case ACPI_TYPE_INTEGER: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index b1c24c8fa68623..a51e76623bada0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1920,21 +1920,21 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( /* Make sure restore workers don't access the BO any more */ mutex_lock(&process_info->lock); - list_del(&mem->validate_list); + if (!list_empty(&mem->validate_list)) + list_del_init(&mem->validate_list); mutex_unlock(&process_info->lock); + ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); + if (unlikely(ret)) + return ret; + /* Cleanup user pages and MMU notifiers */ if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { amdgpu_hmm_unregister(mem->bo); - mutex_lock(&process_info->notifier_lock); amdgpu_hmm_range_free(mem->range); - mutex_unlock(&process_info->notifier_lock); + mem->range = NULL; } - ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); - if (unlikely(ret)) - return ret; - amdgpu_amdkfd_remove_eviction_fence(mem->bo, process_info->eviction_fence); pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 6ccb80e2d7c9b5..39387da8586b4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2405,9 +2405,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } - if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) - amdgpu_aspm = 0; - if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK)) supports_atomic = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 7e623f91f2d7ca..d9c7ad297293bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -498,8 +498,13 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, if (adev->irq.retry_cam_enabled) return; + else if (adev->irq.ih1.ring_size) + ih = &adev->irq.ih1; + else if (adev->irq.ih_soft.enabled) + ih = &adev->irq.ih_soft; + else + return; - ih = &adev->irq.ih1; /* Get the WPTR of the last entry in IH ring */ last_wptr = amdgpu_ih_get_wptr(adev, ih); /* Order wptr with ring data. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 72ec455fa932ca..44f230d67da242 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -235,7 +235,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_ib_begin(ring); - if (ring->funcs->emit_gfx_shadow) + if (ring->funcs->emit_gfx_shadow && adev->gfx.cp_gfx_shadow) amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va, init_shadow, vmid); @@ -291,7 +291,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, fence_flags | AMDGPU_FENCE_FLAG_64BIT); } - if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec) { + if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec && + adev->gfx.cp_gfx_shadow) { amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0); amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d75b9940f24874..fc65fb36e115ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6879,7 +6879,7 @@ static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; - *ring->wptr_cpu_addr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 8a2ee2de390f3f..e642236ea2c511 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -4201,7 +4201,7 @@ static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; - *ring->wptr_cpu_addr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } @@ -6823,11 +6823,12 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; + bool use_mmio = false; int r; amdgpu_ring_reset_helper_begin(ring, timedout_fence); - r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); + r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio); if (r) { dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r); @@ -6836,16 +6837,18 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, return r; } - r = gfx_v11_0_kgq_init_queue(ring, true); - if (r) { - dev_err(adev->dev, "failed to init kgq\n"); - return r; - } + if (use_mmio) { + r = gfx_v11_0_kgq_init_queue(ring, true); + if (r) { + dev_err(adev->dev, "failed to init kgq\n"); + return r; + } - r = amdgpu_mes_map_legacy_queue(adev, ring); - if (r) { - dev_err(adev->dev, "failed to remap kgq\n"); - return r; + r = amdgpu_mes_map_legacy_queue(adev, ring); + if (r) { + dev_err(adev->dev, "failed to remap kgq\n"); + return r; + } } return amdgpu_ring_reset_helper_end(ring, timedout_fence); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index b786967022d29d..4aab89a9ab4011 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -3079,7 +3079,7 @@ static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ 
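The write-pointer shadow reset just below repeats for gfx_v12 the same conversion made for gfx_v10 and gfx_v11 above. The rationale is not spelled out in the patch; presumably the shadow is a 64-bit location that other contexts access with atomic64 helpers, so the reset becomes one untorn 64-bit store instead of a plain dereference. An illustrative helper capturing the pattern (assumed name, not part of the patch):

	/* Illustration only -- mirrors the reset pattern used in these hunks. */
	static inline void amdgpu_ring_clear_wptr_shadow(struct amdgpu_ring *ring)
	{
		ring->wptr = 0;
		/* single untorn 64-bit store the compiler cannot split or defer */
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
	}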
ring->wptr = 0; - *ring->wptr_cpu_addr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } @@ -5297,11 +5297,12 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; + bool use_mmio = false; int r; amdgpu_ring_reset_helper_begin(ring, timedout_fence); - r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); + r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio); if (r) { dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r); r = gfx_v12_reset_gfx_pipe(ring); @@ -5309,16 +5310,18 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, return r; } - r = gfx_v12_0_kgq_init_queue(ring, true); - if (r) { - dev_err(adev->dev, "failed to init kgq\n"); - return r; - } + if (use_mmio) { + r = gfx_v12_0_kgq_init_queue(ring, true); + if (r) { + dev_err(adev->dev, "failed to init kgq\n"); + return r; + } - r = amdgpu_mes_map_legacy_queue(adev, ring); - if (r) { - dev_err(adev->dev, "failed to remap kgq\n"); - return r; + r = amdgpu_mes_map_legacy_queue(adev, ring); + if (r) { + dev_err(adev->dev, "failed to remap kgq\n"); + return r; + } } return amdgpu_ring_reset_helper_end(ring, timedout_fence); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 3a52754b5cadb3..ceddc953785fe1 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -1671,7 +1671,7 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block) if (r) goto failure; - if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) { + if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x52) { r = mes_v11_0_set_hw_resources_1(&adev->mes); if (r) { DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index ad36c96478a826..25536d89635d35 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -225,7 +225,13 @@ static u32 soc21_get_config_memsize(struct amdgpu_device *adev) static u32 soc21_get_xclk(struct amdgpu_device *adev) { - return adev->clock.spll.reference_freq; + u32 reference_clock = adev->clock.spll.reference_freq; + + /* reference clock is actually 99.81 Mhz rather than 100 Mhz */ + if ((adev->flags & AMD_IS_APU) && reference_clock == 10000) + return 9981; + + return reference_clock; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index af53e796ea1baa..6ada7b4af7c685 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn) page = pfn_to_page(pfn); svm_range_bo_ref(prange->svm_bo); page->zone_device_data = prange->svm_bo; - zone_device_page_init(page, 0); + zone_device_page_init(page, page_pgmap(page), 0); } static void diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1ea5a250440f60..a8a59126b2d2b0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7754,10 +7754,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); /* Cancel and flush any pending HDMI HPD debounce work */ - 
cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work); - if (aconnector->hdmi_prev_sink) { - dc_sink_release(aconnector->hdmi_prev_sink); - aconnector->hdmi_prev_sink = NULL; + if (aconnector->hdmi_hpd_debounce_delay_ms) { + cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work); + if (aconnector->hdmi_prev_sink) { + dc_sink_release(aconnector->hdmi_prev_sink); + aconnector->hdmi_prev_sink = NULL; + } } if (aconnector->bl_idx != -1) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index 0690c346f2c521..227aa8672d17ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -105,9 +105,12 @@ void cm_helper_program_gamcor_xfer_func( #define NUMBER_REGIONS 32 #define NUMBER_SW_SEGMENTS 16 -bool cm3_helper_translate_curve_to_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params, bool fixpoint) +#define DC_LOGGER \ + ctx->logger + +bool cm3_helper_translate_curve_to_hw_format(struct dc_context *ctx, + const struct dc_transfer_func *output_tf, + struct pwl_params *lut_params, bool fixpoint) { struct curve_points3 *corner_points; struct pwl_result_data *rgb_resulted; @@ -163,6 +166,11 @@ bool cm3_helper_translate_curve_to_hw_format( hw_points += (1 << seg_distr[k]); } + // DCN3+ have 257 pts in lieu of no separate slope registers + // Prior HW had 256 base+slope pairs + // Shaper LUT (i.e. fixpoint == true) is still 256 bases and 256 deltas + hw_points = fixpoint ? (hw_points - 1) : hw_points; + j = 0; for (k = 0; k < (region_end - region_start); k++) { increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]); @@ -223,8 +231,6 @@ bool cm3_helper_translate_curve_to_hw_format( corner_points[1].green.slope = dc_fixpt_zero; corner_points[1].blue.slope = dc_fixpt_zero; - // DCN3+ have 257 pts in lieu of no separate slope registers - // Prior HW had 256 base+slope pairs lut_params->hw_points_num = hw_points + 1; k = 0; @@ -248,6 +254,10 @@ bool cm3_helper_translate_curve_to_hw_format( if (fixpoint == true) { i = 1; while (i != hw_points + 2) { + uint32_t red_clamp; + uint32_t green_clamp; + uint32_t blue_clamp; + if (i >= hw_points) { if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) rgb_plus_1->red = dc_fixpt_add(rgb->red, @@ -260,9 +270,20 @@ bool cm3_helper_translate_curve_to_hw_format( rgb_minus_1->delta_blue); } - rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red); - rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green); - rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue); + rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); + rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); + rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); + + red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red); + green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green); + blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue); + + if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10) + DC_LOG_ERROR("Losing delta precision while programming shaper LUT."); + + rgb->delta_red_reg = red_clamp & 0x3ff; + rgb->delta_green_reg = green_clamp & 0x3ff; + rgb->delta_blue_reg = blue_clamp & 0x3ff; rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red); rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green); rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue); diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h index 
b86347c9b0389d..95f9318a54efce 100644 --- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h @@ -59,7 +59,7 @@ void cm_helper_program_gamcor_xfer_func( const struct pwl_params *params, const struct dcn3_xfer_func_reg *reg); -bool cm3_helper_translate_curve_to_hw_format( +bool cm3_helper_translate_curve_to_hw_format(struct dc_context *ctx, const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 81bcadf5e57ed7..f2d4cd527874df 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -239,7 +239,7 @@ bool dcn30_set_blend_lut( if (plane_state->blend_tf.type == TF_TYPE_HWPWL) blend_lut = &plane_state->blend_tf.pwl; else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { - result = cm3_helper_translate_curve_to_hw_format( + result = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, &plane_state->blend_tf, &dpp_base->regamma_params, false); if (!result) return result; @@ -334,8 +334,9 @@ bool dcn30_set_input_transfer_func(struct dc *dc, if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) params = &plane_state->in_transfer_func.pwl; else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, - &dpp_base->degamma_params, false)) + cm3_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->in_transfer_func, + &dpp_base->degamma_params, false)) params = &dpp_base->degamma_params; result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); @@ -406,7 +407,7 @@ bool dcn30_set_output_transfer_func(struct dc *dc, params = &stream->out_transfer_func.pwl; else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format( + cm3_helper_translate_curve_to_hw_format(stream->ctx, &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index bf19ba65d09aa2..be1f3caf4096fe 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -486,8 +486,9 @@ bool dcn32_set_mcm_luts( if (plane_state->blend_tf.type == TF_TYPE_HWPWL) lut_params = &plane_state->blend_tf.pwl; else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { - result = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, - &dpp_base->regamma_params, false); + result = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->blend_tf, + &dpp_base->regamma_params, false); if (!result) return result; @@ -501,9 +502,9 @@ bool dcn32_set_mcm_luts( lut_params = &plane_state->in_shaper_func.pwl; else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { // TODO: dpp_base replace - ASSERT(false); - cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, - &dpp_base->shaper_params, true); + cm3_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->in_shaper_func, + &dpp_base->shaper_params, true); lut_params = &dpp_base->shaper_params; } @@ -543,8 +544,9 @@ bool dcn32_set_input_transfer_func(struct dc *dc, if (plane_state->in_transfer_func.type 
== TF_TYPE_HWPWL) params = &plane_state->in_transfer_func.pwl; else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, - &dpp_base->degamma_params, false)) + cm3_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->in_transfer_func, + &dpp_base->degamma_params, false)) params = &dpp_base->degamma_params; dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); @@ -575,7 +577,7 @@ bool dcn32_set_output_transfer_func(struct dc *dc, params = &stream->out_transfer_func.pwl; else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format( + cm3_helper_translate_curve_to_hw_format(stream->ctx, &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 2fbc22afb89c5a..5eda7648d0d2ba 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -430,7 +430,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - rval = cm3_helper_translate_curve_to_hw_format( + rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx, mcm_luts.lut1d_func, &dpp_base->regamma_params, false); m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; @@ -450,7 +450,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, m_lut_params.pwl = &mcm_luts.shaper->pwl; else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { ASSERT(false); - rval = cm3_helper_translate_curve_to_hw_format( + rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx, mcm_luts.shaper, &dpp_base->regamma_params, true); m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; @@ -627,8 +627,9 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx, if (plane_state->blend_tf.type == TF_TYPE_HWPWL) lut_params = &plane_state->blend_tf.pwl; else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { - rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, - &dpp_base->regamma_params, false); + rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->blend_tf, + &dpp_base->regamma_params, false); lut_params = rval ? &dpp_base->regamma_params : NULL; } result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); @@ -639,8 +640,9 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx, lut_params = &plane_state->in_shaper_func.pwl; else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { // TODO: dpp_base replace - rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, - &dpp_base->shaper_params, true); + rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->in_shaper_func, + &dpp_base->shaper_params, true); lut_params = rval ? 
&dpp_base->shaper_params : NULL; } result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); @@ -674,7 +676,7 @@ bool dcn401_set_output_transfer_func(struct dc *dc, params = &stream->out_transfer_func.pwl; else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format( + cm3_helper_translate_curve_to_hw_format(stream->ctx, &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 79b174e5326da6..302af1fb6901e7 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -80,15 +80,15 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN; + mutex_lock(&adev->pm.mutex); + if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state && (!is_vcn || adev->vcn.num_vcn_inst == 1)) { dev_dbg(adev->dev, "IP block%d already in the target %s state!", block_type, gate ? "gate" : "ungate"); - return 0; + goto out_unlock; } - mutex_lock(&adev->pm.mutex); - switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_VCE: @@ -115,6 +115,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, if (!ret) atomic_set(&adev->pm.pwr_state[block_type], pwr_state); +out_unlock: mutex_unlock(&adev->pm.mutex); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 4263798d716b76..8e592a477c33a1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -56,6 +56,7 @@ #define SMUQ10_TO_UINT(x) ((x) >> 10) #define SMUQ10_FRAC(x) ((x) & 0x3ff) #define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) +#define SMU_V13_SOFT_FREQ_ROUND(x) ((x) + 1) extern const int pmfw_decoded_link_speed[5]; extern const int pmfw_decoded_link_width[7]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 29a4583db8734b..0b1e6f25e61129 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -57,6 +57,7 @@ extern const int decoded_link_width[8]; #define DECODE_GEN_SPEED(gen_speed_idx) (decoded_link_speed[gen_speed_idx]) #define DECODE_LANE_WIDTH(lane_width_idx) (decoded_link_width[lane_width_idx]) +#define SMU_V14_SOFT_FREQ_ROUND(x) ((x) + 1) struct smu_14_0_max_sustainable_clocks { uint32_t display_clock; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a89075e257174a..2efd914d81e5b8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1555,6 +1555,7 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { + max = SMU_V13_SOFT_FREQ_ROUND(max); if (automatic) param = (uint32_t)((clk_id << 16) | 0xffff); else diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index f2a16dfee59981..06a81533759cda 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1178,6 +1178,7 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { + max = SMU_V14_SOFT_FREQ_ROUND(max); if (automatic) param 
= (uint32_t)((clk_id << 16) | 0xffff); else diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c index 8d13a35b206a42..cc221483ef0d5a 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ struct imx8mp_hdmi_pai { struct regmap *regmap; + struct device *dev; }; static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel, @@ -43,6 +45,9 @@ static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel, struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; int val; + if (pm_runtime_resume_and_get(hdmi_pai->dev) < 0) + return; + /* PAI set control extended */ val = WTMK_HIGH(3) | WTMK_LOW(3); val |= NUM_CH(channel); @@ -85,6 +90,8 @@ static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi) /* Stop PAI */ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0); + + pm_runtime_put_sync(hdmi_pai->dev); } static const struct regmap_config imx8mp_hdmi_pai_regmap_config = { @@ -101,6 +108,7 @@ static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void struct imx8mp_hdmi_pai *hdmi_pai; struct resource *res; void __iomem *base; + int ret; hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL); if (!hdmi_pai) @@ -121,6 +129,13 @@ static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void plat_data->disable_audio = imx8mp_hdmi_pai_disable; plat_data->priv_audio = hdmi_pai; + hdmi_pai->dev = dev; + ret = devm_pm_runtime_enable(dev); + if (ret < 0) { + dev_err(dev, "failed to enable PM runtime: %d\n", ret); + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index e4df434273945e..25f68fed9b4823 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -960,16 +960,21 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data, { struct drm_gem_change_handle *args = data; struct drm_gem_object *obj; - int ret; + int handle, ret; if (!drm_core_check_feature(dev, DRIVER_GEM)) return -EOPNOTSUPP; + /* idr_alloc() limitation. 
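For context on that limitation (reference sketch, not part of the patch): idr_alloc() takes its range as signed ints and allocates in [start, end), so a u32 handle above INT_MAX can never be granted and is rejected up front. Passing end = start + 1 requests exactly one specific ID:

	/* Reference only: returns the ID on success, -ENOSPC if 'want' is
	 * already taken, or -ENOMEM on allocation failure.
	 */
	int want = 42;
	int id = idr_alloc(&file_priv->object_idr, obj, want, want + 1, GFP_NOWAIT);
	if (id < 0)
		return id;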
*/ + if (args->new_handle > INT_MAX) + return -EINVAL; + handle = args->new_handle; + obj = drm_gem_object_lookup(file_priv, args->handle); if (!obj) return -ENOENT; - if (args->handle == args->new_handle) { + if (args->handle == handle) { ret = 0; goto out; } @@ -977,18 +982,19 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data, mutex_lock(&file_priv->prime.lock); spin_lock(&file_priv->table_lock); - ret = idr_alloc(&file_priv->object_idr, obj, - args->new_handle, args->new_handle + 1, GFP_NOWAIT); + ret = idr_alloc(&file_priv->object_idr, obj, handle, handle + 1, + GFP_NOWAIT); spin_unlock(&file_priv->table_lock); if (ret < 0) goto out_unlock; if (obj->dma_buf) { - ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle); + ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, + handle); if (ret < 0) { spin_lock(&file_priv->table_lock); - idr_remove(&file_priv->object_idr, args->new_handle); + idr_remove(&file_priv->object_idr, handle); spin_unlock(&file_priv->table_lock); goto out_unlock; } diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c index 06c1bd8fc4d170..704f2f94501909 100644 --- a/drivers/gpu/drm/drm_pagemap.c +++ b/drivers/gpu/drm/drm_pagemap.c @@ -197,7 +197,7 @@ static void drm_pagemap_get_devmem_page(struct page *page, struct drm_pagemap_zdd *zdd) { page->zone_device_data = drm_pagemap_zdd_get(zdd); - zone_device_page_init(page, 0); + zone_device_page_init(page, page_pgmap(page), 0); } /** diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 3a946b4720649a..c224c7ff353ce3 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -250,7 +250,6 @@ static irqreturn_t gma_irq_handler(int irq, void *arg) void gma_irq_preinstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct drm_crtc *crtc; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -261,15 +260,10 @@ void gma_irq_preinstall(struct drm_device *dev) PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE); PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); - drm_for_each_crtc(crtc, dev) { - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); - - if (vblank->enabled) { - u32 mask = drm_crtc_index(crtc) ? _PSB_VSYNC_PIPEB_FLAG : - _PSB_VSYNC_PIPEA_FLAG; - dev_priv->vdc_irq_mask |= mask; - } - } + if (dev->vblank[0].enabled) + dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; + if (dev->vblank[1].enabled) + dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; /* Revisit this area - want per device masks ? 
*/ if (dev_priv->ops->hotplug) @@ -284,8 +278,8 @@ void gma_irq_preinstall(struct drm_device *dev) void gma_irq_postinstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct drm_crtc *crtc; unsigned long irqflags; + unsigned int i; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -298,13 +292,11 @@ void gma_irq_postinstall(struct drm_device *dev) PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - drm_for_each_crtc(crtc, dev) { - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); - - if (vblank->enabled) - gma_enable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); + for (i = 0; i < dev->num_crtcs; ++i) { + if (dev->vblank[i].enabled) + gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); else - gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); + gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); } if (dev_priv->ops->hotplug_enable) @@ -345,8 +337,8 @@ void gma_irq_uninstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); - struct drm_crtc *crtc; unsigned long irqflags; + unsigned int i; if (!dev_priv->irq_enabled) return; @@ -358,11 +350,9 @@ void gma_irq_uninstall(struct drm_device *dev) PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - drm_for_each_crtc(crtc, dev) { - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); - - if (vblank->enabled) - gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); + for (i = 0; i < dev->num_crtcs; ++i) { + if (dev->vblank[i].enabled) + gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); } dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c index c5c6e070cc063d..e861b8b9d8fa81 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c @@ -528,6 +528,13 @@ static const struct component_ops imx_tve_ops = { .bind = imx_tve_bind, }; +static void imx_tve_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + static int imx_tve_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -549,6 +556,12 @@ static int imx_tve_probe(struct platform_device *pdev) if (ddc_node) { tve->ddc = of_find_i2c_adapter_by_node(ddc_node); of_node_put(ddc_node); + if (tve->ddc) { + ret = devm_add_action_or_reset(dev, imx_tve_put_device, + &tve->ddc->dev); + if (ret) + return ret; + } } tve->mode = of_get_tve_mode(np); diff --git a/drivers/gpu/drm/mgag200/mgag200_bmc.c b/drivers/gpu/drm/mgag200/mgag200_bmc.c index a689c71ff1653d..bbdeb791c5b38b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_bmc.c +++ b/drivers/gpu/drm/mgag200/mgag200_bmc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include +#include #include #include @@ -12,7 +13,7 @@ void mgag200_bmc_stop_scanout(struct mga_device *mdev) { u8 tmp; - int iter_max; + int ret; /* * 1 - The first step is to inform the BMC of an upcoming mode @@ -42,30 +43,22 @@ void mgag200_bmc_stop_scanout(struct mga_device *mdev) /* * 3a- The third step is to verify if there is an active scan. - * We are waiting for a 0 on remhsyncsts ). + * We are waiting for a 0 on remhsyncsts (). 
*/ - iter_max = 300; - while (!(tmp & 0x1) && iter_max) { - WREG8(DAC_INDEX, MGA1064_SPAREREG); - tmp = RREG8(DAC_DATA); - udelay(1000); - iter_max--; - } + ret = read_poll_timeout(RREG_DAC, tmp, !(tmp & 0x1), + 1000, 300000, false, + MGA1064_SPAREREG); + if (ret == -ETIMEDOUT) + return; /* - * 3b- This step occurs only if the remove is actually + * 3b- This step occurs only if the remote BMC is actually * scanning. We are waiting for the end of the frame which is * a 1 on remvsyncsts (XSPAREREG<1>) */ - if (iter_max) { - iter_max = 300; - while ((tmp & 0x2) && iter_max) { - WREG8(DAC_INDEX, MGA1064_SPAREREG); - tmp = RREG8(DAC_DATA); - udelay(1000); - iter_max--; - } - } + (void)read_poll_timeout(RREG_DAC, tmp, (tmp & 0x2), + 1000, 300000, false, + MGA1064_SPAREREG); } void mgag200_bmc_start_scanout(struct mga_device *mdev) diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index f4bf40cd7c88a8..a875c4bf8cbe4c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -111,6 +111,12 @@ #define DAC_INDEX 0x3c00 #define DAC_DATA 0x3c0a +#define RREG_DAC(reg) \ + ({ \ + WREG8(DAC_INDEX, reg); \ + RREG8(DAC_DATA); \ + }) \ + #define WREG_DAC(reg, v) \ do { \ WREG8(DAC_INDEX, reg); \ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index ac9a95aab2fb56..4c042133261c96 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -501,8 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = { {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, - {REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111}, - {REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555}, {} }; diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h index 03f1d564eb124d..b698c74306f8fe 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/client.h +++ b/drivers/gpu/drm/nouveau/include/nvif/client.h @@ -11,7 +11,7 @@ struct nvif_client { int nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *); void nvif_client_dtor(struct nvif_client *); -int nvif_client_suspend(struct nvif_client *); +int nvif_client_suspend(struct nvif_client *, bool); int nvif_client_resume(struct nvif_client *); /*XXX*/ diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h index 7b08ff769039a3..61c8a177b28fdc 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/driver.h +++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h @@ -8,7 +8,7 @@ struct nvif_driver { const char *name; int (*init)(const char *name, u64 device, const char *cfg, const char *dbg, void **priv); - int (*suspend)(void *priv); + int (*suspend)(void *priv, bool runtime); int (*resume)(void *priv); int (*ioctl)(void *priv, void *data, u32 size, void **hack); void __iomem *(*map)(void *priv, u64 handle, u32 size); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 99579e7b937605..954a89d43bada1 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -2,6 +2,7 @@ #ifndef __NVKM_DEVICE_H__ #define __NVKM_DEVICE_H__ #include +#include #include enum nvkm_subdev_type; @@ -93,7 +94,7 @@ struct nvkm_device_func { void *(*dtor)(struct nvkm_device *); int (*preinit)(struct nvkm_device 
*); int (*init)(struct nvkm_device *); - void (*fini)(struct nvkm_device *, bool suspend); + void (*fini)(struct nvkm_device *, enum nvkm_suspend_state suspend); int (*irq)(struct nvkm_device *); resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id); resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h index 738899fcf30b6a..1e97be6c65644e 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h @@ -20,7 +20,7 @@ struct nvkm_engine_func { int (*oneinit)(struct nvkm_engine *); int (*info)(struct nvkm_engine *, u64 mthd, u64 *data); int (*init)(struct nvkm_engine *); - int (*fini)(struct nvkm_engine *, bool suspend); + int (*fini)(struct nvkm_engine *, enum nvkm_suspend_state suspend); int (*reset)(struct nvkm_engine *); int (*nonstall)(struct nvkm_engine *); void (*intr)(struct nvkm_engine *); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h index 10107ef3ca49ef..54d356154274f9 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h @@ -2,6 +2,7 @@ #ifndef __NVKM_OBJECT_H__ #define __NVKM_OBJECT_H__ #include +#include struct nvkm_event; struct nvkm_gpuobj; struct nvkm_uevent; @@ -27,7 +28,7 @@ enum nvkm_object_map { struct nvkm_object_func { void *(*dtor)(struct nvkm_object *); int (*init)(struct nvkm_object *); - int (*fini)(struct nvkm_object *, bool suspend); + int (*fini)(struct nvkm_object *, enum nvkm_suspend_state suspend); int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size); int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **); int (*map)(struct nvkm_object *, void *argv, u32 argc, @@ -49,7 +50,7 @@ int nvkm_object_new(const struct nvkm_oclass *, void *data, u32 size, void nvkm_object_del(struct nvkm_object **); void *nvkm_object_dtor(struct nvkm_object *); int nvkm_object_init(struct nvkm_object *); -int nvkm_object_fini(struct nvkm_object *, bool suspend); +int nvkm_object_fini(struct nvkm_object *, enum nvkm_suspend_state); int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size); int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **); int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc, diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h index 0e70a9afba33d4..cf66aee4d111d2 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h @@ -13,7 +13,7 @@ struct nvkm_oproxy { struct nvkm_oproxy_func { void (*dtor[2])(struct nvkm_oproxy *); int (*init[2])(struct nvkm_oproxy *); - int (*fini[2])(struct nvkm_oproxy *, bool suspend); + int (*fini[2])(struct nvkm_oproxy *, enum nvkm_suspend_state suspend); }; void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *, diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h index bce6e1ba09ea04..bd6b1b658e40f3 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h @@ -40,7 +40,7 @@ struct nvkm_subdev_func { int (*oneinit)(struct nvkm_subdev *); int (*info)(struct nvkm_subdev *, u64 mthd, u64 *data); int (*init)(struct nvkm_subdev *); - int (*fini)(struct nvkm_subdev *, bool 
suspend); + int (*fini)(struct nvkm_subdev *, enum nvkm_suspend_state suspend); void (*intr)(struct nvkm_subdev *); }; @@ -65,7 +65,7 @@ void nvkm_subdev_unref(struct nvkm_subdev *); int nvkm_subdev_preinit(struct nvkm_subdev *); int nvkm_subdev_oneinit(struct nvkm_subdev *); int nvkm_subdev_init(struct nvkm_subdev *); -int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend); +int nvkm_subdev_fini(struct nvkm_subdev *, enum nvkm_suspend_state suspend); int nvkm_subdev_info(struct nvkm_subdev *, u64, u64 *); void nvkm_subdev_intr(struct nvkm_subdev *); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/suspend_state.h b/drivers/gpu/drm/nouveau/include/nvkm/core/suspend_state.h new file mode 100644 index 00000000000000..134120fb71f4eb --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/suspend_state.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_SUSPEND_STATE_H__ +#define __NVKM_SUSPEND_STATE_H__ + +enum nvkm_suspend_state { + NVKM_POWEROFF, + NVKM_SUSPEND, + NVKM_RUNTIME_SUSPEND, +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index b8b97e10ae83ed..64fed208e4cf85 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -44,6 +44,9 @@ typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 rep * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the * caller after the GSP RPC command is issued. * + * NVKM_GSP_RPC_REPLY_NOSEQ - If specified, exactly like NOWAIT + * but don't emit RPC sequence number. + * * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP * RPC message after the GSP RPC command is issued. * @@ -53,6 +56,7 @@ typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 rep */ enum nvkm_gsp_rpc_reply_policy { NVKM_GSP_RPC_REPLY_NOWAIT = 0, + NVKM_GSP_RPC_REPLY_NOSEQ, NVKM_GSP_RPC_REPLY_RECV, NVKM_GSP_RPC_REPLY_POLL, }; @@ -242,6 +246,8 @@ struct nvkm_gsp { /* The size of the registry RPC */ size_t registry_rpc_size; + u32 rpc_seq; + #ifdef CONFIG_DEBUG_FS /* * Logging buffers in debugfs. 
The wrapper objects need to remain diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 829c2b573971cb..00515623a2cc72 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -352,8 +352,6 @@ nouveau_user_framebuffer_create(struct drm_device *dev, static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { .fb_create = nouveau_user_framebuffer_create, - .atomic_commit = drm_atomic_helper_commit, - .atomic_check = drm_atomic_helper_check, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 58071652679d8a..3d8031296eed64 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -425,7 +425,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, bool is_large) order = ilog2(DMEM_CHUNK_NPAGES); } - zone_device_folio_init(folio, order); + zone_device_folio_init(folio, page_pgmap(folio_page(folio, 0)), order); return page; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 1527b801f013aa..dc469e571c0aa4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -983,7 +983,7 @@ nouveau_do_suspend(struct nouveau_drm *drm, bool runtime) } NV_DEBUG(drm, "suspending object tree...\n"); - ret = nvif_client_suspend(&drm->_client); + ret = nvif_client_suspend(&drm->_client, runtime); if (ret) goto fail_client; diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c index adb802421fda58..eeb4ebbc16bf6a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_nvif.c +++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c @@ -62,10 +62,16 @@ nvkm_client_resume(void *priv) } static int -nvkm_client_suspend(void *priv) +nvkm_client_suspend(void *priv, bool runtime) { struct nvkm_client *client = priv; - return nvkm_object_fini(&client->object, true); + enum nvkm_suspend_state state; + + if (runtime) + state = NVKM_RUNTIME_SUSPEND; + else + state = NVKM_SUSPEND; + return nvkm_object_fini(&client->object, state); } static int diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c index fdf5054ed7d83c..36d3c99786bd5c 100644 --- a/drivers/gpu/drm/nouveau/nvif/client.c +++ b/drivers/gpu/drm/nouveau/nvif/client.c @@ -30,9 +30,9 @@ #include int -nvif_client_suspend(struct nvif_client *client) +nvif_client_suspend(struct nvif_client *client, bool runtime) { - return client->driver->suspend(client->object.priv); + return client->driver->suspend(client->object.priv, runtime); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c index 36a31e9eea228e..5bf62940d7be1e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c @@ -41,7 +41,7 @@ nvkm_engine_reset(struct nvkm_engine *engine) if (engine->func->reset) return engine->func->reset(engine); - nvkm_subdev_fini(&engine->subdev, false); + nvkm_subdev_fini(&engine->subdev, NVKM_POWEROFF); return nvkm_subdev_init(&engine->subdev); } @@ -98,7 +98,7 @@ nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data) } static int -nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_engine_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_engine *engine = nvkm_engine(subdev); if (engine->func->fini) diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c 
b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c index 45051a1249daca..b8fc9be678515f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c @@ -141,7 +141,7 @@ nvkm_ioctl_new(struct nvkm_client *client, } ret = -EEXIST; } - nvkm_object_fini(object, false); + nvkm_object_fini(object, NVKM_POWEROFF); } nvkm_object_del(&object); @@ -160,7 +160,7 @@ nvkm_ioctl_del(struct nvkm_client *client, nvif_ioctl(object, "delete size %d\n", size); if (!(ret = nvif_unvers(ret, &data, &size, args->none))) { nvif_ioctl(object, "delete\n"); - nvkm_object_fini(object, false); + nvkm_object_fini(object, NVKM_POWEROFF); nvkm_object_del(&object); } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c index 390c265cf8af42..af9f00f74c2886 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/object.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c @@ -142,13 +142,25 @@ nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj, } int -nvkm_object_fini(struct nvkm_object *object, bool suspend) +nvkm_object_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { - const char *action = suspend ? "suspend" : "fini"; + const char *action; struct nvkm_object *child; s64 time; int ret; + switch (suspend) { + case NVKM_POWEROFF: + default: + action = "fini"; + break; + case NVKM_SUSPEND: + action = "suspend"; + break; + case NVKM_RUNTIME_SUSPEND: + action = "runtime"; + break; + } nvif_debug(object, "%s children...\n", action); time = ktime_to_us(ktime_get()); list_for_each_entry_reverse(child, &object->tree, head) { @@ -212,11 +224,11 @@ nvkm_object_init(struct nvkm_object *object) fail_child: list_for_each_entry_continue_reverse(child, &object->tree, head) - nvkm_object_fini(child, false); + nvkm_object_fini(child, NVKM_POWEROFF); fail: nvif_error(object, "init failed with %d\n", ret); if (object->func->fini) - object->func->fini(object, false); + object->func->fini(object, NVKM_POWEROFF); return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c index 5db80d1780f089..7c9edf7527686f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c @@ -87,7 +87,7 @@ nvkm_oproxy_uevent(struct nvkm_object *object, void *argv, u32 argc, } static int -nvkm_oproxy_fini(struct nvkm_object *object, bool suspend) +nvkm_oproxy_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nvkm_oproxy *oproxy = nvkm_oproxy(object); int ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index 6c20e827a069a5..b7045d1c841586 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -51,12 +51,24 @@ nvkm_subdev_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data) } int -nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_subdev_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_device *device = subdev->device; - const char *action = suspend ? "suspend" : subdev->use.enabled ? "fini" : "reset"; + const char *action; s64 time; + switch (suspend) { + case NVKM_POWEROFF: + default: + action = subdev->use.enabled ? 
"fini" : "reset"; + break; + case NVKM_SUSPEND: + action = "suspend"; + break; + case NVKM_RUNTIME_SUSPEND: + action = "runtime"; + break; + } nvkm_trace(subdev, "%s running...\n", action); time = ktime_to_us(ktime_get()); @@ -186,7 +198,7 @@ void nvkm_subdev_unref(struct nvkm_subdev *subdev) { if (refcount_dec_and_mutex_lock(&subdev->use.refcount, &subdev->use.mutex)) { - nvkm_subdev_fini(subdev, false); + nvkm_subdev_fini(subdev, NVKM_POWEROFF); mutex_unlock(&subdev->use.mutex); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c index cc254c390a5719..46beb6e470ee36 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c @@ -73,7 +73,7 @@ nvkm_uevent_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc) } static int -nvkm_uevent_fini(struct nvkm_object *object, bool suspend) +nvkm_uevent_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nvkm_uevent *uevent = nvkm_uevent(object); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c index 1c0c6013870602..1a3caf69760827 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c @@ -46,7 +46,7 @@ ga100_ce_nonstall(struct nvkm_engine *engine) } int -ga100_ce_fini(struct nvkm_engine *engine, bool suspend) +ga100_ce_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) { nvkm_inth_block(&engine->subdev.inth); return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h index 34fd2657134b81..f07b4585331043 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h @@ -14,7 +14,7 @@ extern const struct nvkm_object_func gv100_ce_cclass; int ga100_ce_oneinit(struct nvkm_engine *); int ga100_ce_init(struct nvkm_engine *); -int ga100_ce_fini(struct nvkm_engine *, bool); +int ga100_ce_fini(struct nvkm_engine *, enum nvkm_suspend_state); int ga100_ce_nonstall(struct nvkm_engine *); u32 gb202_ce_grce_mask(struct nvkm_device *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 2517b65d8faad9..b101e14f841e03 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2936,13 +2936,25 @@ nvkm_device_engine(struct nvkm_device *device, int type, int inst) } int -nvkm_device_fini(struct nvkm_device *device, bool suspend) +nvkm_device_fini(struct nvkm_device *device, enum nvkm_suspend_state suspend) { - const char *action = suspend ? 
"suspend" : "fini"; + const char *action; struct nvkm_subdev *subdev; int ret; s64 time; + switch (suspend) { + case NVKM_POWEROFF: + default: + action = "fini"; + break; + case NVKM_SUSPEND: + action = "suspend"; + break; + case NVKM_RUNTIME_SUSPEND: + action = "runtime"; + break; + } nvdev_trace(device, "%s running...\n", action); time = ktime_to_us(ktime_get()); @@ -3032,7 +3044,7 @@ nvkm_device_init(struct nvkm_device *device) if (ret) return ret; - nvkm_device_fini(device, false); + nvkm_device_fini(device, NVKM_POWEROFF); nvdev_trace(device, "init running...\n"); time = ktime_to_us(ktime_get()); @@ -3060,9 +3072,9 @@ nvkm_device_init(struct nvkm_device *device) fail_subdev: list_for_each_entry_from(subdev, &device->subdev, head) - nvkm_subdev_fini(subdev, false); + nvkm_subdev_fini(subdev, NVKM_POWEROFF); fail: - nvkm_device_fini(device, false); + nvkm_device_fini(device, NVKM_POWEROFF); nvdev_error(device, "init failed with %d\n", ret); return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index 8f0261a0d618bf..4c29b60460d48d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c @@ -1605,10 +1605,10 @@ nvkm_device_pci_irq(struct nvkm_device *device) } static void -nvkm_device_pci_fini(struct nvkm_device *device, bool suspend) +nvkm_device_pci_fini(struct nvkm_device *device, enum nvkm_suspend_state suspend) { struct nvkm_device_pci *pdev = nvkm_device_pci(device); - if (suspend) { + if (suspend != NVKM_POWEROFF) { pci_disable_device(pdev->pdev); pdev->suspend = true; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h index 75ee7506d4432b..d0c40f0342442d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h @@ -56,5 +56,5 @@ int nvkm_device_ctor(const struct nvkm_device_func *, const char *name, const char *cfg, const char *dbg, struct nvkm_device *); int nvkm_device_init(struct nvkm_device *); -int nvkm_device_fini(struct nvkm_device *, bool suspend); +int nvkm_device_fini(struct nvkm_device *, enum nvkm_suspend_state suspend); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index 58191b7a0494ef..32ff3181f47b67 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -218,7 +218,7 @@ nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc, } static int -nvkm_udevice_fini(struct nvkm_object *object, bool suspend) +nvkm_udevice_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nvkm_udevice *udev = nvkm_udevice(object); struct nvkm_device *device = udev->device; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index b24eb1e560bc7c..84745f60912e0b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c @@ -99,13 +99,13 @@ nvkm_disp_intr(struct nvkm_engine *engine) } static int -nvkm_disp_fini(struct nvkm_engine *engine, bool suspend) +nvkm_disp_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) { struct nvkm_disp *disp = nvkm_disp(engine); struct nvkm_outp *outp; if (disp->func->fini) - disp->func->fini(disp, suspend); + disp->func->fini(disp, suspend != NVKM_POWEROFF); list_for_each_entry(outp, &disp->outps, head) { if 
(outp->func->fini) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c index 9b84e357d3544e..57a62a2de7c7da 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c @@ -128,7 +128,7 @@ nvkm_disp_chan_child_get(struct nvkm_object *object, int index, struct nvkm_ocla } static int -nvkm_disp_chan_fini(struct nvkm_object *object, bool suspend) +nvkm_disp_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nvkm_disp_chan *chan = nvkm_disp_chan(object); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c index fd5ee9f0af360c..cf8e356867b4fc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c @@ -93,13 +93,13 @@ nvkm_falcon_intr(struct nvkm_engine *engine) } static int -nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend) +nvkm_falcon_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) { struct nvkm_falcon *falcon = nvkm_falcon(engine); struct nvkm_device *device = falcon->engine.subdev.device; const u32 base = falcon->addr; - if (!suspend) { + if (suspend == NVKM_POWEROFF) { nvkm_memory_unref(&falcon->core); if (falcon->external) { vfree(falcon->data.data); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c index 6fd4e60634fbe4..1561287a32f208 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c @@ -122,7 +122,7 @@ nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index, const struct nvkm_dev } static int -nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend) +nvkm_fifo_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) { struct nvkm_fifo *fifo = nvkm_fifo(engine); struct nvkm_runl *runl; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c index 52420a1edca52a..c978b97e10c64a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c @@ -72,7 +72,7 @@ struct nvkm_uobj { }; static int -nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend) +nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, enum nvkm_suspend_state suspend) { struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy); struct nvkm_chan *chan = uobj->chan; @@ -87,7 +87,7 @@ nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend) nvkm_chan_cctx_bind(chan, ectx->engn, NULL); if (refcount_dec_and_test(&ectx->uses)) - nvkm_object_fini(ectx->object, false); + nvkm_object_fini(ectx->object, NVKM_POWEROFF); mutex_unlock(&chan->cgrp->mutex); } @@ -269,7 +269,7 @@ nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc, } static int -nvkm_uchan_fini(struct nvkm_object *object, bool suspend) +nvkm_uchan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nvkm_chan *chan = nvkm_uchan(object)->chan; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c index f5e68f09df768d..cd4908b1b4dfff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c @@ -168,11 +168,11 @@ nvkm_gr_init(struct nvkm_engine *engine) } static int -nvkm_gr_fini(struct nvkm_engine *engine, bool suspend) +nvkm_gr_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) { 
struct nvkm_gr *gr = nvkm_gr(engine); if (gr->func->fini) - return gr->func->fini(gr, suspend); + return gr->func->fini(gr, suspend != NVKM_POWEROFF); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 3ea447f6a45b51..3608215f0f11c8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -2330,7 +2330,7 @@ gf100_gr_reset(struct nvkm_gr *base) WARN_ON(gf100_gr_fecs_halt_pipeline(gr)); - subdev->func->fini(subdev, false); + subdev->func->fini(subdev, NVKM_POWEROFF); nvkm_mc_disable(device, subdev->type, subdev->inst); if (gr->func->gpccs.reset) gr->func->gpccs.reset(gr); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c index ca822f07b63e9e..82937df8b8c0d2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c @@ -1158,7 +1158,7 @@ nv04_gr_chan_dtor(struct nvkm_object *object) } static int -nv04_gr_chan_fini(struct nvkm_object *object, bool suspend) +nv04_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nv04_gr_chan *chan = nv04_gr_chan(object); struct nv04_gr *gr = chan->gr; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c index 92ef7c9b29101c..fcb4e4fce83f7a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c @@ -951,7 +951,7 @@ nv10_gr_context_switch(struct nv10_gr *gr) } static int -nv10_gr_chan_fini(struct nvkm_object *object, bool suspend) +nv10_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nv10_gr_chan *chan = nv10_gr_chan(object); struct nv10_gr *gr = chan->gr; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c index 13407fafe94797..ab57b3b40228e8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c @@ -27,7 +27,7 @@ nv20_gr_chan_init(struct nvkm_object *object) } int -nv20_gr_chan_fini(struct nvkm_object *object, bool suspend) +nv20_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nv20_gr_chan *chan = nv20_gr_chan(object); struct nv20_gr *gr = chan->gr; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h index c0d2be53413e0d..786c7832f7acdb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h @@ -31,5 +31,5 @@ struct nv20_gr_chan { void *nv20_gr_chan_dtor(struct nvkm_object *); int nv20_gr_chan_init(struct nvkm_object *); -int nv20_gr_chan_fini(struct nvkm_object *, bool); +int nv20_gr_chan_fini(struct nvkm_object *, enum nvkm_suspend_state); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c index b609b0150ba140..e3e797cf30349e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c @@ -89,7 +89,7 @@ nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, } static int -nv40_gr_chan_fini(struct nvkm_object *object, bool suspend) +nv40_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nv40_gr_chan *chan = nv40_gr_chan(object); struct nv40_gr *gr = chan->gr; @@ -101,7 +101,7 @@ nv40_gr_chan_fini(struct nvkm_object *object, bool 
suspend) nvkm_mask(device, 0x400720, 0x00000001, 0x00000000); if (nvkm_rd32(device, 0x40032c) == inst) { - if (suspend) { + if (suspend != NVKM_POWEROFF) { nvkm_wr32(device, 0x400720, 0x00000000); nvkm_wr32(device, 0x400784, inst); nvkm_mask(device, 0x400310, 0x00000020, 0x00000020); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c index 4b1374adbda3a1..38146f9cc81c4e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c @@ -65,7 +65,7 @@ nv44_mpeg_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, } static int -nv44_mpeg_chan_fini(struct nvkm_object *object, bool suspend) +nv44_mpeg_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c index f2c60da5d1e86f..3e4d6a680ee954 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c @@ -37,7 +37,7 @@ nvkm_sec2_finimsg(void *priv, struct nvfw_falcon_msg *hdr) } static int -nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend) +nvkm_sec2_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) { struct nvkm_sec2 *sec2 = nvkm_sec2(engine); struct nvkm_subdev *subdev = &sec2->engine.subdev; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c index f7d3ba0afb5564..910a5bb2d191db 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c @@ -76,7 +76,7 @@ nvkm_xtensa_intr(struct nvkm_engine *engine) } static int -nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend) +nvkm_xtensa_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) { struct nvkm_xtensa *xtensa = nvkm_xtensa(engine); struct nvkm_device *device = xtensa->engine.subdev.device; @@ -85,7 +85,7 @@ nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend) nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */ nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */ - if (!suspend) + if (suspend == NVKM_POWEROFF) nvkm_memory_unref(&xtensa->gpu_fw); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c index 9b8ca4e898f903..13d829593180c6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c @@ -182,7 +182,7 @@ nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id) } static int -nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_acr_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { if (!subdev->use.enabled) return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c index 91bc53be97ffc2..7dee55bf9ada9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c @@ -90,7 +90,7 @@ nvkm_bar_bar2_init(struct nvkm_device *device) } static int -nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_bar_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_bar *bar = nvkm_bar(subdev); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c index 178dc56909c274..71420f81714b35 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c 
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c @@ -577,7 +577,7 @@ nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) } static int -nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_clk_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_clk *clk = nvkm_clk(subdev); flush_work(&clk->work); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c index 3d9319c319c647..ad5ec9ee1294db 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c @@ -67,11 +67,11 @@ nvkm_devinit_post(struct nvkm_devinit *init) } static int -nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_devinit_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_devinit *init = nvkm_devinit(subdev); /* force full reinit on resume */ - if (suspend) + if (suspend != NVKM_POWEROFF) init->post = true; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c index b53ac9a2552fa7..d8d32bb5bcd9e7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c @@ -51,7 +51,7 @@ nvkm_fault_intr(struct nvkm_subdev *subdev) } static int -nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_fault_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_fault *fault = nvkm_fault(subdev); if (fault->func->fini) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c index cd2fbc0472d8e7..8ab052d18e5d36 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c @@ -56,7 +56,7 @@ nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc, } static int -nvkm_ufault_fini(struct nvkm_object *object, bool suspend) +nvkm_ufault_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) { struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object); buffer->fault->func->buffer.fini(buffer); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c index b196baa376dcdb..b2c34878a68f63 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c @@ -144,7 +144,7 @@ nvkm_gpio_intr(struct nvkm_subdev *subdev) } static int -nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_gpio_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_gpio *gpio = nvkm_gpio(subdev); u32 mask = (1ULL << gpio->func->lines) - 1; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index 7ccb4176106653..30cb843ba35cdd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -48,7 +48,7 @@ nvkm_gsp_intr_stall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst) } static int -nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_gsp_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_gsp *gsp = nvkm_gsp(subdev); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c index b0dd5fce7bad36..88436a26417799 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c @@ -17,7 +17,7 @@ 
#include int -gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +gh100_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend) { struct nvkm_falcon *falcon = &gsp->falcon; int ret, time = 4000; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 9dd66a2e38017b..71b7203bef5073 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -59,7 +59,7 @@ struct nvkm_gsp_func { void (*dtor)(struct nvkm_gsp *); int (*oneinit)(struct nvkm_gsp *); int (*init)(struct nvkm_gsp *); - int (*fini)(struct nvkm_gsp *, bool suspend); + int (*fini)(struct nvkm_gsp *, enum nvkm_suspend_state suspend); int (*reset)(struct nvkm_gsp *); struct { @@ -75,7 +75,7 @@ int tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *); void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *); int tu102_gsp_oneinit(struct nvkm_gsp *); int tu102_gsp_init(struct nvkm_gsp *); -int tu102_gsp_fini(struct nvkm_gsp *, bool suspend); +int tu102_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend); int tu102_gsp_reset(struct nvkm_gsp *); u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *); @@ -87,12 +87,12 @@ int ga102_gsp_reset(struct nvkm_gsp *); int gh100_gsp_oneinit(struct nvkm_gsp *); int gh100_gsp_init(struct nvkm_gsp *); -int gh100_gsp_fini(struct nvkm_gsp *, bool suspend); +int gh100_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend); void r535_gsp_dtor(struct nvkm_gsp *); int r535_gsp_oneinit(struct nvkm_gsp *); int r535_gsp_init(struct nvkm_gsp *); -int r535_gsp_fini(struct nvkm_gsp *, bool suspend); +int r535_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend); int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c index 150e22fde2ac20..e962d0e8f83777 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c @@ -208,7 +208,7 @@ r535_fbsr_resume(struct nvkm_gsp *gsp) } static int -r535_fbsr_suspend(struct nvkm_gsp *gsp) +r535_fbsr_suspend(struct nvkm_gsp *gsp, bool runtime) { struct nvkm_subdev *subdev = &gsp->subdev; struct nvkm_device *device = subdev->device; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 2a7e80c6d70f39..7fb13434c051de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -704,7 +704,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) build_registry(gsp, rpc); - return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOSEQ); fail: clean_registry(gsp); @@ -921,7 +921,7 @@ r535_gsp_set_system_info(struct nvkm_gsp *gsp) info->pciConfigMirrorSize = device->pci->func->cfg.size; r535_gsp_acpi_info(gsp, &info->acpiMethodData); - return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOSEQ); } static int @@ -1721,7 +1721,7 @@ r535_gsp_sr_data_size(struct nvkm_gsp *gsp) } int -r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +r535_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend) { struct nvkm_rm *rm = gsp->rm; int ret; @@ -1748,7 +1748,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) sr->sysmemAddrOfSuspendResumeData = 
gsp->sr.radix3.lvl0.addr; sr->sizeOfSuspendResumeData = len; - ret = rm->api->fbsr->suspend(gsp); + ret = rm->api->fbsr->suspend(gsp, suspend == NVKM_RUNTIME_SUSPEND); if (ret) { nvkm_gsp_mem_dtor(&gsp->sr.meta); nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c index 0dc4782df8c0c1..3ca3de8f434082 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c @@ -557,6 +557,7 @@ r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn, switch (policy) { case NVKM_GSP_RPC_REPLY_NOWAIT: + case NVKM_GSP_RPC_REPLY_NOSEQ: break; case NVKM_GSP_RPC_REPLY_RECV: reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); @@ -588,6 +589,11 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, rpc->data, rpc->length - sizeof(*rpc), true); } + if (policy == NVKM_GSP_RPC_REPLY_NOSEQ) + rpc->sequence = 0; + else + rpc->sequence = gsp->rpc_seq++; + ret = r535_gsp_cmdq_push(gsp, rpc); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c index 2945d5b4e57070..8ef8b4f6558830 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c @@ -62,7 +62,7 @@ r570_fbsr_resume(struct nvkm_gsp *gsp) } static int -r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size) +r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, bool runtime) { NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; struct nvkm_gsp_object memlist; @@ -81,7 +81,7 @@ r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size) ctrl->hClient = gsp->internal.client.object.handle; ctrl->hSysMem = memlist.handle; ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr; - ctrl->bEnteringGcoffState = 1; + ctrl->bEnteringGcoffState = runtime ? 1 : 0; ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); if (ret) @@ -92,7 +92,7 @@ r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size) } static int -r570_fbsr_suspend(struct nvkm_gsp *gsp) +r570_fbsr_suspend(struct nvkm_gsp *gsp, bool runtime) { struct nvkm_subdev *subdev = &gsp->subdev; struct nvkm_device *device = subdev->device; @@ -133,7 +133,7 @@ r570_fbsr_suspend(struct nvkm_gsp *gsp) return ret; /* Initialise FBSR on RM. 
*/ - ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size); + ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size, runtime); if (ret) { nvkm_gsp_sg_free(device, &gsp->sr.fbsr); return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c index 9d2fa4e66d5982..996941c668ba9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c @@ -176,7 +176,7 @@ r570_gsp_set_system_info(struct nvkm_gsp *gsp) info->bIsPrimary = video_is_primary_device(device->dev); info->bPreserveVideoMemoryAllocations = false; - return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOSEQ); } static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 393ea775941fae..4f0ae6cc085c3d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -78,7 +78,7 @@ struct nvkm_rm_api { } *device; const struct nvkm_rm_api_fbsr { - int (*suspend)(struct nvkm_gsp *); + int (*suspend)(struct nvkm_gsp *, bool runtime); void (*resume)(struct nvkm_gsp *); } *fbsr; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index 04b642a1f73051..19cb269e7a2659 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -161,7 +161,7 @@ tu102_gsp_reset(struct nvkm_gsp *gsp) } int -tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +tu102_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend) { u32 mbox0 = 0xff, mbox1 = 0xff; int ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c index 7ec17e8435a136..454bb21815a272 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c @@ -135,7 +135,7 @@ nvkm_i2c_intr(struct nvkm_subdev *subdev) } static int -nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_i2c_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_i2c *i2c = nvkm_i2c(subdev); struct nvkm_i2c_pad *pad; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index 2f55bab8e132ff..6b9ed61684a027 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -176,7 +176,7 @@ nvkm_instmem_boot(struct nvkm_instmem *imem) } static int -nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_instmem_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_instmem *imem = nvkm_instmem(subdev); int ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c index 6867934256a768..0f3e0d324a529c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c @@ -74,7 +74,7 @@ nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow) } static int -nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_pci_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_pci *pci = nvkm_pci(subdev); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index 8f2f50ad4ded1f..9e9004ec4588b8 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -77,7 +77,7 @@ nvkm_pmu_intr(struct nvkm_subdev *subdev) } static int -nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_pmu_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index fc5ee118e91067..1510aba339560b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c @@ -341,15 +341,15 @@ nvkm_therm_intr(struct nvkm_subdev *subdev) } static int -nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_therm_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_therm *therm = nvkm_therm(subdev); if (therm->func->fini) therm->func->fini(therm); - nvkm_therm_fan_fini(therm, suspend); - nvkm_therm_sensor_fini(therm, suspend); + nvkm_therm_fan_fini(therm, suspend != NVKM_POWEROFF); + nvkm_therm_sensor_fini(therm, suspend != NVKM_POWEROFF); if (suspend) { therm->suspend = therm->mode; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c index 8b0da0c062685d..a5c3c282b5d077 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c @@ -149,7 +149,7 @@ nvkm_timer_intr(struct nvkm_subdev *subdev) } static int -nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend) +nvkm_timer_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) { struct nvkm_timer *tmr = nvkm_timer(subdev); tmr->func->alarm_fini(tmr); diff --git a/drivers/gpu/drm/tyr/Kconfig b/drivers/gpu/drm/tyr/Kconfig index 4b55308fd2eb4a..e933e647802722 100644 --- a/drivers/gpu/drm/tyr/Kconfig +++ b/drivers/gpu/drm/tyr/Kconfig @@ -6,6 +6,7 @@ config DRM_TYR depends on RUST depends on ARM || ARM64 || COMPILE_TEST depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE + depends on COMMON_CLK default n help Rust DRM driver for ARM Mali CSF-based GPUs. 
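The recurring nouveau change above replaces the bool "suspend" argument of the fini paths with an enum nvkm_suspend_state, so runtime suspend can be told apart from system suspend and from a plain power-off/teardown. The enum definition itself is not part of this excerpt; the sketch below is inferred from the call sites (NVKM_POWEROFF on fini/teardown, NVKM_SUSPEND for system suspend, NVKM_RUNTIME_SUSPEND propagated from nouveau_do_suspend()'s runtime flag), and the explicit zero value is an assumption made so that the remaining "if (suspend)" truthiness checks, such as the one left in nvkm_therm_fini(), keep meaning "anything but a plain power-off".

	/* Sketch only: presumed shape of the state passed to the fini callbacks;
	 * not shown in this diff, names taken from the hunks above.
	 */
	enum nvkm_suspend_state {
		NVKM_POWEROFF = 0,	/* object/device teardown, nothing to preserve (assumed zero) */
		NVKM_SUSPEND,		/* system-wide suspend */
		NVKM_RUNTIME_SUSPEND,	/* runtime PM; lets the GSP/fbsr paths pick the GC-off variant */
	};

The point of the three-way split is visible in the GSP hunks: r535_gsp_fini() forwards suspend == NVKM_RUNTIME_SUSPEND into the fbsr suspend callback, and r570_fbsr_init() only sets bEnteringGcoffState when that runtime flag is set, instead of unconditionally as before.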
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c index 9f6251b1008bac..82edd046600557 100644 --- a/drivers/gpu/drm/xe/xe_configfs.c +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -347,11 +347,10 @@ static bool is_bound(struct xe_config_group_device *dev) return false; ret = pci_get_drvdata(pdev); - pci_dev_put(pdev); - if (ret) pci_dbg(pdev, "Already bound to driver\n"); + pci_dev_put(pdev); return ret; } diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index cf29e259861f95..9a6d49fcd8e49b 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -984,8 +984,6 @@ void xe_device_remove(struct xe_device *xe) { xe_display_unregister(xe); - xe_nvm_fini(xe); - drm_dev_unplug(&xe->drm); xe_bo_pci_dev_remove_all(xe); diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index fd948003175069..8e3614b240107a 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -190,9 +190,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto err_syncs; } - if (xe_exec_queue_is_parallel(q)) { - err = copy_from_user(addresses, addresses_user, sizeof(u64) * - q->width); + if (args->num_batch_buffer && xe_exec_queue_is_parallel(q)) { + err = copy_from_user(addresses, addresses_user, + sizeof(u64) * q->width); if (err) { err = -EFAULT; goto err_syncs; diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index a686b04879d648..edb939f2626851 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -1618,7 +1618,7 @@ int xe_guc_start(struct xe_guc *guc) return xe_guc_submit_start(guc); } -void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) +int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) { struct xe_gt *gt = guc_to_gt(guc); unsigned int fw_ref; @@ -1630,7 +1630,7 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) if (!IS_SRIOV_VF(gt_to_xe(gt))) { fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (!fw_ref) - return; + return -EIO; status = xe_mmio_read32(>->mmio, GUC_STATUS); @@ -1658,6 +1658,8 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) drm_puts(p, "\n"); xe_guc_submit_print(guc, p); + + return 0; } /** diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index e2d4c5f44ae34d..61226ab8166976 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -45,7 +45,7 @@ int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val); int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val); void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir); void xe_guc_sanitize(struct xe_guc *guc); -void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p); +int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p); int xe_guc_reset_prepare(struct xe_guc *guc); void xe_guc_reset_wait(struct xe_guc *guc); void xe_guc_stop_prepare(struct xe_guc *guc); diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 281286f2b5f989..b8c1dd953665ba 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -1185,7 +1185,7 @@ static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc, return -ENOSPC; *cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1); - *cmd++ = CS_DEBUG_MODE1(0).addr; + *cmd++ = CS_DEBUG_MODE2(0).addr; *cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE); return cmd - batch; diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c 
index d8ee76aab4e41f..9d7329cef910af 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -1201,7 +1201,7 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, } /** - * xe_get_migrate_exec_queue() - Get the execution queue from migrate context. + * xe_migrate_exec_queue() - Get the execution queue from migrate context. * @migrate: Migrate context. * * Return: Pointer to execution queue on success, error on failure diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c index 33f4ac82fc80ab..6da42b2b5e4673 100644 --- a/drivers/gpu/drm/xe/xe_nvm.c +++ b/drivers/gpu/drm/xe/xe_nvm.c @@ -83,6 +83,27 @@ static bool xe_nvm_writable_override(struct xe_device *xe) return writable_override; } +static void xe_nvm_fini(void *arg) +{ + struct xe_device *xe = arg; + struct intel_dg_nvm_dev *nvm = xe->nvm; + + if (!xe->info.has_gsc_nvm) + return; + + /* No access to internal NVM from VFs */ + if (IS_SRIOV_VF(xe)) + return; + + /* Nvm pointer should not be NULL here */ + if (WARN_ON(!nvm)) + return; + + auxiliary_device_delete(&nvm->aux_dev); + auxiliary_device_uninit(&nvm->aux_dev); + xe->nvm = NULL; +} + int xe_nvm_init(struct xe_device *xe) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -132,39 +153,17 @@ int xe_nvm_init(struct xe_device *xe) ret = auxiliary_device_init(aux_dev); if (ret) { drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret); - goto err; + kfree(nvm); + xe->nvm = NULL; + return ret; } ret = auxiliary_device_add(aux_dev); if (ret) { drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret); auxiliary_device_uninit(aux_dev); - goto err; + xe->nvm = NULL; + return ret; } - return 0; - -err: - kfree(nvm); - xe->nvm = NULL; - return ret; -} - -void xe_nvm_fini(struct xe_device *xe) -{ - struct intel_dg_nvm_dev *nvm = xe->nvm; - - if (!xe->info.has_gsc_nvm) - return; - - /* No access to internal NVM from VFs */ - if (IS_SRIOV_VF(xe)) - return; - - /* Nvm pointer should not be NULL here */ - if (WARN_ON(!nvm)) - return; - - auxiliary_device_delete(&nvm->aux_dev); - auxiliary_device_uninit(&nvm->aux_dev); - xe->nvm = NULL; + return devm_add_action_or_reset(xe->drm.dev, xe_nvm_fini, xe); } diff --git a/drivers/gpu/drm/xe/xe_nvm.h b/drivers/gpu/drm/xe/xe_nvm.h index 7f3d5f57bed088..fd3467ad35a4d9 100644 --- a/drivers/gpu/drm/xe/xe_nvm.h +++ b/drivers/gpu/drm/xe/xe_nvm.h @@ -10,6 +10,4 @@ struct xe_device; int xe_nvm_init(struct xe_device *xe); -void xe_nvm_fini(struct xe_device *xe); - #endif diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 9c9ea10d994c4b..2aa883f5ef797e 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -342,7 +342,6 @@ static const struct xe_device_desc lnl_desc = { .has_display = true, .has_flat_ccs = 1, .has_pxp = true, - .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, .va_bits = 48, @@ -363,7 +362,6 @@ static const struct xe_device_desc bmg_desc = { .has_heci_cscfi = 1, .has_late_bind = true, .has_sriov = true, - .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, .subplatforms = (const struct xe_subplatform_desc[]) { @@ -380,7 +378,6 @@ static const struct xe_device_desc ptl_desc = { .has_display = true, .has_flat_ccs = 1, .has_sriov = true, - .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, .needs_shared_vf_gt_wq = true, @@ -393,7 +390,6 @@ static const struct xe_device_desc nvls_desc = { .dma_mask_size = 46, .has_display = true, .has_flat_ccs = 1, - .has_mem_copy_instr = 
true, .max_gt_per_tile = 2, .require_force_probe = true, .va_bits = 48, @@ -675,7 +671,6 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.has_pxp = desc->has_pxp; xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) && desc->has_sriov; - xe->info.has_mem_copy_instr = desc->has_mem_copy_instr; xe->info.skip_guc_pc = desc->skip_guc_pc; xe->info.skip_mtcfg = desc->skip_mtcfg; xe->info.skip_pcode = desc->skip_pcode; @@ -864,6 +859,7 @@ static int xe_info_init(struct xe_device *xe, xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval; xe->info.has_usm = graphics_desc->has_usm; xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp; + xe->info.has_mem_copy_instr = GRAPHICS_VER(xe) >= 20; xe_info_probe_tile_count(xe); diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h index 9892c063a9c543..a4451bdc79fb35 100644 --- a/drivers/gpu/drm/xe/xe_pci_types.h +++ b/drivers/gpu/drm/xe/xe_pci_types.h @@ -46,7 +46,6 @@ struct xe_device_desc { u8 has_late_bind:1; u8 has_llc:1; u8 has_mbx_power_limits:1; - u8 has_mem_copy_instr:1; u8 has_pxp:1; u8 has_sriov:1; u8 needs_scratch:1; diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 7669225302656e..51eb6d005331a0 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -357,9 +358,15 @@ ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */ static u32 vram_threshold_value(struct xe_device *xe) { - /* FIXME: D3Cold temporarily disabled by default on BMG */ - if (xe->info.platform == XE_BATTLEMAGE) - return 0; + if (xe->info.platform == XE_BATTLEMAGE) { + const char *product_name; + + product_name = dmi_get_system_info(DMI_PRODUCT_NAME); + if (product_name && strstr(product_name, "NUC13RNG")) { + drm_warn(&xe->drm, "BMG + D3Cold not supported on this platform\n"); + return 0; + } + } return DEFAULT_VRAM_THRESHOLD; } diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 1c0915e2cc16ed..fde36280e14d3e 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -491,7 +491,7 @@ static int copy_mask(void __user **ptr, if (copy_to_user(*ptr, topo, sizeof(*topo))) return -EFAULT; - *ptr += sizeof(topo); + *ptr += sizeof(*topo); if (copy_to_user(*ptr, mask, mask_size)) return -EFAULT; diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c index 918a59e686ea72..1b7ac31a37ca25 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval.c +++ b/drivers/gpu/drm/xe/xe_tlb_inval.c @@ -115,7 +115,7 @@ static void tlb_inval_fini(struct drm_device *drm, void *arg) } /** - * xe_gt_tlb_inval_init - Initialize TLB invalidation state + * xe_gt_tlb_inval_init_early() - Initialize TLB invalidation state * @gt: GT structure * * Initialize TLB invalidation state, purely software initialization, should diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.c b/drivers/gpu/drm/xe/xe_tlb_inval_job.c index 1ae0dec2cf3161..1ec9c0f5f0b6eb 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval_job.c +++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.c @@ -164,7 +164,7 @@ static void xe_tlb_inval_job_destroy(struct kref *ref) } /** - * xe_tlb_inval_alloc_dep() - TLB invalidation job alloc dependency + * xe_tlb_inval_job_alloc_dep() - TLB invalidation job alloc dependency * @job: TLB invalidation job to alloc dependency for * * Allocate storage for a dependency in the TLB invalidation fence. 
This diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 29ccdc2fb7ff85..de408df0c4d786 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -47,6 +47,8 @@ static int cap_in_hardware; static bool force_cap_on; +static DEFINE_MUTEX(acpi_notify_lock); + static int can_cap_in_hardware(void) { return force_cap_on || cap_in_hardware; @@ -823,18 +825,26 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event) resource = acpi_driver_data(device); + guard(mutex)(&acpi_notify_lock); + switch (event) { case METER_NOTIFY_CONFIG: + if (!IS_ERR(resource->hwmon_dev)) + hwmon_device_unregister(resource->hwmon_dev); + mutex_lock(&resource->lock); + free_capabilities(resource); remove_domain_devices(resource); - hwmon_device_unregister(resource->hwmon_dev); res = read_capabilities(resource); if (res) dev_err_once(&device->dev, "read capabilities failed.\n"); res = read_domain_devices(resource); if (res && res != -ENODEV) dev_err_once(&device->dev, "read domain devices failed.\n"); + + mutex_unlock(&resource->lock); + resource->hwmon_dev = hwmon_device_register_with_info(&device->dev, ACPI_POWER_METER_NAME, @@ -843,7 +853,7 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event) power_extra_groups); if (IS_ERR(resource->hwmon_dev)) dev_err_once(&device->dev, "register hwmon device failed.\n"); - mutex_unlock(&resource->lock); + break; case METER_NOTIFY_TRIP: sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME); @@ -953,7 +963,8 @@ static void acpi_power_meter_remove(struct acpi_device *device) return; resource = acpi_driver_data(device); - hwmon_device_unregister(resource->hwmon_dev); + if (!IS_ERR(resource->hwmon_dev)) + hwmon_device_unregister(resource->hwmon_dev); remove_domain_devices(resource); free_capabilities(resource); diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 6040a894067438..93143cfc157cf7 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -1639,6 +1639,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = { }, .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3], }, + { + .ident = "Dell G15 5510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell G15 5510"), + }, + .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3], + }, { .ident = "Dell G15 5511", .matches = { diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index 516c34bb61c9cf..a8892ced1e549f 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -148,7 +148,7 @@ static int set_fan_speed(struct gpio_fan_data *fan_data, int speed_index) int ret; ret = pm_runtime_put_sync(fan_data->dev); - if (ret < 0) + if (ret < 0 && ret != -ENOSYS) return ret; } @@ -291,7 +291,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr, { struct gpio_fan_data *fan_data = dev_get_drvdata(dev); unsigned long rpm; - int ret = count; + int ret; if (kstrtoul(buf, 10, &rpm)) return -EINVAL; @@ -308,7 +308,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr, exit_unlock: mutex_unlock(&fan_data->lock); - return ret; + return ret ? 
ret : count; } static DEVICE_ATTR_RW(pwm1); diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index b3694a4209b975..89928d38831b61 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -749,6 +749,7 @@ static ssize_t occ_show_extended(struct device *dev, * are dynamically allocated, we cannot use the existing kernel macros which * stringify the name argument. */ +__printf(7, 8) static void occ_init_attribute(struct occ_attribute *attr, int mode, ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf), ssize_t (*store)(struct device *dev, struct device_attribute *attr, diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index dcce882f3ebaaf..85f554044cf1ee 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1103,7 +1103,8 @@ static irqreturn_t i2c_imx_master_isr(struct imx_i2c_struct *i2c_imx, unsigned i case IMX_I2C_STATE_READ_BLOCK_DATA_LEN: i2c_imx_isr_read_block_data_len(i2c_imx); - i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE; + if (i2c_imx->state == IMX_I2C_STATE_READ_BLOCK_DATA_LEN) + i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE; break; case IMX_I2C_STATE_WRITE: diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c index 378104cd395e59..04cc7a9036e431 100644 --- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -1078,6 +1078,9 @@ static int tegra241_vcmdq_hw_init_user(struct tegra241_vcmdq *vcmdq) { char header[64]; + /* Reset VCMDQ */ + tegra241_vcmdq_hw_deinit(vcmdq); + /* Configure the vcmdq only; User space does the enabling */ writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE)); diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h index 52ef028ed2db97..d575f3ba9d3416 100644 --- a/drivers/iommu/generic_pt/iommu_pt.h +++ b/drivers/iommu/generic_pt/iommu_pt.h @@ -931,6 +931,8 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, struct pt_table_p *table) { struct pt_state pts = pt_init(range, level, table); + unsigned int flush_start_index = UINT_MAX; + unsigned int flush_end_index = UINT_MAX; struct pt_unmap_args *unmap = arg; unsigned int num_oas = 0; unsigned int start_index; @@ -986,6 +988,9 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, iommu_pages_list_add(&unmap->free_list, pts.table_lower); pt_clear_entries(&pts, ilog2(1)); + if (pts.index < flush_start_index) + flush_start_index = pts.index; + flush_end_index = pts.index + 1; } pts.index++; } else { @@ -999,7 +1004,10 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, num_contig_lg2 = pt_entry_num_contig_lg2(&pts); pt_clear_entries(&pts, num_contig_lg2); num_oas += log2_to_int(num_contig_lg2); + if (pts.index < flush_start_index) + flush_start_index = pts.index; pts.index += log2_to_int(num_contig_lg2); + flush_end_index = pts.index; } if (pts.index >= pts.end_index) break; @@ -1007,7 +1015,8 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, } while (true); unmap->unmapped += log2_mul(num_oas, pt_table_item_lg2sz(&pts)); - flush_writes_range(&pts, start_index, pts.index); + if (flush_start_index != flush_end_index) + flush_writes_range(&pts, flush_start_index, flush_end_index); return ret; } diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index b4c85242dc7962..3809793e0259f1 100644 --- a/drivers/iommu/intel/pasid.h +++ 
b/drivers/iommu/intel/pasid.h @@ -24,7 +24,7 @@ #define PASID_FLAG_NESTED BIT(1) #define PASID_FLAG_PAGE_SNOOP BIT(2) -#define PASID_FLAG_PWSNP BIT(2) +#define PASID_FLAG_PWSNP BIT(3) /* * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first- diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index dbe51ecb9a20f8..f606148920fa72 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -289,6 +289,7 @@ static void batch_clear(struct pfn_batch *batch) batch->end = 0; batch->pfns[0] = 0; batch->npfns[0] = 0; + batch->kind = 0; } /* diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c index 50a7b38381b986..96f9c20621cf54 100644 --- a/drivers/irqchip/irq-ls-extirq.c +++ b/drivers/irqchip/irq-ls-extirq.c @@ -168,40 +168,34 @@ ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node) return 0; } -static int __init -ls_extirq_of_init(struct device_node *node, struct device_node *parent) +static int ls_extirq_probe(struct platform_device *pdev) { struct irq_domain *domain, *parent_domain; + struct device_node *node, *parent; + struct device *dev = &pdev->dev; struct ls_extirq_data *priv; int ret; + node = dev->of_node; + parent = of_irq_find_parent(node); + if (!parent) + return dev_err_probe(dev, -ENODEV, "Failed to get IRQ parent node\n"); + parent_domain = irq_find_host(parent); - if (!parent_domain) { - pr_err("Cannot find parent domain\n"); - ret = -ENODEV; - goto err_irq_find_host; - } + if (!parent_domain) + return dev_err_probe(dev, -EPROBE_DEFER, "Cannot find parent domain\n"); - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto err_alloc_priv; - } + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n"); - /* - * All extirq OF nodes are under a scfg/syscon node with - * the 'ranges' property - */ - priv->intpcr = of_iomap(node, 0); - if (!priv->intpcr) { - pr_err("Cannot ioremap OF node %pOF\n", node); - ret = -ENOMEM; - goto err_iomap; - } + priv->intpcr = devm_of_iomap(dev, node, 0, NULL); + if (!priv->intpcr) + return dev_err_probe(dev, -ENOMEM, "Cannot ioremap OF node %pOF\n", node); ret = ls_extirq_parse_map(priv, node); if (ret) - goto err_parse_map; + return dev_err_probe(dev, ret, "Failed to parse IRQ map\n"); priv->big_endian = of_device_is_big_endian(node->parent); priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") || @@ -210,23 +204,26 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent) domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node), &extirq_domain_ops, priv); - if (!domain) { - ret = -ENOMEM; - goto err_add_hierarchy; - } + if (!domain) + return dev_err_probe(dev, -ENOMEM, "Failed to add IRQ domain\n"); return 0; - -err_add_hierarchy: -err_parse_map: - iounmap(priv->intpcr); -err_iomap: - kfree(priv); -err_alloc_priv: -err_irq_find_host: - return ret; } -IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init); -IRQCHIP_DECLARE(ls1043a_extirq, "fsl,ls1043a-extirq", ls_extirq_of_init); -IRQCHIP_DECLARE(ls1088a_extirq, "fsl,ls1088a-extirq", ls_extirq_of_init); +static const struct of_device_id ls_extirq_dt_ids[] = { + { .compatible = "fsl,ls1021a-extirq" }, + { .compatible = "fsl,ls1043a-extirq" }, + { .compatible = "fsl,ls1088a-extirq" }, + {} +}; +MODULE_DEVICE_TABLE(of, ls_extirq_dt_ids); + +static struct platform_driver ls_extirq_driver = { + .probe = 
ls_extirq_probe, + .driver = { + .name = "ls-extirq", + .of_match_table = ls_extirq_dt_ids, + } +}; + +builtin_platform_driver(ls_extirq_driver); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index a02aecac05cdf9..3fa3b13a410f42 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1107,17 +1107,13 @@ static void detached_dev_do_request(struct bcache_device *d, if (bio_op(orig_bio) == REQ_OP_DISCARD && !bdev_max_discard_sectors(dc->bdev)) { + bio_end_io_acct(orig_bio, start_time); bio_endio(orig_bio); return; } clone_bio = bio_alloc_clone(dc->bdev, orig_bio, GFP_NOIO, &d->bio_detached); - if (!clone_bio) { - orig_bio->bi_status = BLK_STS_RESOURCE; - bio_endio(orig_bio); - return; - } ddip = container_of(clone_bio, struct detached_dev_io_private, bio); /* Count on the bcache device */ diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c index e60e4ac1fd6fb1..3e86f346f751a3 100644 --- a/drivers/mtd/nand/spi/esmt.c +++ b/drivers/mtd/nand/spi/esmt.c @@ -215,7 +215,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = { SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)), SPINAND_INFO("F50D1G41LB", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f, - 0x7f), + 0x7f, 0x7f), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e7caf400a59cbd..45bd2bb102ffdf 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2202,11 +2202,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, unblock_netpoll_tx(); } - /* broadcast mode uses the all_slaves to loop through slaves. */ - if (bond_mode_can_use_xmit_hash(bond) || - BOND_MODE(bond) == BOND_MODE_BROADCAST) - bond_update_slave_arr(bond, NULL); - if (!slave_dev->netdev_ops->ndo_bpf || !slave_dev->netdev_ops->ndo_xdp_xmit) { if (bond->xdp_prog) { @@ -2240,6 +2235,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bpf_prog_inc(bond->xdp_prog); } + /* broadcast mode uses the all_slaves to loop through slaves. 
*/ + if (bond_mode_can_use_xmit_hash(bond) || + BOND_MODE(bond) == BOND_MODE_BROADCAST) + bond_update_slave_arr(bond, NULL); + bond_xdp_set_features(bond_dev); slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n", @@ -3047,8 +3047,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 __func__, &sip); return; } - slave->last_rx = jiffies; - slave->target_last_arp_rx[i] = jiffies; + WRITE_ONCE(slave->last_rx, jiffies); + WRITE_ONCE(slave->target_last_arp_rx[i], jiffies); } static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, @@ -3267,8 +3267,8 @@ static void bond_validate_na(struct bonding *bond, struct slave *slave, __func__, saddr); return; } - slave->last_rx = jiffies; - slave->target_last_arp_rx[i] = jiffies; + WRITE_ONCE(slave->last_rx, jiffies); + WRITE_ONCE(slave->target_last_arp_rx[i], jiffies); } static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, @@ -3338,7 +3338,7 @@ int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, (slave_do_arp_validate_only(bond) && is_ipv6) || #endif !slave_do_arp_validate_only(bond)) - slave->last_rx = jiffies; + WRITE_ONCE(slave->last_rx, jiffies); return RX_HANDLER_ANOTHER; } else if (is_arp) { return bond_arp_rcv(skb, bond, slave); @@ -3406,7 +3406,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, last_tx, 1) && - bond_time_in_interval(bond, slave->last_rx, 1)) { + bond_time_in_interval(bond, READ_ONCE(slave->last_rx), 1)) { bond_propose_link_state(slave, BOND_LINK_UP); slave_state_changed = 1; @@ -3430,8 +3430,10 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) * when the source ip is 0, so don't take the link down * if we don't know our ip yet */ - if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || - !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) { + if (!bond_time_in_interval(bond, last_tx, + bond->params.missed_max) || + !bond_time_in_interval(bond, READ_ONCE(slave->last_rx), + bond->params.missed_max)) { bond_propose_link_state(slave, BOND_LINK_DOWN); slave_state_changed = 1; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 384499c869b8da..f1c6e9d8f61671 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1152,7 +1152,7 @@ static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot, if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) { bond_for_each_slave(bond, slave, iter) - slave->target_last_arp_rx[slot] = last_rx; + WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx); targets[slot] = target; } } @@ -1221,8 +1221,8 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) bond_for_each_slave(bond, slave, iter) { targets_rx = slave->target_last_arp_rx; for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++) - targets_rx[i] = targets_rx[i+1]; - targets_rx[i] = 0; + WRITE_ONCE(targets_rx[i], READ_ONCE(targets_rx[i+1])); + WRITE_ONCE(targets_rx[i], 0); } for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++) targets[i] = targets[i+1]; @@ -1377,7 +1377,7 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) { bond_for_each_slave(bond, slave, iter) { - slave->target_last_arp_rx[slot] = last_rx; + WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx); slave_set_ns_maddr(bond, slave, target, &targets[slot]); } 
targets[slot] = *target; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index c2a3a4eef5b281..58da323f14d7c1 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1099,7 +1099,7 @@ static int at91_can_probe(struct platform_device *pdev) if (IS_ERR(transceiver)) { err = PTR_ERR(transceiver); dev_err_probe(&pdev->dev, err, "failed to get phy\n"); - goto exit_iounmap; + goto exit_free; } dev->netdev_ops = &at91_netdev_ops; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 192338b481f21f..d8b2dd74b3a1a4 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -610,7 +610,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) { struct gs_usb *parent = urb->context; struct gs_can *dev; - struct net_device *netdev; + struct net_device *netdev = NULL; int rc; struct net_device_stats *stats; struct gs_host_frame *hf = urb->transfer_buffer; @@ -768,7 +768,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } } else if (rc != -ESHUTDOWN && net_ratelimit()) { netdev_info(netdev, "failed to re-submit IN URB: %pe\n", - ERR_PTR(urb->status)); + ERR_PTR(rc)); } } diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c index 1c511f5dc6ab23..7b8c1549a0fb57 100644 --- a/drivers/net/dsa/yt921x.c +++ b/drivers/net/dsa/yt921x.c @@ -682,21 +682,22 @@ static int yt921x_read_mib(struct yt921x_priv *priv, int port) const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; u32 reg = YT921X_MIBn_DATA0(port) + desc->offset; u64 *valp = &((u64 *)mib)[i]; - u64 val = *valp; u32 val0; - u32 val1; + u64 val; res = yt921x_reg_read(priv, reg, &val0); if (res) break; if (desc->size <= 1) { - if (val < (u32)val) - /* overflow */ - val += (u64)U32_MAX + 1; - val &= ~U32_MAX; - val |= val0; + u64 old_val = *valp; + + val = (old_val & ~(u64)U32_MAX) | val0; + if (val < old_val) + val += 1ull << 32; } else { + u32 val1; + res = yt921x_reg_read(priv, reg + 4, &val1); if (res) break; diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 30f9d271e59531..71a2397edf2bbf 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -1089,6 +1089,9 @@ static int adin1110_check_spi(struct adin1110_priv *priv) reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(reset_gpio)) + return dev_err_probe(&priv->spidev->dev, PTR_ERR(reset_gpio), + "failed to get reset gpio\n"); if (reset_gpio) { /* MISO pin is used for internal configuration, can't have * anyone else disturbing the SDO line. 
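The yt921x change above reworks how a single-word MIB counter is folded into its 64-bit software shadow: the fresh 32-bit reading replaces the low word of the cached value, and the high word is bumped when the low word has wrapped since the last poll. A standalone sketch of that logic (hypothetical helper, not the driver's function):

#include <linux/limits.h>
#include <linux/types.h>

/* Widen a 32-bit hardware counter sample into a 64-bit running total. */
static u64 demo_widen_counter(u64 cached, u32 hw_now)
{
	u64 val = (cached & ~(u64)U32_MAX) | hw_now;

	/* A smaller low word than before means the hardware counter
	 * wrapped since the last poll; credit one full 2^32 wrap.
	 */
	if (val < cached)
		val += 1ULL << 32;

	return val;
}

This stays correct only if the counter cannot wrap more than once between polls, which is the usual assumption for periodic MIB reads.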
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index b9973956c48095..ceb6c11431dd9c 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -1261,7 +1261,7 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n", phy_modes(intf->phy_interface), intf->port); ret = -EINVAL; - goto err_free_netdev; + goto err_deregister_fixed_link; } ret = of_get_ethdev_address(ndev_dn, ndev); @@ -1286,6 +1286,9 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, return intf; +err_deregister_fixed_link: + if (of_phy_is_fixed_link(ndev_dn)) + of_phy_deregister_fixed_link(ndev_dn); err_free_netdev: free_netdev(ndev); err: diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 0732440eeacd62..c1a3df22525492 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3505,6 +3505,23 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) */ netdev->netdev_ops = &lionetdevops; + lio = GET_LIO(netdev); + + memset(lio, 0, sizeof(struct lio)); + + lio->ifidx = ifidx_or_pfnum; + + props = &octeon_dev->props[i]; + props->gmxport = resp->cfg_info.linfo.gmxport; + props->netdev = netdev; + + /* Point to the properties for octeon device to which this + * interface belongs. + */ + lio->oct_dev = octeon_dev; + lio->octprops = props; + lio->netdev = netdev; + retval = netif_set_real_num_rx_queues(netdev, num_oqueues); if (retval) { dev_err(&octeon_dev->pci_dev->dev, @@ -3521,16 +3538,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) goto setup_nic_dev_free; } - lio = GET_LIO(netdev); - - memset(lio, 0, sizeof(struct lio)); - - lio->ifidx = ifidx_or_pfnum; - - props = &octeon_dev->props[i]; - props->gmxport = resp->cfg_info.linfo.gmxport; - props->netdev = netdev; - lio->linfo.num_rxpciq = num_oqueues; lio->linfo.num_txpciq = num_iqueues; for (j = 0; j < num_oqueues; j++) { @@ -3596,13 +3603,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->min_mtu = LIO_MIN_MTU_SIZE; netdev->max_mtu = LIO_MAX_MTU_SIZE; - /* Point to the properties for octeon device to which this - * interface belongs. 
- */ - lio->oct_dev = octeon_dev; - lio->octprops = props; - lio->netdev = netdev; - dev_dbg(&octeon_dev->pci_dev->dev, "if%d gmx: %d hw_addr: 0x%llx\n", i, lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); @@ -3750,6 +3750,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (!devlink) { device_unlock(&octeon_dev->pci_dev->dev); dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); + i--; goto setup_nic_dev_free; } @@ -3765,11 +3766,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) setup_nic_dev_free: - while (i--) { + do { dev_err(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup failed\n", i); liquidio_destroy_nic_device(octeon_dev, i); - } + } while (i--); setup_nic_dev_done: diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index e02942dbbcce53..43c595f3b84e45 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -2212,11 +2212,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) setup_nic_dev_free: - while (i--) { + do { dev_err(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup failed\n", i); liquidio_destroy_nic_device(octeon_dev, i); - } + } while (i--); setup_nic_dev_done: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index b1e1ad9e4b48e6..66240c340492ce 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -1531,6 +1531,10 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) } if_id = (status & 0xFFFF0000) >> 16; + if (if_id >= ethsw->sw_attr.num_ifs) { + dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id); + goto out; + } port_priv = ethsw->ports[if_id]; if (status & DPSW_IRQ_EVENT_LINK_CHANGED) @@ -3024,6 +3028,12 @@ static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) goto err_close; } + if (!ethsw->sw_attr.num_ifs) { + dev_err(dev, "DPSW device has no interfaces\n"); + err = -ENODEV; + goto err_close; + } + err = dpsw_get_api_version(ethsw->mc_io, 0, &ethsw->major, &ethsw->minor); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 53b26cece16a80..e380a4f3985560 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2512,10 +2512,13 @@ int enetc_configure_si(struct enetc_ndev_priv *priv) struct enetc_hw *hw = &si->hw; int err; - /* set SI cache attributes */ - enetc_wr(hw, ENETC_SICAR0, - ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); - enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); + if (is_enetc_rev1(si)) { + /* set SI cache attributes */ + enetc_wr(hw, ENETC_SICAR0, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); + } + /* enable SI */ enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c index 498346dd996aa9..5850540634b0cd 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c @@ -59,10 +59,10 @@ static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si, if (si != 0) { __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si)); - __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si)); + __raw_writel(lower, hw->port + ENETC4_PSIPMAR1(si)); } else { __raw_writel(upper, hw->port + 
ENETC4_PMAR0); - __raw_writew(lower, hw->port + ENETC4_PMAR1); + __raw_writel(lower, hw->port + ENETC4_PMAR1); } } @@ -73,7 +73,7 @@ static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si, u16 lower; upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si)); - lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si)); + lower = __raw_readl(hw->port + ENETC4_PSIPMAR1(si)); put_unaligned_le32(upper, addr); put_unaligned_le16(lower, addr + 4); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c index 3d5f31879d5c64..a635bfdc30afc2 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -74,10 +74,6 @@ int enetc4_setup_cbdr(struct enetc_si *si) if (!user->ring) return -ENOMEM; - /* set CBDR cache attributes */ - enetc_wr(hw, ENETC_SICAR2, - ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); - regs.pir = hw->reg + ENETC_SICBDRPIR; regs.cir = hw->reg + ENETC_SICBDRCIR; regs.mr = hw->reg + ENETC_SICBDRMR; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 7b882b8921fe94..662e4fbafb74c7 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -708,13 +708,24 @@ struct enetc_cmd_rfse { #define ENETC_RFSE_EN BIT(15) #define ENETC_RFSE_MODE_BD 2 +static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr) +{ + u32 upper; + u16 lower; + + upper = __raw_readl(hw->reg + ENETC_SIPMAR0); + lower = __raw_readl(hw->reg + ENETC_SIPMAR1); + + put_unaligned_le32(upper, addr); + put_unaligned_le16(lower, addr + 4); +} + static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw, struct net_device *ndev) { - u8 addr[ETH_ALEN] __aligned(4); + u8 addr[ETH_ALEN]; - *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0); - *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1); + enetc_get_primary_mac_addr(hw, addr); eth_hw_addr_set(ndev, addr); } diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 970d5ca8cddeec..cbdf3a842cfe00 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -1206,6 +1206,11 @@ static inline bool gve_supports_xdp_xmit(struct gve_priv *priv) } } +static inline bool gve_is_clock_enabled(struct gve_priv *priv) +{ + return priv->nic_ts_report; +} + /* gqi napi handler defined in gve_main.c */ int gve_napi_poll(struct napi_struct *napi, int budget); diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 52500ae8348e62..66ddc4413f8dff 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -152,11 +152,13 @@ gve_get_ethtool_stats(struct net_device *netdev, u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt, - tmp_tx_pkts, tmp_tx_bytes; + tmp_tx_pkts, tmp_tx_bytes, + tmp_xdp_tx_errors, tmp_xdp_redirect_errors; u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt, rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, - tx_dropped; - int stats_idx, base_stats_idx, max_stats_idx; + tx_dropped, xdp_tx_errors, xdp_redirect_errors; + int rx_base_stats_idx, max_rx_stats_idx, max_tx_stats_idx; + int stats_idx, stats_region_len, nic_stats_len; struct stats *report_stats; int 
*rx_qid_to_stats_idx; int *tx_qid_to_stats_idx; @@ -198,6 +200,7 @@ gve_get_ethtool_stats(struct net_device *netdev, for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0, rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0, + xdp_tx_errors = 0, xdp_redirect_errors = 0, ring = 0; ring < priv->rx_cfg.num_queues; ring++) { if (priv->rx) { @@ -215,6 +218,9 @@ gve_get_ethtool_stats(struct net_device *netdev, rx->rx_desc_err_dropped_pkt; tmp_rx_hsplit_unsplit_pkt = rx->rx_hsplit_unsplit_pkt; + tmp_xdp_tx_errors = rx->xdp_tx_errors; + tmp_xdp_redirect_errors = + rx->xdp_redirect_errors; } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start)); rx_pkts += tmp_rx_pkts; @@ -224,6 +230,8 @@ gve_get_ethtool_stats(struct net_device *netdev, rx_buf_alloc_fail += tmp_rx_buf_alloc_fail; rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt; rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt; + xdp_tx_errors += tmp_xdp_tx_errors; + xdp_redirect_errors += tmp_xdp_redirect_errors; } } for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; @@ -249,8 +257,8 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = rx_bytes; data[i++] = tx_bytes; /* total rx dropped packets */ - data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail + - rx_desc_err_dropped_pkt; + data[i++] = rx_skb_alloc_fail + rx_desc_err_dropped_pkt + + xdp_tx_errors + xdp_redirect_errors; data[i++] = tx_dropped; data[i++] = priv->tx_timeo_cnt; data[i++] = rx_skb_alloc_fail; @@ -265,20 +273,38 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = priv->stats_report_trigger_cnt; i = GVE_MAIN_STATS_LEN; - /* For rx cross-reporting stats, start from nic rx stats in report */ - base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues + - GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; - /* The boundary between driver stats and NIC stats shifts if there are - * stopped queues. 
- */ - base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs + - NIC_TX_STATS_REPORT_NUM * num_stopped_txqs; - max_stats_idx = NIC_RX_STATS_REPORT_NUM * - (priv->rx_cfg.num_queues - num_stopped_rxqs) + - base_stats_idx; + rx_base_stats_idx = 0; + max_rx_stats_idx = 0; + max_tx_stats_idx = 0; + stats_region_len = priv->stats_report_len - + sizeof(struct gve_stats_report); + nic_stats_len = (NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues + + NIC_TX_STATS_REPORT_NUM * num_tx_queues) * sizeof(struct stats); + if (unlikely((stats_region_len - + nic_stats_len) % sizeof(struct stats))) { + net_err_ratelimited("Starting index of NIC stats should be multiple of stats size"); + } else { + /* For rx cross-reporting stats, + * start from nic rx stats in report + */ + rx_base_stats_idx = (stats_region_len - nic_stats_len) / + sizeof(struct stats); + /* The boundary between driver stats and NIC stats + * shifts if there are stopped queues + */ + rx_base_stats_idx += NIC_RX_STATS_REPORT_NUM * + num_stopped_rxqs + NIC_TX_STATS_REPORT_NUM * + num_stopped_txqs; + max_rx_stats_idx = NIC_RX_STATS_REPORT_NUM * + (priv->rx_cfg.num_queues - num_stopped_rxqs) + + rx_base_stats_idx; + max_tx_stats_idx = NIC_TX_STATS_REPORT_NUM * + (num_tx_queues - num_stopped_txqs) + + max_rx_stats_idx; + } /* Preprocess the stats report for rx, map queue id to start index */ skip_nic_stats = false; - for (stats_idx = base_stats_idx; stats_idx < max_stats_idx; + for (stats_idx = rx_base_stats_idx; stats_idx < max_rx_stats_idx; stats_idx += NIC_RX_STATS_REPORT_NUM) { u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id); @@ -311,6 +337,9 @@ gve_get_ethtool_stats(struct net_device *netdev, tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; tmp_rx_desc_err_dropped_pkt = rx->rx_desc_err_dropped_pkt; + tmp_xdp_tx_errors = rx->xdp_tx_errors; + tmp_xdp_redirect_errors = + rx->xdp_redirect_errors; } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start)); data[i++] = tmp_rx_bytes; @@ -321,8 +350,9 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = rx->rx_frag_alloc_cnt; /* rx dropped packets */ data[i++] = tmp_rx_skb_alloc_fail + - tmp_rx_buf_alloc_fail + - tmp_rx_desc_err_dropped_pkt; + tmp_rx_desc_err_dropped_pkt + + tmp_xdp_tx_errors + + tmp_xdp_redirect_errors; data[i++] = rx->rx_copybreak_pkt; data[i++] = rx->rx_copied_pkt; /* stats from NIC */ @@ -354,14 +384,9 @@ gve_get_ethtool_stats(struct net_device *netdev, i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; } - /* For tx cross-reporting stats, start from nic tx stats in report */ - base_stats_idx = max_stats_idx; - max_stats_idx = NIC_TX_STATS_REPORT_NUM * - (num_tx_queues - num_stopped_txqs) + - max_stats_idx; - /* Preprocess the stats report for tx, map queue id to start index */ skip_nic_stats = false; - for (stats_idx = base_stats_idx; stats_idx < max_stats_idx; + /* NIC TX stats start right after NIC RX stats */ + for (stats_idx = max_rx_stats_idx; stats_idx < max_tx_stats_idx; stats_idx += NIC_TX_STATS_REPORT_NUM) { u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id); @@ -938,7 +963,7 @@ static int gve_get_ts_info(struct net_device *netdev, ethtool_op_get_ts_info(netdev, info); - if (priv->nic_timestamp_supported) { + if (gve_is_clock_enabled(priv)) { info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/google/gve/gve_main.c 
b/drivers/net/ethernet/google/gve/gve_main.c index 7eb64e1e4d858b..dbc84de39b7078 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -283,9 +283,9 @@ static int gve_alloc_stats_report(struct gve_priv *priv) int tx_stats_num, rx_stats_num; tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * - gve_num_tx_queues(priv); + priv->tx_cfg.max_queues; rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * - priv->rx_cfg.num_queues; + priv->rx_cfg.max_queues; priv->stats_report_len = struct_size(priv->stats_report, stats, size_add(tx_stats_num, rx_stats_num)); priv->stats_report = @@ -680,10 +680,12 @@ static int gve_setup_device_resources(struct gve_priv *priv) } } - err = gve_init_clock(priv); - if (err) { - dev_err(&priv->pdev->dev, "Failed to init clock"); - goto abort_with_ptype_lut; + if (priv->nic_timestamp_supported) { + err = gve_init_clock(priv); + if (err) { + dev_warn(&priv->pdev->dev, "Failed to init clock, continuing without PTP support"); + err = 0; + } } err = gve_init_rss_config(priv, priv->rx_cfg.num_queues); @@ -2183,7 +2185,7 @@ static int gve_set_ts_config(struct net_device *dev, } if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) { - if (!priv->nic_ts_report) { + if (!gve_is_clock_enabled(priv)) { NL_SET_ERR_MSG_MOD(extack, "RX timestamping is not supported"); kernel_config->rx_filter = HWTSTAMP_FILTER_NONE; diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c index 073677d82ee8e5..de42fc2c19a1a6 100644 --- a/drivers/net/ethernet/google/gve/gve_ptp.c +++ b/drivers/net/ethernet/google/gve/gve_ptp.c @@ -70,11 +70,6 @@ static int gve_ptp_init(struct gve_priv *priv) struct gve_ptp *ptp; int err; - if (!priv->nic_timestamp_supported) { - dev_dbg(&priv->pdev->dev, "Device does not support PTP\n"); - return -EOPNOTSUPP; - } - priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL); if (!priv->ptp) return -ENOMEM; @@ -116,9 +111,6 @@ int gve_init_clock(struct gve_priv *priv) { int err; - if (!priv->nic_timestamp_supported) - return 0; - err = gve_ptp_init(priv); if (err) return err; diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index f1bd8f5d573239..63a96106a69371 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -484,7 +484,7 @@ int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) { const struct gve_xdp_buff *ctx = (void *)_ctx; - if (!ctx->gve->nic_ts_report) + if (!gve_is_clock_enabled(ctx->gve)) return -ENODATA; if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0b1cc0481027ab..d3bc3207054f90 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9030,7 +9030,6 @@ int i40e_open(struct net_device *netdev) TCP_FLAG_FIN | TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); - udp_tunnel_get_rx_info(netdev); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 98010354db155e..d47af94f31a991 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2783,12 +2783,14 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) ASSERT_RTNL(); ice_for_each_rxq(vsi, q_idx) - netif_queue_set_napi(netdev, q_idx, 
NETDEV_QUEUE_TYPE_RX, - &vsi->rx_rings[q_idx]->q_vector->napi); + if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, + &vsi->rx_rings[q_idx]->q_vector->napi); ice_for_each_txq(vsi, q_idx) - netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, - &vsi->tx_rings[q_idx]->q_vector->napi); + if (vsi->tx_rings[q_idx] && vsi->tx_rings[q_idx]->q_vector) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, + &vsi->tx_rings[q_idx]->q_vector->napi); /* Also set the interrupt number for the NAPI */ ice_for_each_q_vector(vsi, v_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index de488185cd4a87..d04605d3e61af1 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3314,18 +3314,20 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) if (ice_is_reset_in_progress(pf->state)) goto skip_irq; - if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { - /* Process outstanding Tx timestamps. If there is more work, - * re-arm the interrupt to trigger again. - */ - if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { - wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); - ice_flush(hw); - } - } + if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) + ice_ptp_process_ts(pf); skip_irq: ice_irq_dynamic_ena(hw, NULL, NULL); + ice_flush(hw); + + if (ice_ptp_tx_tstamps_pending(pf)) { + /* If any new Tx timestamps happened while in interrupt, + * re-arm the interrupt to trigger it again. + */ + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); + } return IRQ_HANDLED; } @@ -6982,7 +6984,6 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) cur_ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes + pf->stats.rx_undersize + - pf->hw_csum_rx_error + pf->stats.rx_jabber + pf->stats.rx_fragments + pf->stats.rx_oversize; @@ -7808,6 +7809,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) /* Restore timestamp mode settings after VSI rebuild */ ice_ptp_restore_timestamp_mode(pf); + + /* Start PTP periodic work after VSI is fully rebuilt */ + ice_ptp_queue_work(pf); return; err_vsi_rebuild: @@ -9658,9 +9662,6 @@ int ice_open_internal(struct net_device *netdev) netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); - /* Update existing tunnels information */ - udp_tunnel_get_rx_info(netdev); - return err; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 4c8d20f2d2c0a4..27268300147625 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -573,6 +573,9 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) pf = ptp_port_to_pf(ptp_port); hw = &pf->hw; + if (!tx->init) + return; + /* Read the Tx ready status first */ if (tx->has_ready_bitmap) { err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); @@ -674,14 +677,9 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) pf->ptp.tx_hwtstamp_good += tstamp_good; } -/** - * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device - * @pf: Board private structure - */ -static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) +static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf) { struct ice_ptp_port *port; - unsigned int i; 
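The ice_main.c hunk above splits timestamp handling into two steps: the interrupt thread first processes whatever is ready via ice_ptp_process_ts(), then asks a separate predicate, ice_ptp_tx_tstamps_pending(), whether anything new arrived, and only then rewrites the cause bit to re-trigger the interrupt. A schematic of that shape, with hypothetical helpers standing in for the driver's functions and registers:

#include <linux/interrupt.h>
#include <linux/types.h>

struct demo_pf;					/* hypothetical device state */

void demo_process_tx_timestamps(struct demo_pf *pf);
bool demo_tx_timestamps_pending(struct demo_pf *pf);
void demo_reenable_misc_irq(struct demo_pf *pf);
void demo_retrigger_tstamp_irq(struct demo_pf *pf);	/* write cause bit + flush */

static irqreturn_t demo_misc_thread_fn(struct demo_pf *pf)
{
	/* Consume timestamps that are already complete. */
	demo_process_tx_timestamps(pf);

	demo_reenable_misc_irq(pf);

	/* Re-arm only if new work appeared meanwhile, instead of having
	 * the processing path return a "pending" status code.
	 */
	if (demo_tx_timestamps_pending(pf))
		demo_retrigger_tstamp_irq(pf);

	return IRQ_HANDLED;
}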
mutex_lock(&pf->adapter->ports.lock); list_for_each_entry(port, &pf->adapter->ports.ports, list_node) { @@ -693,49 +691,6 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) ice_ptp_process_tx_tstamp(tx); } mutex_unlock(&pf->adapter->ports.lock); - - for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { - u64 tstamp_ready; - int err; - - /* Read the Tx ready status first */ - err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); - if (err) - break; - else if (tstamp_ready) - return ICE_TX_TSTAMP_WORK_PENDING; - } - - return ICE_TX_TSTAMP_WORK_DONE; -} - -/** - * ice_ptp_tx_tstamp - Process Tx timestamps for this function. - * @tx: Tx tracking structure to initialize - * - * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete - * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise. - */ -static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) -{ - bool more_timestamps; - unsigned long flags; - - if (!tx->init) - return ICE_TX_TSTAMP_WORK_DONE; - - /* Process the Tx timestamp tracker */ - ice_ptp_process_tx_tstamp(tx); - - /* Check if there are outstanding Tx timestamps */ - spin_lock_irqsave(&tx->lock, flags); - more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); - spin_unlock_irqrestore(&tx->lock, flags); - - if (more_timestamps) - return ICE_TX_TSTAMP_WORK_PENDING; - - return ICE_TX_TSTAMP_WORK_DONE; } /** @@ -1347,9 +1302,12 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup) /* Do not reconfigure E810 or E830 PHY */ return; case ICE_MAC_GENERIC: - case ICE_MAC_GENERIC_3K_E825: ice_ptp_port_phy_restart(ptp_port); return; + case ICE_MAC_GENERIC_3K_E825: + if (linkup) + ice_ptp_port_phy_restart(ptp_port); + return; default: dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); } @@ -2663,30 +2621,92 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) return idx + tx->offset; } -/** - * ice_ptp_process_ts - Process the PTP Tx timestamps - * @pf: Board private structure - * - * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx - * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 
- */ -enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) +void ice_ptp_process_ts(struct ice_pf *pf) { switch (pf->ptp.tx_interrupt_mode) { case ICE_PTP_TX_INTERRUPT_NONE: /* This device has the clock owner handle timestamps for it */ - return ICE_TX_TSTAMP_WORK_DONE; + return; case ICE_PTP_TX_INTERRUPT_SELF: /* This device handles its own timestamps */ - return ice_ptp_tx_tstamp(&pf->ptp.port.tx); + ice_ptp_process_tx_tstamp(&pf->ptp.port.tx); + return; case ICE_PTP_TX_INTERRUPT_ALL: /* This device handles timestamps for all ports */ - return ice_ptp_tx_tstamp_owner(pf); + ice_ptp_tx_tstamp_owner(pf); + return; + default: + WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", + pf->ptp.tx_interrupt_mode); + return; + } +} + +static bool ice_port_has_timestamps(struct ice_ptp_tx *tx) +{ + bool more_timestamps; + + scoped_guard(spinlock_irqsave, &tx->lock) { + if (!tx->init) + return false; + + more_timestamps = !bitmap_empty(tx->in_use, tx->len); + } + + return more_timestamps; +} + +static bool ice_any_port_has_timestamps(struct ice_pf *pf) +{ + struct ice_ptp_port *port; + + scoped_guard(mutex, &pf->adapter->ports.lock) { + list_for_each_entry(port, &pf->adapter->ports.ports, + list_node) { + struct ice_ptp_tx *tx = &port->tx; + + if (ice_port_has_timestamps(tx)) + return true; + } + } + + return false; +} + +bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + unsigned int i; + + /* Check software indicator */ + switch (pf->ptp.tx_interrupt_mode) { + case ICE_PTP_TX_INTERRUPT_NONE: + return false; + case ICE_PTP_TX_INTERRUPT_SELF: + if (ice_port_has_timestamps(&pf->ptp.port.tx)) + return true; + break; + case ICE_PTP_TX_INTERRUPT_ALL: + if (ice_any_port_has_timestamps(pf)) + return true; + break; default: WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", pf->ptp.tx_interrupt_mode); - return ICE_TX_TSTAMP_WORK_DONE; + break; + } + + /* Check hardware indicator */ + for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { + u64 tstamp_ready = 0; + int err; + + err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); + if (err || tstamp_ready) + return true; } + + return false; } /** @@ -2738,7 +2758,9 @@ irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) return IRQ_WAKE_THREAD; case ICE_MAC_E830: /* E830 can read timestamps in the top half using rd32() */ - if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { + ice_ptp_process_ts(pf); + + if (ice_ptp_tx_tstamps_pending(pf)) { /* Process outstanding Tx timestamps. If there * is more work, re-arm the interrupt to trigger again. */ @@ -2817,6 +2839,20 @@ static void ice_ptp_periodic_work(struct kthread_work *work) msecs_to_jiffies(err ? 10 : 500)); } +/** + * ice_ptp_queue_work - Queue PTP periodic work for a PF + * @pf: Board private structure + * + * Helper function to queue PTP periodic work after VSI rebuild completes. + * This ensures that PTP work only runs when VSI structures are ready. + */ +void ice_ptp_queue_work(struct ice_pf *pf) +{ + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) && + pf->ptp.state == ICE_PTP_READY) + kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0); +} + /** * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild * @pf: Board private structure @@ -2835,10 +2871,15 @@ static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild, struct ice_pf *peer_pf = ptp_port_to_pf(port); if (!ice_is_primary(&peer_pf->hw)) { - if (rebuild) + if (rebuild) { + /* TODO: When implementing rebuild=true: + * 1. 
Ensure secondary PFs' VSIs are rebuilt + * 2. Call ice_ptp_queue_work(peer_pf) after VSI rebuild + */ ice_ptp_rebuild(peer_pf, reset_type); - else + } else { ice_ptp_prepare_for_reset(peer_pf, reset_type); + } } } } @@ -2984,9 +3025,6 @@ void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ptp->state = ICE_PTP_READY; - /* Start periodic work going */ - kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); - dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); return; @@ -3191,8 +3229,9 @@ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { switch (pf->hw.mac_type) { case ICE_MAC_GENERIC: - /* E822 based PHY has the clock owner process the interrupt - * for all ports. + case ICE_MAC_GENERIC_3K_E825: + /* E82x hardware has the clock owner process timestamps for + * all ports. */ if (ice_pf_src_tmr_owned(pf)) pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 27016aac4f1e8a..8c44bd758a4fe3 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -304,8 +304,9 @@ void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx); void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx); -enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); +void ice_ptp_process_ts(struct ice_pf *pf); irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf); +bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf); u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts); @@ -317,6 +318,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, void ice_ptp_init(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf); void ice_ptp_link_change(struct ice_pf *pf, bool linkup); +void ice_ptp_queue_work(struct ice_pf *pf); #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ static inline int ice_ptp_hwtstamp_get(struct net_device *netdev, @@ -345,16 +347,18 @@ static inline void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { } -static inline bool ice_ptp_process_ts(struct ice_pf *pf) -{ - return true; -} +static inline void ice_ptp_process_ts(struct ice_pf *pf) { } static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) { return IRQ_HANDLED; } +static inline bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf) +{ + return false; +} + static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) { @@ -383,6 +387,10 @@ static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup) { } +static inline void ice_ptp_queue_work(struct ice_pf *pf) +{ +} + static inline int ice_ptp_clock_index(struct ice_pf *pf) { return -1; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 034618e79169e4..c58051e4350be2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -11468,20 +11468,17 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) */ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; - bool disable_dev; int err = -EIO; if (hw->mac.type != ixgbe_mac_e610) - goto clean_up_probe; + return err; 
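The ice_ptp.h hunk above keeps the usual arrangement for optional kernel features: real prototypes when CONFIG_PTP_1588_CLOCK is enabled, and static inline no-op stubs otherwise, so callers never need #ifdef guards. A minimal sketch of the same pattern for a hypothetical feature:

/* demo_feature.h - hypothetical header mirroring the stub pattern above */
#if IS_ENABLED(CONFIG_DEMO_FEATURE)
struct demo_dev;
void demo_feature_process(struct demo_dev *dd);
bool demo_feature_work_pending(struct demo_dev *dd);
#else /* !CONFIG_DEMO_FEATURE */
struct demo_dev;
static inline void demo_feature_process(struct demo_dev *dd) { }
static inline bool demo_feature_work_pending(struct demo_dev *dd)
{
	return false;
}
#endif /* CONFIG_DEMO_FEATURE */

Keeping the stubs inline and trivially empty lets the compiler drop the calls entirely when the option is off.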
ixgbe_get_hw_control(adapter); - mutex_init(&hw->aci.lock); err = ixgbe_get_flash_data(&adapter->hw); if (err) - goto shutdown_aci; + goto err_release_hw_control; timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task); @@ -11504,16 +11501,8 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter) devl_unlock(adapter->devlink); return 0; -shutdown_aci: - mutex_destroy(&adapter->hw.aci.lock); +err_release_hw_control: ixgbe_release_hw_control(adapter); -clean_up_probe: - disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); - free_netdev(netdev); - devlink_free(adapter->devlink); - pci_release_mem_regions(pdev); - if (disable_dev) - pci_disable_device(pdev); return err; } @@ -11655,8 +11644,13 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; - if (ixgbe_check_fw_error(adapter)) - return ixgbe_recovery_probe(adapter); + if (ixgbe_check_fw_error(adapter)) { + err = ixgbe_recovery_probe(adapter); + if (err) + goto err_sw_init; + + return 0; + } if (adapter->hw.mac.type == ixgbe_mac_e610) { err = ixgbe_get_caps(&adapter->hw); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 44b201817d94c6..c116da7d7f18c8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1389,7 +1389,7 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type); if (efs->rule.flow_type < 0) { ret = efs->rule.flow_type; - goto clean_rule; + goto clean_eth_rule; } ret = mvpp2_cls_rfs_parse_rule(&efs->rule); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index bcea3fc26a8c7d..57db7ea2f5be9c 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1338,7 +1338,7 @@ int octep_device_setup(struct octep_device *oct) ret = octep_ctrl_net_init(oct); if (ret) - return ret; + goto unsupported_dev; INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task); INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 36806e813c33cc..1301c56e20d653 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -613,3 +613,19 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) cq->dbg = NULL; } } + +static int vhca_id_show(struct seq_file *file, void *priv) +{ + struct mlx5_core_dev *dev = file->private; + + seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id)); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(vhca_id); + +void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev) +{ + debugfs_create_file("vhca_id", 0400, dev->priv.dbg.dbg_root, dev, + &vhca_id_fops); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 64c04f52990fe0..781e39b5aa1def 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -575,3 +575,17 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev return plen && flen && flen == plen && !memcmp(fsystem_guid, psystem_guid, flen); } + +void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev) +{ 
+ struct mlx5_priv *priv = &dev->priv; + + if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH]) + device_lock_assert(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH]->adev.dev); + else + mlx5_core_err(dev, "ETH driver already removed\n"); + if (priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP]) + del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP]->adev); + if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP]) + del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP]->adev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index a8fb4bec369cf4..9c7064187ed0ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -430,7 +430,8 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->replay_esn.esn = sa_entry->esn_state.esn; attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb; attrs->replay_esn.overlap = sa_entry->esn_state.overlap; - if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) + if (attrs->dir == XFRM_DEV_OFFLOAD_OUT || + x->xso.type != XFRM_DEV_OFFLOAD_PACKET) goto skip_replay_window; switch (x->replay_esn->replay_window) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c index c17ea0fcd8efe4..ef7f5338540fdf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c @@ -177,8 +177,6 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct net *net = sock_net(skb->sk); - const struct ipv6hdr *ip6; - struct tcphdr *th; if (!mlx5e_psp_set_state(priv, skb, psp_st)) return true; @@ -190,11 +188,18 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev, return false; } if (skb_is_gso(skb)) { - ip6 = ipv6_hdr(skb); - th = inner_tcp_hdr(skb); + int len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb); + struct tcphdr *th = inner_tcp_hdr(skb); - th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr, - &ip6->daddr, 0); + if (skb->protocol == htons(ETH_P_IP)) { + const struct iphdr *ip = ip_hdr(skb); + + th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0); + } else { + const struct ipv6hdr *ip6 = ipv6_hdr(skb); + + th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0); + } } return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9042c8a388e42b..4b2963bbe7ff45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4052,6 +4052,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) mlx5e_queue_update_stats(priv); } + netdev_stats_to_stats64(stats, &dev->stats); + if (mlx5e_is_uplink_rep(priv)) { struct mlx5e_vport_stats *vstats = &priv->stats.vport; @@ -4068,21 +4070,21 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) mlx5e_fold_sw_stats64(priv, stats); } - stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer; - stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards); + stats->rx_missed_errors += priv->stats.qcnt.rx_out_of_buffer; + stats->rx_dropped += PPORT_2863_GET(pstats, if_in_discards); - stats->rx_length_errors = + stats->rx_length_errors += PPORT_802_3_GET(pstats, a_in_range_length_errors) + PPORT_802_3_GET(pstats, a_out_of_range_length_field) + 
PPORT_802_3_GET(pstats, a_frame_too_long_errors) + VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small); - stats->rx_crc_errors = + stats->rx_crc_errors += PPORT_802_3_GET(pstats, a_frame_check_sequence_errors); - stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors); - stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards); - stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + - stats->rx_frame_errors; - stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; + stats->rx_frame_errors += PPORT_802_3_GET(pstats, a_alignment_errors); + stats->tx_aborted_errors += PPORT_2863_GET(pstats, if_out_discards); + stats->rx_errors += stats->rx_length_errors + stats->rx_crc_errors + + stats->rx_frame_errors; + stats->tx_errors += stats->tx_aborted_errors + stats->tx_carrier_errors; } static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv) @@ -6842,6 +6844,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = edev->mdev; + mlx5_eswitch_safe_aux_devs_remove(mdev); mlx5_core_uplink_netdev_set(mdev, NULL); if (priv->profile) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index a8773b2342c2af..424786f489ecfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2147,11 +2147,14 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow, static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow) { + struct mlx5_devcom_comp_dev *devcom; + struct mlx5_devcom_comp_dev *pos; + struct mlx5_eswitch *peer_esw; int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) { - if (i == mlx5_get_dev_index(flow->priv->mdev)) - continue; + devcom = flow->priv->mdev->priv.eswitch->devcom; + mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) { + i = mlx5_get_dev_index(peer_esw->dev); mlx5e_tc_del_fdb_peer_flow(flow, i); } } @@ -5513,12 +5516,16 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) { + struct mlx5_devcom_comp_dev *devcom; + struct mlx5_devcom_comp_dev *pos; struct mlx5e_tc_flow *flow, *tmp; + struct mlx5_eswitch *peer_esw; int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) { - if (i == mlx5_get_dev_index(esw->dev)) - continue; + devcom = esw->devcom; + + mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) { + i = mlx5_get_dev_index(peer_esw->dev); list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i]) mlx5e_tc_del_fdb_peers_flow(flow); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index 1c37098e09ea51..49a637829c5941 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -188,7 +188,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, if (IS_ERR(vport->ingress.acl)) { err = PTR_ERR(vport->ingress.acl); vport->ingress.acl = NULL; - return err; + goto out; } err = esw_acl_ingress_lgcy_groups_create(esw, vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index ad1073f7b79f78..714ad28e8445b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -929,6 +929,7 @@ int 
mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev, u16 vport_num); bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev); +void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev); #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } @@ -1009,9 +1010,12 @@ mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev) static inline bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id) { - return -EOPNOTSUPP; + return false; } +static inline void +mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) {} + #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index ea94a727633f1f..02b7e474586d9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -3981,6 +3981,32 @@ static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw, return true; } +#define MLX5_ESW_HOLD_TIMEOUT_MS 7000 +#define MLX5_ESW_HOLD_RETRY_DELAY_MS 500 + +void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) +{ + unsigned long timeout; + bool hold_esw = true; + + /* Wait for any concurrent eswitch mode transition to complete. */ + if (!mlx5_esw_hold(dev)) { + timeout = jiffies + msecs_to_jiffies(MLX5_ESW_HOLD_TIMEOUT_MS); + while (!mlx5_esw_hold(dev)) { + if (!time_before(jiffies, timeout)) { + hold_esw = false; + break; + } + msleep(MLX5_ESW_HOLD_RETRY_DELAY_MS); + } + } + if (hold_esw) { + if (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) + mlx5_core_reps_aux_devs_remove(dev); + mlx5_esw_release(dev); + } +} + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index ced747bef6415f..c348ee62cd3af9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -1198,7 +1198,8 @@ int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, boo u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; - if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) + if (disconnect && + !MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) return -EOPNOTSUPP; MLX5_SET(set_flow_table_root_in, in, opcode, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4209da722f9a6c..55b4e0cceae2f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1806,16 +1806,6 @@ static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev) return -ENOMEM; } -static int vhca_id_show(struct seq_file *file, void *priv) -{ - struct mlx5_core_dev *dev = file->private; - - seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id)); - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(vhca_id); - static int mlx5_notifiers_init(struct mlx5_core_dev *dev) { int err; @@ -1884,7 +1874,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device), 
mlx5_debugfs_root); - debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops); + INIT_LIST_HEAD(&priv->traps); err = mlx5_cmd_init(dev); @@ -2022,6 +2012,8 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto err_init_one; } + mlx5_vhca_debugfs_init(dev); + pci_save_state(pdev); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index cfebc110c02fd9..f2d74382fb85d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -258,6 +258,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages); void mlx5_cmd_flush(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); +void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, u8 access_reg_group); @@ -290,6 +291,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev); bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev); +void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev); void mlx5_fw_reporters_create(struct mlx5_core_dev *dev); int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index b706f1486504a7..c45540fe7d9d95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -76,6 +76,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto init_one_err; } + mlx5_vhca_debugfs_init(mdev); return 0; init_one_err: diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 36af94a2e062aa..2794f75df8fcba 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1524,9 +1524,8 @@ static void rocker_world_port_post_fini(struct rocker_port *rocker_port) { struct rocker_world_ops *wops = rocker_port->rocker->wops; - if (!wops->port_post_fini) - return; - wops->port_post_fini(rocker_port); + if (wops->port_post_fini) + wops->port_post_fini(rocker_port); kfree(rocker_port->wpriv); } diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 6ef96292909a2f..3db589b90b68ad 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -2182,12 +2182,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx) { - int rc; - - mutex_lock(&efx->net_dev->ethtool->rss_lock); - rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); - mutex_unlock(&efx->net_dev->ethtool->rss_lock); - return rc; + return efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); } void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx) diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c index 220eb5ce75833d..b49c4708bf9eb1 100644 --- a/drivers/net/ethernet/spacemit/k1_emac.c +++ b/drivers/net/ethernet/spacemit/k1_emac.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -38,7 +39,7 @@ #define EMAC_DEFAULT_BUFSIZE 
1536 #define EMAC_RX_BUF_2K 2048 -#define EMAC_RX_BUF_4K 4096 +#define EMAC_RX_BUF_MAX FIELD_MAX(RX_DESC_1_BUFFER_SIZE_1_MASK) /* Tuning parameters from SpacemiT */ #define EMAC_TX_FRAMES 64 @@ -202,8 +203,7 @@ static void emac_init_hw(struct emac_priv *priv) { /* Destination address for 802.3x Ethernet flow control */ u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 }; - - u32 rxirq = 0, dma = 0; + u32 rxirq = 0, dma = 0, frame_sz; regmap_set_bits(priv->regmap_apmu, priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG, @@ -228,6 +228,15 @@ static void emac_init_hw(struct emac_priv *priv) DEFAULT_TX_THRESHOLD); emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD); + /* Set maximum frame size and jabber size based on configured MTU, + * accounting for Ethernet header, double VLAN tags, and FCS. + */ + frame_sz = priv->ndev->mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN; + + emac_wr(priv, MAC_MAXIMUM_FRAME_SIZE, frame_sz); + emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz); + emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz); + /* Configure flow control (enabled in emac_adjust_link() later) */ emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH); emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH); @@ -924,14 +933,14 @@ static int emac_change_mtu(struct net_device *ndev, int mtu) return -EBUSY; } - frame_len = mtu + ETH_HLEN + ETH_FCS_LEN; + frame_len = mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN; if (frame_len <= EMAC_DEFAULT_BUFSIZE) priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE; else if (frame_len <= EMAC_RX_BUF_2K) priv->dma_buf_sz = EMAC_RX_BUF_2K; else - priv->dma_buf_sz = EMAC_RX_BUF_4K; + priv->dma_buf_sz = EMAC_RX_BUF_MAX; ndev->mtu = mtu; @@ -1099,7 +1108,13 @@ static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res, 100, 10000); if (ret) { - netdev_err(priv->ndev, "Read stat timeout\n"); + /* + * This could be caused by the PHY stopping its refclk even when + * the link is up, for power saving. See also comments in + * emac_stats_update(). + */ + dev_err_ratelimited(&priv->ndev->dev, + "Read stat timeout. PHY clock stopped?\n"); return ret; } @@ -1147,17 +1162,25 @@ static void emac_stats_update(struct emac_priv *priv) assert_spin_locked(&priv->stats_lock); - if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) { - /* Not up, don't try to update */ + /* + * We can't read statistics if the interface is not up. Also, some PHYs + * stop their reference clocks for link down power saving, which also + * causes reading statistics to time out. Don't update and don't + * reschedule in these cases. + */ + if (!netif_running(priv->ndev) || + !netif_carrier_ok(priv->ndev) || + !netif_device_present(priv->ndev)) { return; } for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) { /* - * If reading stats times out, everything is broken and there's - * nothing we can do. Reading statistics also can't return an - * error, so just return without updating and without - * rescheduling. + * If reading stats times out anyway, the stat registers will be + * stuck, and we can't really recover from that. + * + * Reading statistics also can't return an error, so just return + * without updating and without rescheduling. */ if (emac_tx_read_stat_cnt(priv, i, &res)) return; @@ -1636,6 +1659,12 @@ static void emac_adjust_link(struct net_device *dev) emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl); emac_set_fc_autoneg(priv); + + /* + * Reschedule stats updates now that link is up. See comments in + * emac_stats_update(). 
+ */ + mod_timer(&priv->stats_timer, jiffies); } phy_print_status(phydev); @@ -2005,7 +2034,7 @@ static int emac_probe(struct platform_device *pdev) ndev->hw_features = NETIF_F_SG; ndev->features |= ndev->hw_features; - ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN); + ndev->max_mtu = EMAC_RX_BUF_MAX - (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN); ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3f42843cd9ed01..a379221b96a348 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -8042,7 +8042,7 @@ int stmmac_suspend(struct device *dev) u32 chan; if (!ndev || !netif_running(ndev)) - return 0; + goto suspend_bsp; mutex_lock(&priv->lock); @@ -8082,6 +8082,7 @@ int stmmac_suspend(struct device *dev) if (stmmac_fpe_supported(priv)) ethtool_mmsv_stop(&priv->fpe_cfg.mmsv); +suspend_bsp: if (priv->plat->suspend) return priv->plat->suspend(dev, priv->plat->bsp_priv); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 54c24cd3d3be63..b0e18bdc2c8510 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -305,12 +305,19 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) return 0; } -static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +static void cpsw_ndo_set_rx_mode_work(struct work_struct *work) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work); struct cpsw_common *cpsw = priv->cpsw; + struct net_device *ndev = priv->ndev; int slave_port = -1; + rtnl_lock(); + if (!netif_running(ndev)) + goto unlock_rtnl; + + netif_addr_lock_bh(ndev); + if (cpsw->data.dual_emac) slave_port = priv->emac_port + 1; @@ -318,7 +325,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* Enable promiscuous mode */ cpsw_set_promiscious(ndev, true); cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port); - return; + goto unlock_addr; } else { /* Disable promiscuous mode */ cpsw_set_promiscious(ndev, false); @@ -331,6 +338,18 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* add/remove mcast address either for real netdev or for vlan */ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, cpsw_del_mc_addr); + +unlock_addr: + netif_addr_unlock_bh(ndev); +unlock_rtnl: + rtnl_unlock(); +} + +static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + schedule_work(&priv->rx_mode_work); } static unsigned int cpsw_rxbuf_total_len(unsigned int len) @@ -1472,6 +1491,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) priv_sl2->ndev = ndev; priv_sl2->dev = &ndev->dev; priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + INIT_WORK(&priv_sl2->rx_mode_work, cpsw_ndo_set_rx_mode_work); if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, @@ -1653,6 +1673,7 @@ static int cpsw_probe(struct platform_device *pdev) priv->dev = dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->emac_port = 0; + INIT_WORK(&priv->rx_mode_work, cpsw_ndo_set_rx_mode_work); if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); @@ -1758,6 +1779,8 @@ static int cpsw_probe(struct platform_device *pdev) static void 
cpsw_remove(struct platform_device *pdev) { struct cpsw_common *cpsw = platform_get_drvdata(pdev); + struct net_device *ndev; + struct cpsw_priv *priv; int i, ret; ret = pm_runtime_resume_and_get(&pdev->dev); @@ -1770,9 +1793,15 @@ static void cpsw_remove(struct platform_device *pdev) return; } - for (i = 0; i < cpsw->data.slaves; i++) - if (cpsw->slaves[i].ndev) - unregister_netdev(cpsw->slaves[i].ndev); + for (i = 0; i < cpsw->data.slaves; i++) { + ndev = cpsw->slaves[i].ndev; + if (!ndev) + continue; + + priv = netdev_priv(ndev); + unregister_netdev(ndev); + disable_work_sync(&priv->rx_mode_work); + } cpts_release(cpsw->cpts); cpdma_ctlr_destroy(cpsw->dma); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index ab88d4c02cbde7..21af0a10626aaf 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -248,16 +248,22 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) return 0; } -static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +static void cpsw_ndo_set_rx_mode_work(struct work_struct *work) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work); struct cpsw_common *cpsw = priv->cpsw; + struct net_device *ndev = priv->ndev; + rtnl_lock(); + if (!netif_running(ndev)) + goto unlock_rtnl; + + netif_addr_lock_bh(ndev); if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ cpsw_set_promiscious(ndev, true); cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port); - return; + goto unlock_addr; } /* Disable promiscuous mode */ @@ -270,6 +276,18 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* add/remove mcast address either for real netdev or for vlan */ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, cpsw_del_mc_addr); + +unlock_addr: + netif_addr_unlock_bh(ndev); +unlock_rtnl: + rtnl_unlock(); +} + +static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + schedule_work(&priv->rx_mode_work); } static unsigned int cpsw_rxbuf_total_len(unsigned int len) @@ -1398,6 +1416,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->emac_port = i + 1; priv->tx_packet_min = CPSW_MIN_PACKET_SIZE; + INIT_WORK(&priv->rx_mode_work, cpsw_ndo_set_rx_mode_work); if (is_valid_ether_addr(slave_data->mac_addr)) { ether_addr_copy(priv->mac_addr, slave_data->mac_addr); @@ -1447,13 +1466,18 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) static void cpsw_unregister_ports(struct cpsw_common *cpsw) { + struct net_device *ndev; + struct cpsw_priv *priv; int i = 0; for (i = 0; i < cpsw->data.slaves; i++) { - if (!cpsw->slaves[i].ndev) + ndev = cpsw->slaves[i].ndev; + if (!ndev) continue; - unregister_netdev(cpsw->slaves[i].ndev); + priv = netdev_priv(ndev); + unregister_netdev(ndev); + disable_work_sync(&priv->rx_mode_work); } } diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 91add8925e235c..acb6181c5c9e1b 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -391,6 +391,7 @@ struct cpsw_priv { u32 tx_packet_min; struct cpsw_ale_ratelimit ale_bc_ratelimit; struct cpsw_ale_ratelimit ale_mc_ratelimit; + struct work_struct rx_mode_work; }; #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 
b4df7e184791d0..c509228be84d1b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1567,9 +1567,10 @@ int macvlan_common_newlink(struct net_device *dev, /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. */ - if (create && macvlan_port_get_rtnl(lowerdev)) { + if (macvlan_port_get_rtnl(lowerdev)) { macvlan_flush_sources(port, vlan); - macvlan_port_destroy(port->dev); + if (create) + macvlan_port_destroy(port->dev); } return err; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 05de68b9f7191f..8208ecbb575c5a 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -2643,11 +2643,21 @@ static int kszphy_probe(struct phy_device *phydev) kszphy_parse_led_mode(phydev); - clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, "rmii-ref"); + clk = devm_clk_get_optional(&phydev->mdio.dev, "rmii-ref"); /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */ if (!IS_ERR_OR_NULL(clk)) { - unsigned long rate = clk_get_rate(clk); bool rmii_ref_clk_sel_25_mhz; + unsigned long rate; + int err; + + err = clk_prepare_enable(clk); + if (err) { + phydev_err(phydev, "Failed to enable rmii-ref clock\n"); + return err; + } + + rate = clk_get_rate(clk); + clk_disable_unprepare(clk); if (type) priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel; @@ -2665,13 +2675,12 @@ static int kszphy_probe(struct phy_device *phydev) } } else if (!clk) { /* unnamed clock from the generic ethernet-phy binding */ - clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL); + clk = devm_clk_get_optional(&phydev->mdio.dev, NULL); } if (IS_ERR(clk)) return PTR_ERR(clk); - clk_disable_unprepare(clk); priv->clk = clk; if (ksz8041_fiber_mode(phydev)) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 47f095bd91ceab..3e023723887c4b 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -479,6 +479,8 @@ static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id, linkmode_zero(caps->link_modes); linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, caps->link_modes); + phy_interface_zero(caps->interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, caps->interfaces); } #define SFP_QUIRK(_v, _p, _s, _f) \ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index fa519258386070..2f3baa5f6e9c94 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -8535,19 +8535,6 @@ static int rtl8152_system_resume(struct r8152 *tp) usb_submit_urb(tp->intr_urb, GFP_NOIO); } - /* If the device is RTL8152_INACCESSIBLE here then we should do a - * reset. This is important because the usb_lock_device_for_reset() - * that happens as a result of usb_queue_reset_device() will silently - * fail if the device was suspended or if too much time passed. - * - * NOTE: The device is locked here so we can directly do the reset. - * We don't need usb_lock_device_for_reset() because that's just a - * wrapper over device_lock() and device_resume() (which calls us) - * does that for us. 
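The kszphy_probe() hunk above switches from devm_clk_get_optional_enabled() to enabling the rmii-ref clock only long enough to read its rate. Below is a minimal sketch of that pattern under assumed names (sample_refclk_rate() and the "refclk" lookup are hypothetical, not the PHY driver's identifiers); clk_get_rate() is only meaningful while the clock is prepared and enabled, hence the short enable window:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int sample_refclk_rate(struct device *dev, unsigned long *rate)
{
	struct clk *clk;
	int err;

	*rate = 0;

	/* Optional clock: a missing entry yields NULL rather than an error. */
	clk = devm_clk_get_optional(dev, "refclk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	if (!clk)
		return 0;

	err = clk_prepare_enable(clk);
	if (err)
		return err;

	*rate = clk_get_rate(clk);	/* sample while the clock is running */
	clk_disable_unprepare(clk);

	return 0;
}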
- */ - if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) - usb_reset_device(tp->udev); - return 0; } @@ -8658,19 +8645,33 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) static int rtl8152_resume(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); + bool runtime_resume = test_bit(SELECTIVE_SUSPEND, &tp->flags); int ret; mutex_lock(&tp->control); rtl_reset_ocp_base(tp); - if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) + if (runtime_resume) ret = rtl8152_runtime_resume(tp); else ret = rtl8152_system_resume(tp); mutex_unlock(&tp->control); + /* If the device is RTL8152_INACCESSIBLE here then we should do a + * reset. This is important because the usb_lock_device_for_reset() + * that happens as a result of usb_queue_reset_device() will silently + * fail if the device was suspended or if too much time passed. + * + * NOTE: The device is locked here so we can directly do the reset. + * We don't need usb_lock_device_for_reset() because that's just a + * wrapper over device_lock() and device_resume() (which calls us) + * does that for us. + */ + if (!runtime_resume && test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + usb_reset_device(tp->udev); + return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c index a5ececfc13e449..f15d1f5d1bf593 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c @@ -55,8 +55,6 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif) ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL); - wiphy_delayed_work_cancel(mld->wiphy, &mld_vif->mlo_scan_start_wk); - CLEANUP_STRUCT(mld_vif); } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c index 55b484c162807f..cd0dce8de85690 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c @@ -1759,6 +1759,8 @@ static int iwl_mld_move_sta_state_down(struct iwl_mld *mld, wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk); wiphy_delayed_work_cancel(mld->wiphy, &mld_vif->emlsr.check_tpt_wk); + wiphy_delayed_work_cancel(mld->wiphy, + &mld_vif->mlo_scan_start_wk); iwl_mld_reset_cca_40mhz_workaround(mld, vif); iwl_mld_smps_workaround(mld, vif, true); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 07f1a84c274efe..af1a45845999bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2025 Intel Corporation + * Copyright (C) 2012-2014, 2018-2026 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -3239,6 +3239,8 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm) IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n"); + iwl_mvm_pause_tcm(mvm, true); + mvm->fast_resume = true; set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); @@ -3295,6 +3297,8 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm) mvm->trans->state = IWL_TRANS_NO_FW; } + iwl_mvm_resume_tcm(mvm); + out: clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); mvm->fast_resume = false; diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index b76bea6ab2d762..5af90ca6e06318 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c 
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -395,6 +395,7 @@ static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq, struct sk_buff *skb) { unsigned long long data_bus_addr, data_base_addr; + struct skb_shared_info *shinfo = skb_shinfo(skb); struct device *dev = rxq->dpmaif_ctrl->dev; struct dpmaif_bat_page *page_info; unsigned int data_len; @@ -402,18 +403,22 @@ static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq, page_info = rxq->bat_frag->bat_skb; page_info += t7xx_normal_pit_bid(pkt_info); - dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); if (!page_info->page) return -EINVAL; + if (shinfo->nr_frags >= MAX_SKB_FRAGS) + return -EINVAL; + + dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); + data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); data_base_addr = page_info->data_bus_addr; data_offset = data_bus_addr - data_base_addr; data_offset += page_info->offset; data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page, + skb_add_rx_frag(skb, shinfo->nr_frags, page_info->page, data_offset, data_len, page_info->data_len); page_info->page = NULL; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 58f3097888a76e..d86f2565a92cac 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -806,8 +806,8 @@ static void nvme_unmap_data(struct request *req) if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len, map)) { if (nvme_pci_cmd_use_sgl(&iod->cmd)) - nvme_free_sgls(req, iod->descriptors[0], - &iod->cmd.common.dptr.sgl, attrs); + nvme_free_sgls(req, &iod->cmd.common.dptr.sgl, + iod->descriptors[0], attrs); else nvme_free_prps(req, attrs); } @@ -816,6 +816,32 @@ static void nvme_unmap_data(struct request *req) nvme_free_descriptors(req); } +static bool nvme_pci_prp_save_mapping(struct request *req, + struct device *dma_dev, + struct blk_dma_iter *iter) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev)) + return true; + + if (!iod->nr_dma_vecs) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + + iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool, + GFP_ATOMIC); + if (!iod->dma_vecs) { + iter->status = BLK_STS_RESOURCE; + return false; + } + } + + iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr; + iod->dma_vecs[iod->nr_dma_vecs].len = iter->len; + iod->nr_dma_vecs++; + return true; +} + static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev, struct blk_dma_iter *iter) { @@ -825,12 +851,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev, return true; if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter)) return false; - if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) { - iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr; - iod->dma_vecs[iod->nr_dma_vecs].len = iter->len; - iod->nr_dma_vecs++; - } - return true; + return nvme_pci_prp_save_mapping(req, dma_dev, iter); } static blk_status_t nvme_pci_setup_data_prp(struct request *req, @@ -843,15 +864,8 @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req, unsigned int prp_len, i; __le64 *prp_list; - if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) { - iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool, - GFP_ATOMIC); - if 
(!iod->dma_vecs) - return BLK_STS_RESOURCE; - iod->dma_vecs[0].addr = iter->addr; - iod->dma_vecs[0].len = iter->len; - iod->nr_dma_vecs = 1; - } + if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter)) + return iter->status; /* * PRP1 always points to the start of the DMA transfers. @@ -1219,6 +1233,7 @@ static blk_status_t nvme_prep_rq(struct request *req) iod->nr_descriptors = 0; iod->total_len = 0; iod->meta_total_len = 0; + iod->nr_dma_vecs = 0; ret = nvme_setup_cmd(req->q->queuedata, req); if (ret) diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 8d246b8ca604c5..0103815542d49f 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -180,9 +180,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) static void nvmet_bio_done(struct bio *bio) { struct nvmet_req *req = bio->bi_private; + blk_status_t blk_status = bio->bi_status; - nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); nvmet_req_bio_put(req, bio); + nvmet_req_complete(req, blk_to_nvme_status(req, blk_status)); } #ifdef CONFIG_BLK_DEV_INTEGRITY diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 549a4786d1c3dd..bda816d6684650 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -349,11 +349,14 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) cmd->req.sg = NULL; } +static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue); + static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) { struct bio_vec *iov = cmd->iov; struct scatterlist *sg; u32 length, offset, sg_offset; + unsigned int sg_remaining; int nr_pages; length = cmd->pdu_len; @@ -361,9 +364,22 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) offset = cmd->rbytes_done; cmd->sg_idx = offset / PAGE_SIZE; sg_offset = offset % PAGE_SIZE; + if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) { + nvmet_tcp_fatal_error(cmd->queue); + return; + } sg = &cmd->req.sg[cmd->sg_idx]; + sg_remaining = cmd->req.sg_cnt - cmd->sg_idx; while (length) { + if (!sg_remaining) { + nvmet_tcp_fatal_error(cmd->queue); + return; + } + if (!sg->length || sg->length <= sg_offset) { + nvmet_tcp_fatal_error(cmd->queue); + return; + } u32 iov_len = min_t(u32, length, sg->length - sg_offset); bvec_set_page(iov, sg_page(sg), iov_len, @@ -371,6 +387,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) length -= iov_len; sg = sg_next(sg); + sg_remaining--; iov++; sg_offset = 0; } diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 5619ec91785871..a2a13617c6f49e 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -157,13 +157,19 @@ static int __init __reserved_mem_reserve_reg(unsigned long node, phys_addr_t base, size; int i, len; const __be32 *prop; - bool nomap; + bool nomap, default_cma; prop = of_flat_dt_get_addr_size_prop(node, "reg", &len); if (!prop) return -ENOENT; nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; + default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); + + if (default_cma && cma_skip_dt_default_reserved_mem()) { + pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n"); + return -EINVAL; + } for (i = 0; i < len; i++) { u64 b, s; @@ -248,10 +254,13 @@ void __init fdt_scan_reserved_mem_reg_nodes(void) fdt_for_each_subnode(child, fdt, node) { const char *uname; + bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL); u64 b, s; if 
(!of_fdt_device_is_available(fdt, child)) continue; + if (default_cma && cma_skip_dt_default_reserved_mem()) + continue; if (!of_flat_dt_get_addr_size(child, "reg", &b, &s)) continue; @@ -389,7 +398,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam phys_addr_t base = 0, align = 0, size; int i, len; const __be32 *prop; - bool nomap; + bool nomap, default_cma; int ret; prop = of_get_flat_dt_prop(node, "size", &len); @@ -413,6 +422,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam } nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; + default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); + + if (default_cma && cma_skip_dt_default_reserved_mem()) { + pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n"); + return -EINVAL; + } /* Need adjust the alignment to satisfy the CMA requirement */ if (IS_ENABLED(CONFIG_CMA) diff --git a/drivers/pci/ide.c b/drivers/pci/ide.c index f0ef474e1a0da7..23f554490539f2 100644 --- a/drivers/pci/ide.c +++ b/drivers/pci/ide.c @@ -11,7 +11,6 @@ #include #include #include -#include #include "pci.h" @@ -168,7 +167,7 @@ void pci_ide_init(struct pci_dev *pdev) for (u16 i = 0; i < nr_streams; i++) { int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem); - pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val); + pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val); if (val & PCI_IDE_SEL_CTL_EN) continue; val &= ~PCI_IDE_SEL_CTL_ID; @@ -283,8 +282,8 @@ struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev) /* for SR-IOV case, cover all VFs */ num_vf = pci_num_vf(pdev); if (num_vf) - rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf), - pci_iov_virtfn_devfn(pdev, num_vf)); + rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf - 1), + pci_iov_virtfn_devfn(pdev, num_vf - 1)); else rid_end = pci_dev_id(pdev); @@ -373,9 +372,6 @@ void pci_ide_stream_release(struct pci_ide *ide) if (ide->partner[PCI_IDE_EP].enable) pci_ide_stream_disable(pdev, ide); - if (ide->tsm_dev) - tsm_ide_stream_unregister(ide); - if (ide->partner[PCI_IDE_RP].setup) pci_ide_stream_teardown(rp, ide); diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 18295b15ecd9dd..4507dc8b5563c0 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -619,7 +619,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc) pc->chip.set = meson_gpio_set; pc->chip.base = -1; pc->chip.ngpio = pc->data->num_pins; - pc->chip.can_sleep = false; + pc->chip.can_sleep = true; ret = gpiochip_add_data(&pc->chip, pc); if (ret) { diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index e44ef262beec6e..2fc67aeafdb39e 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -3545,10 +3545,9 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, return 0; } -static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, - struct pinctrl_gpio_range *range, - unsigned offset, - bool input) +static int rockchip_pmx_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset) { struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); struct rockchip_pin_bank *bank; @@ -3562,7 +3561,7 @@ static const struct pinmux_ops rockchip_pmx_ops = { .get_function_name = rockchip_pmx_get_func_name, .get_function_groups = rockchip_pmx_get_groups, .set_mux = rockchip_pmx_set, - 
.gpio_set_direction = rockchip_pmx_gpio_set_direction, + .gpio_request_enable = rockchip_pmx_gpio_request_enable, }; /* diff --git a/drivers/pinctrl/pinctrl-th1520.c b/drivers/pinctrl/pinctrl-th1520.c index e641bad6728cc5..83e9c9f77370c6 100644 --- a/drivers/pinctrl/pinctrl-th1520.c +++ b/drivers/pinctrl/pinctrl-th1520.c @@ -287,7 +287,7 @@ static const struct pinctrl_pin_desc th1520_group3_pins[] = { TH1520_PAD(5, QSPI0_D0_MOSI, QSPI, PWM, I2S, GPIO, ____, ____, 0), TH1520_PAD(6, QSPI0_D1_MISO, QSPI, PWM, I2S, GPIO, ____, ____, 0), TH1520_PAD(7, QSPI0_D2_WP, QSPI, PWM, I2S, GPIO, ____, ____, 0), - TH1520_PAD(8, QSPI1_D3_HOLD, QSPI, ____, I2S, GPIO, ____, ____, 0), + TH1520_PAD(8, QSPI0_D3_HOLD, QSPI, ____, I2S, GPIO, ____, ____, 0), TH1520_PAD(9, I2C2_SCL, I2C, UART, ____, GPIO, ____, ____, 0), TH1520_PAD(10, I2C2_SDA, I2C, UART, ____, GPIO, ____, ____, 0), TH1520_PAD(11, I2C3_SCL, I2C, ____, ____, GPIO, ____, ____, 0), diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index c480e8b7850329..f56592411cf6d9 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -61,13 +61,14 @@ config PINCTRL_LPASS_LPI (Low Power Island) found on the Qualcomm Technologies Inc SoCs. config PINCTRL_SC7280_LPASS_LPI - tristate "Qualcomm Technologies Inc SC7280 LPASS LPI pin controller driver" + tristate "Qualcomm Technologies Inc SC7280 and SM8350 LPASS LPI pin controller driver" depends on ARM64 || COMPILE_TEST depends on PINCTRL_LPASS_LPI help This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI - (Low Power Island) found on the Qualcomm Technologies Inc SC7280 platform. + (Low Power Island) found on the Qualcomm Technologies Inc SC7280 + and SM8350 platforms. config PINCTRL_SDM660_LPASS_LPI tristate "Qualcomm Technologies Inc SDM660 LPASS LPI pin controller driver" @@ -106,16 +107,6 @@ config PINCTRL_SM8250_LPASS_LPI Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI (Low Power Island) found on the Qualcomm Technologies Inc SM8250 platform. -config PINCTRL_SM8350_LPASS_LPI - tristate "Qualcomm Technologies Inc SM8350 LPASS LPI pin controller driver" - depends on ARM64 || COMPILE_TEST - depends on PINCTRL_LPASS_LPI - help - This is the pinctrl, pinmux, pinconf and gpiolib driver for the - Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI - (Low Power Island) found on the Qualcomm Technologies Inc SM8350 - platform. 
- config PINCTRL_SM8450_LPASS_LPI tristate "Qualcomm Technologies Inc SM8450 LPASS LPI pin controller driver" depends on ARM64 || COMPILE_TEST diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 748b17a77b2cc4..4269d1781015ed 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -64,7 +64,6 @@ obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o obj-$(CONFIG_PINCTRL_SM8250_LPASS_LPI) += pinctrl-sm8250-lpass-lpi.o obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o -obj-$(CONFIG_PINCTRL_SM8350_LPASS_LPI) += pinctrl-sm8350-lpass-lpi.o obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o obj-$(CONFIG_PINCTRL_SM8450_LPASS_LPI) += pinctrl-sm8450-lpass-lpi.o obj-$(CONFIG_PINCTRL_SM8550) += pinctrl-sm8550.o diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c index 78212f9928430c..76aed32962794e 100644 --- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c @@ -312,6 +312,22 @@ static const struct pinconf_ops lpi_gpio_pinconf_ops = { .pin_config_group_set = lpi_config_set, }; +static int lpi_gpio_get_direction(struct gpio_chip *chip, unsigned int pin) +{ + unsigned long config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, 0); + struct lpi_pinctrl *state = gpiochip_get_data(chip); + unsigned long arg; + int ret; + + ret = lpi_config_get(state->ctrl, pin, &config); + if (ret) + return ret; + + arg = pinconf_to_config_argument(config); + + return arg ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; +} + static int lpi_gpio_direction_input(struct gpio_chip *chip, unsigned int pin) { struct lpi_pinctrl *state = gpiochip_get_data(chip); @@ -409,6 +425,7 @@ static void lpi_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) #endif static const struct gpio_chip lpi_gpio_template = { + .get_direction = lpi_gpio_get_direction, .direction_input = lpi_gpio_direction_input, .direction_output = lpi_gpio_direction_output, .get = lpi_gpio_get, diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c index 1161f0a91a002a..750f410311a8fb 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c @@ -131,6 +131,9 @@ static const struct of_device_id lpi_pinctrl_of_match[] = { { .compatible = "qcom,sc7280-lpass-lpi-pinctrl", .data = &sc7280_lpi_data, + }, { + .compatible = "qcom,sm8350-lpass-lpi-pinctrl", + .data = &sc7280_lpi_data, }, { } }; diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c deleted file mode 100644 index 7b146b4acfdf42..00000000000000 --- a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. - * Copyright (c) 2020-2023 Linaro Ltd. 
- */ - -#include -#include -#include - -#include "pinctrl-lpass-lpi.h" - -enum lpass_lpi_functions { - LPI_MUX_dmic1_clk, - LPI_MUX_dmic1_data, - LPI_MUX_dmic2_clk, - LPI_MUX_dmic2_data, - LPI_MUX_dmic3_clk, - LPI_MUX_dmic3_data, - LPI_MUX_i2s1_clk, - LPI_MUX_i2s1_data, - LPI_MUX_i2s1_ws, - LPI_MUX_i2s2_clk, - LPI_MUX_i2s2_data, - LPI_MUX_i2s2_ws, - LPI_MUX_qua_mi2s_data, - LPI_MUX_qua_mi2s_sclk, - LPI_MUX_qua_mi2s_ws, - LPI_MUX_swr_rx_clk, - LPI_MUX_swr_rx_data, - LPI_MUX_swr_tx_clk, - LPI_MUX_swr_tx_data, - LPI_MUX_wsa_swr_clk, - LPI_MUX_wsa_swr_data, - LPI_MUX_gpio, - LPI_MUX__, -}; - -static const struct pinctrl_pin_desc sm8350_lpi_pins[] = { - PINCTRL_PIN(0, "gpio0"), - PINCTRL_PIN(1, "gpio1"), - PINCTRL_PIN(2, "gpio2"), - PINCTRL_PIN(3, "gpio3"), - PINCTRL_PIN(4, "gpio4"), - PINCTRL_PIN(5, "gpio5"), - PINCTRL_PIN(6, "gpio6"), - PINCTRL_PIN(7, "gpio7"), - PINCTRL_PIN(8, "gpio8"), - PINCTRL_PIN(9, "gpio9"), - PINCTRL_PIN(10, "gpio10"), - PINCTRL_PIN(11, "gpio11"), - PINCTRL_PIN(12, "gpio12"), - PINCTRL_PIN(13, "gpio13"), - PINCTRL_PIN(14, "gpio14"), -}; - -static const char * const swr_tx_clk_groups[] = { "gpio0" }; -static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio5", "gpio14" }; -static const char * const swr_rx_clk_groups[] = { "gpio3" }; -static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" }; -static const char * const dmic1_clk_groups[] = { "gpio6" }; -static const char * const dmic1_data_groups[] = { "gpio7" }; -static const char * const dmic2_clk_groups[] = { "gpio8" }; -static const char * const dmic2_data_groups[] = { "gpio9" }; -static const char * const i2s2_clk_groups[] = { "gpio10" }; -static const char * const i2s2_ws_groups[] = { "gpio11" }; -static const char * const dmic3_clk_groups[] = { "gpio12" }; -static const char * const dmic3_data_groups[] = { "gpio13" }; -static const char * const qua_mi2s_sclk_groups[] = { "gpio0" }; -static const char * const qua_mi2s_ws_groups[] = { "gpio1" }; -static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" }; -static const char * const i2s1_clk_groups[] = { "gpio6" }; -static const char * const i2s1_ws_groups[] = { "gpio7" }; -static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" }; -static const char * const wsa_swr_clk_groups[] = { "gpio10" }; -static const char * const wsa_swr_data_groups[] = { "gpio11" }; -static const char * const i2s2_data_groups[] = { "gpio12", "gpio12" }; - -static const struct lpi_pingroup sm8350_groups[] = { - LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _), - LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _), - LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _), - LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _), - LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _), - LPI_PINGROUP(5, 12, swr_tx_data, swr_rx_data, _, _), - LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _), - LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _), - LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _), - LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, _, _), - LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _), - LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _), - LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s2_data, _, _), - LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s2_data, _, _), - LPI_PINGROUP(14, 6, swr_tx_data, _, _, _), -}; - -static const struct lpi_function sm8350_functions[] = { - LPI_FUNCTION(dmic1_clk), - LPI_FUNCTION(dmic1_data), - LPI_FUNCTION(dmic2_clk), - LPI_FUNCTION(dmic2_data), - 
LPI_FUNCTION(dmic3_clk), - LPI_FUNCTION(dmic3_data), - LPI_FUNCTION(i2s1_clk), - LPI_FUNCTION(i2s1_data), - LPI_FUNCTION(i2s1_ws), - LPI_FUNCTION(i2s2_clk), - LPI_FUNCTION(i2s2_data), - LPI_FUNCTION(i2s2_ws), - LPI_FUNCTION(qua_mi2s_data), - LPI_FUNCTION(qua_mi2s_sclk), - LPI_FUNCTION(qua_mi2s_ws), - LPI_FUNCTION(swr_rx_clk), - LPI_FUNCTION(swr_rx_data), - LPI_FUNCTION(swr_tx_clk), - LPI_FUNCTION(swr_tx_data), - LPI_FUNCTION(wsa_swr_clk), - LPI_FUNCTION(wsa_swr_data), -}; - -static const struct lpi_pinctrl_variant_data sm8350_lpi_data = { - .pins = sm8350_lpi_pins, - .npins = ARRAY_SIZE(sm8350_lpi_pins), - .groups = sm8350_groups, - .ngroups = ARRAY_SIZE(sm8350_groups), - .functions = sm8350_functions, - .nfunctions = ARRAY_SIZE(sm8350_functions), -}; - -static const struct of_device_id lpi_pinctrl_of_match[] = { - { - .compatible = "qcom,sm8350-lpass-lpi-pinctrl", - .data = &sm8350_lpi_data, - }, - { } -}; -MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match); - -static struct platform_driver lpi_pinctrl_driver = { - .driver = { - .name = "qcom-sm8350-lpass-lpi-pinctrl", - .of_match_table = lpi_pinctrl_of_match, - }, - .probe = lpi_pinctrl_probe, - .remove = lpi_pinctrl_remove, -}; -module_platform_driver(lpi_pinctrl_driver); - -MODULE_AUTHOR("Krzysztof Kozlowski "); -MODULE_DESCRIPTION("QTI SM8350 LPI GPIO pin control driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 404e62ad293a98..ed285afaf9b0d7 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -302,6 +302,13 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"), } }, + { + .ident = "MECHREVO Wujie 15X Pro", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "WUJIE Series-X5SP4NAG"), + } + }, {} }; diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 6b1b8e444e241c..74d3eb83f56a63 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -207,7 +207,12 @@ static ssize_t cmpc_accel_sensitivity_show_v4(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; return sysfs_emit(buf, "%d\n", accel->sensitivity); } @@ -224,7 +229,12 @@ static ssize_t cmpc_accel_sensitivity_store_v4(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; r = kstrtoul(buf, 0, &sensitivity); if (r) @@ -256,7 +266,12 @@ static ssize_t cmpc_accel_g_select_show_v4(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; return sysfs_emit(buf, "%d\n", accel->g_select); } @@ -273,7 +288,12 @@ static ssize_t cmpc_accel_g_select_store_v4(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; r = kstrtoul(buf, 0, &g_select); if (r) @@ -302,6 +322,8 @@ static int cmpc_accel_open_v4(struct input_dev *input) acpi = to_acpi_device(input->dev.parent); accel = dev_get_drvdata(&input->dev); + if (!accel) + return -ENXIO; 
cmpc_accel_set_sensitivity_v4(acpi->handle, accel->sensitivity); cmpc_accel_set_g_select_v4(acpi->handle, accel->g_select); @@ -549,7 +571,12 @@ static ssize_t cmpc_accel_sensitivity_show(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; return sysfs_emit(buf, "%d\n", accel->sensitivity); } @@ -566,7 +593,12 @@ static ssize_t cmpc_accel_sensitivity_store(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; r = kstrtoul(buf, 0, &sensitivity); if (r) diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index dbe096eefa7582..51e8977d3eb4a8 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -696,6 +696,11 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type, return ret; } + if (!str_value || !str_value[0]) { + pr_debug("Ignoring attribute with empty name\n"); + goto pack_attr_exit; + } + /* All duplicate attributes found are ignored */ duplicate = kset_find_obj(temp_kset, str_value); if (duplicate) { diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c index 58132da4774570..05727169f49c14 100644 --- a/drivers/platform/x86/intel/plr_tpmi.c +++ b/drivers/platform/x86/intel/plr_tpmi.c @@ -316,7 +316,7 @@ static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxilia snprintf(name, sizeof(name), "domain%d", i); dentry = debugfs_create_dir(name, plr->dbgfs_dir); - debugfs_create_file("status", 0444, dentry, &plr->die_info[i], + debugfs_create_file("status", 0644, dentry, &plr->die_info[i], &plr_status_fops); } diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c index 70e5736c44c71a..189c61ff7ff0c0 100644 --- a/drivers/platform/x86/intel/telemetry/debugfs.c +++ b/drivers/platform/x86/intel/telemetry/debugfs.c @@ -449,7 +449,7 @@ static int telem_pss_states_show(struct seq_file *s, void *unused) for (index = 0; index < debugfs_conf->pss_ltr_evts; index++) { seq_printf(s, "%-32s\t%u\n", debugfs_conf->pss_ltr_data[index].name, - pss_s0ix_wakeup[index]); + pss_ltr_blkd[index]); } seq_puts(s, "\n--------------------------------------\n"); @@ -459,7 +459,7 @@ static int telem_pss_states_show(struct seq_file *s, void *unused) for (index = 0; index < debugfs_conf->pss_wakeup_evts; index++) { seq_printf(s, "%-32s\t%u\n", debugfs_conf->pss_wakeup[index].name, - pss_ltr_blkd[index]); + pss_s0ix_wakeup[index]); } return 0; diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c index f23c170a55dc67..d9aa349f81e412 100644 --- a/drivers/platform/x86/intel/telemetry/pltdrv.c +++ b/drivers/platform/x86/intel/telemetry/pltdrv.c @@ -610,7 +610,7 @@ static int telemetry_setup(struct platform_device *pdev) /* Get telemetry Info */ events = (read_buf & TELEM_INFO_SRAMEVTS_MASK) >> TELEM_INFO_SRAMEVTS_SHIFT; - event_regs = read_buf & TELEM_INFO_SRAMEVTS_MASK; + event_regs = read_buf & TELEM_INFO_NENABLES_MASK; if ((events < TELEM_MAX_EVENTS_SRAM) || (event_regs < TELEM_MAX_EVENTS_SRAM)) { dev_err(&pdev->dev, "PSS:Insufficient Space for SRAM Trace\n"); diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 
ecfc7703f20196..012d87878afdd8 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -766,6 +766,7 @@ static const struct intel_vsec_platform_info lnl_info = { #define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d #define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d #define PCI_DEVICE_ID_INTEL_VSEC_WCL 0xfd7d +#define PCI_DEVICE_ID_INTEL_VSEC_NVL 0xd70d static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) }, @@ -778,6 +779,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_WCL, &mtl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_NVL, &mtl_info) }, { } }; MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids); diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c index f92e89c75db98a..61ef7a218a80d6 100644 --- a/drivers/platform/x86/lg-laptop.c +++ b/drivers/platform/x86/lg-laptop.c @@ -838,8 +838,17 @@ static int acpi_add(struct acpi_device *device) case 'P': year = 2021; break; - default: + case 'Q': year = 2022; + break; + case 'R': + year = 2023; + break; + case 'S': + year = 2024; + break; + default: + year = 2025; } break; default: diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 255317e6fec881..937f1a5b78edfb 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -1089,7 +1089,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) PLATFORM_DEVID_NONE, NULL, 0); if (IS_ERR(pcc->platform)) { result = PTR_ERR(pcc->platform); - goto out_backlight; + goto out_sysfs; } result = device_create_file(&pcc->platform->dev, &dev_attr_cdpower); @@ -1105,6 +1105,8 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) out_platform: platform_device_unregister(pcc->platform); +out_sysfs: + sysfs_remove_group(&device->dev.kobj, &pcc_attr_group); out_backlight: backlight_device_unregister(pcc->backlight); out_input: diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c index 03dfddeee0c0af..e9324bf16aea4b 100644 --- a/drivers/platform/x86/toshiba_haps.c +++ b/drivers/platform/x86/toshiba_haps.c @@ -183,7 +183,7 @@ static int toshiba_haps_add(struct acpi_device *acpi_dev) pr_info("Toshiba HDD Active Protection Sensor device\n"); - haps = kzalloc(sizeof(struct toshiba_haps_dev), GFP_KERNEL); + haps = devm_kzalloc(&acpi_dev->dev, sizeof(*haps), GFP_KERNEL); if (!haps) return -ENOMEM; diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c index 105fcaf13a34c7..cff738e4d54622 100644 --- a/drivers/pmdomain/imx/gpcv2.c +++ b/drivers/pmdomain/imx/gpcv2.c @@ -165,13 +165,11 @@ #define IMX8M_VPU_HSK_PWRDNREQN BIT(5) #define IMX8M_DISP_HSK_PWRDNREQN BIT(4) -#define IMX8MM_GPUMIX_HSK_PWRDNACKN BIT(29) -#define IMX8MM_GPU_HSK_PWRDNACKN (BIT(27) | BIT(28)) +#define IMX8MM_GPU_HSK_PWRDNACKN GENMASK(29, 27) #define IMX8MM_VPUMIX_HSK_PWRDNACKN BIT(26) #define IMX8MM_DISPMIX_HSK_PWRDNACKN BIT(25) #define IMX8MM_HSIO_HSK_PWRDNACKN (BIT(23) | BIT(24)) -#define IMX8MM_GPUMIX_HSK_PWRDNREQN BIT(11) -#define IMX8MM_GPU_HSK_PWRDNREQN (BIT(9) | BIT(10)) +#define IMX8MM_GPU_HSK_PWRDNREQN GENMASK(11, 9) #define IMX8MM_VPUMIX_HSK_PWRDNREQN BIT(8) #define IMX8MM_DISPMIX_HSK_PWRDNREQN BIT(7) #define IMX8MM_HSIO_HSK_PWRDNREQN (BIT(5) | BIT(6)) @@ -794,8 +792,6 @@ static const struct imx_pgc_domain 
imx8mm_pgc_domains[] = { .bits = { .pxx = IMX8MM_GPUMIX_SW_Pxx_REQ, .map = IMX8MM_GPUMIX_A53_DOMAIN, - .hskreq = IMX8MM_GPUMIX_HSK_PWRDNREQN, - .hskack = IMX8MM_GPUMIX_HSK_PWRDNACKN, }, .pgc = BIT(IMX8MM_PGC_GPUMIX), .keep_clocks = true, diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c index 74bf4936991d7c..19e992d2ee3b84 100644 --- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c +++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c @@ -340,7 +340,7 @@ static void imx8m_blk_ctrl_remove(struct platform_device *pdev) of_genpd_del_provider(pdev->dev.of_node); - for (i = 0; bc->onecell_data.num_domains; i++) { + for (i = 0; i < bc->onecell_data.num_domains; i++) { struct imx8m_blk_ctrl_domain *domain = &bc->domains[i]; pm_genpd_remove(&domain->genpd); diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c index 34576be606e31a..8fc79f9723f07e 100644 --- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c +++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c @@ -53,6 +53,7 @@ struct imx8mp_blk_ctrl_domain_data { const char * const *path_names; int num_paths; const char *gpc_name; + const unsigned int flags; }; #define DOMAIN_MAX_CLKS 3 @@ -65,6 +66,7 @@ struct imx8mp_blk_ctrl_domain { struct icc_bulk_data paths[DOMAIN_MAX_PATHS]; struct device *power_dev; struct imx8mp_blk_ctrl *bc; + struct notifier_block power_nb; int num_paths; int id; }; @@ -264,10 +266,12 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = { [IMX8MP_HSIOBLK_PD_USB_PHY1] = { .name = "hsioblk-usb-phy1", .gpc_name = "usb-phy1", + .flags = GENPD_FLAG_ACTIVE_WAKEUP, }, [IMX8MP_HSIOBLK_PD_USB_PHY2] = { .name = "hsioblk-usb-phy2", .gpc_name = "usb-phy2", + .flags = GENPD_FLAG_ACTIVE_WAKEUP, }, [IMX8MP_HSIOBLK_PD_PCIE] = { .name = "hsioblk-pcie", @@ -594,6 +598,20 @@ static int imx8mp_blk_ctrl_power_off(struct generic_pm_domain *genpd) return 0; } +static int imx8mp_blk_ctrl_gpc_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct imx8mp_blk_ctrl_domain *domain = + container_of(nb, struct imx8mp_blk_ctrl_domain, power_nb); + + if (action == GENPD_NOTIFY_PRE_OFF) { + if (domain->genpd.status == GENPD_STATE_ON) + return NOTIFY_BAD; + } + + return NOTIFY_OK; +} + static struct lock_class_key blk_ctrl_genpd_lock_class; static int imx8mp_blk_ctrl_probe(struct platform_device *pdev) @@ -698,15 +716,25 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev) goto cleanup_pds; } + domain->power_nb.notifier_call = imx8mp_blk_ctrl_gpc_notifier; + ret = dev_pm_genpd_add_notifier(domain->power_dev, &domain->power_nb); + if (ret) { + dev_err_probe(dev, ret, "failed to add power notifier\n"); + dev_pm_domain_detach(domain->power_dev, true); + goto cleanup_pds; + } + domain->genpd.name = data->name; domain->genpd.power_on = imx8mp_blk_ctrl_power_on; domain->genpd.power_off = imx8mp_blk_ctrl_power_off; + domain->genpd.flags = data->flags; domain->bc = bc; domain->id = i; ret = pm_genpd_init(&domain->genpd, NULL, true); if (ret) { dev_err_probe(dev, ret, "failed to init power domain\n"); + dev_pm_genpd_remove_notifier(domain->power_dev); dev_pm_domain_detach(domain->power_dev, true); goto cleanup_pds; } @@ -755,6 +783,7 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev) cleanup_pds: for (i--; i >= 0; i--) { pm_genpd_remove(&bc->domains[i].genpd); + dev_pm_genpd_remove_notifier(bc->domains[i].power_dev); dev_pm_domain_detach(bc->domains[i].power_dev, true); } @@ -774,6 +803,7 @@ static void imx8mp_blk_ctrl_remove(struct 
platform_device *pdev) struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i]; pm_genpd_remove(&domain->genpd); + dev_pm_genpd_remove_notifier(domain->power_dev); dev_pm_domain_detach(domain->power_dev, true); } diff --git a/drivers/pmdomain/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c index f8580ec0f73785..98ab4f9ea9bff4 100644 --- a/drivers/pmdomain/qcom/rpmpd.c +++ b/drivers/pmdomain/qcom/rpmpd.c @@ -1001,7 +1001,7 @@ static int rpmpd_aggregate_corner(struct rpmpd *pd) /* Clamp to the highest corner/level if sync_state isn't done yet */ if (!pd->state_synced) - this_active_corner = this_sleep_corner = pd->max_state - 1; + this_active_corner = this_sleep_corner = pd->max_state; else to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner); diff --git a/drivers/regulator/spacemit-p1.c b/drivers/regulator/spacemit-p1.c index 2bf9137e12b1d0..2b585ba01a93d0 100644 --- a/drivers/regulator/spacemit-p1.c +++ b/drivers/regulator/spacemit-p1.c @@ -87,13 +87,13 @@ static const struct linear_range p1_ldo_ranges[] = { } #define P1_BUCK_DESC(_n) \ - P1_REG_DESC(BUCK, buck, _n, "vin", 0x47, BUCK_MASK, 254, p1_buck_ranges) + P1_REG_DESC(BUCK, buck, _n, "vin", 0x47, BUCK_MASK, 255, p1_buck_ranges) #define P1_ALDO_DESC(_n) \ - P1_REG_DESC(ALDO, aldo, _n, "vin", 0x5b, LDO_MASK, 117, p1_ldo_ranges) + P1_REG_DESC(ALDO, aldo, _n, "vin", 0x5b, LDO_MASK, 128, p1_ldo_ranges) #define P1_DLDO_DESC(_n) \ - P1_REG_DESC(DLDO, dldo, _n, "buck5", 0x67, LDO_MASK, 117, p1_ldo_ranges) + P1_REG_DESC(DLDO, dldo, _n, "buck5", 0x67, LDO_MASK, 128, p1_ldo_ranges) static const struct regulator_desc p1_regulator_desc[] = { P1_BUCK_DESC(1), diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 4e899ec1477d46..b1cba986f0fbd3 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1025,6 +1025,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) &nonemb_cmd->dma, GFP_KERNEL); if (!nonemb_cmd->va) { + free_mcc_wrb(ctrl, tag); mutex_unlock(&ctrl->mbox_lock); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 16a44c0917e18b..e939bc88e15197 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -4489,7 +4489,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, fail_elsrej: dma_pool_destroy(ha->purex_dma_pool); fail_flt: - dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + dma_free_coherent(&ha->pdev->dev, sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, ha->flt, ha->flt_dma); fail_flt_buffer: diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index fef840b5457407..c18a0c946f7627 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -396,7 +396,7 @@ EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host); */ bool qcom_smem_is_available(void) { - return !!__smem; + return !IS_ERR(__smem); } EXPORT_SYMBOL_GPL(qcom_smem_is_available); @@ -1247,7 +1247,8 @@ static void qcom_smem_remove(struct platform_device *pdev) { platform_device_unregister(__smem->socinfo); - __smem = NULL; + /* Set to -EPROBE_DEFER to signal unprobed state */ + __smem = ERR_PTR(-EPROBE_DEFER); } static const struct of_device_id qcom_smem_of_match[] = { diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 795a8482c2c700..48fb11fea55f2d 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -978,11 +978,14 @@ static int tegra_spi_setup(struct spi_device *spi) if (spi_get_csgpiod(spi, 0)) 
gpiod_set_value(spi_get_csgpiod(spi, 0), 0); + /* Update default register to include CS polarity and SPI mode */ val = tspi->def_command1_reg; if (spi->mode & SPI_CS_HIGH) val &= ~SPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0)); else val |= SPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0)); + val &= ~SPI_CONTROL_MODE_MASK; + val |= SPI_MODE_SEL(spi->mode & 0x3); tspi->def_command1_reg = val; tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); spin_unlock_irqrestore(&tspi->lock, flags); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index fe452d03c1ee43..709669610840bc 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1086,8 +1086,10 @@ static int tegra_slink_probe(struct platform_device *pdev) reset_control_deassert(tspi->rst); spi_irq = platform_get_irq(pdev, 0); - if (spi_irq < 0) - return spi_irq; + if (spi_irq < 0) { + ret = spi_irq; + goto exit_pm_put; + } tspi->irq = spi_irq; ret = request_threaded_irq(tspi->irq, tegra_slink_isr, tegra_slink_isr_thread, IRQF_ONESHOT, diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index cdc3cb7c01f9b7..f425d62e0c276a 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -839,6 +839,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran u32 command1, command2, speed = t->speed_hz; u8 bits_per_word = t->bits_per_word; u32 tx_tap = 0, rx_tap = 0; + unsigned long flags; int req_mode; if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) { @@ -846,10 +847,12 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran tqspi->cur_speed = speed; } + spin_lock_irqsave(&tqspi->lock, flags); tqspi->cur_pos = 0; tqspi->cur_rx_pos = 0; tqspi->cur_tx_pos = 0; tqspi->curr_xfer = t; + spin_unlock_irqrestore(&tqspi->lock, flags); if (is_first_of_msg) { tegra_qspi_mask_clear_irq(tqspi); @@ -1158,6 +1161,7 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, u32 address_value = 0; u32 cmd_config = 0, addr_config = 0; u8 cmd_value = 0, val = 0; + unsigned long flags; /* Enable Combined sequence mode */ val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG); @@ -1261,13 +1265,17 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, tegra_qspi_transfer_end(spi); spi_transfer_delay_exec(xfer); } + spin_lock_irqsave(&tqspi->lock, flags); tqspi->curr_xfer = NULL; + spin_unlock_irqrestore(&tqspi->lock, flags); transfer_phase++; } ret = 0; exit: + spin_lock_irqsave(&tqspi->lock, flags); tqspi->curr_xfer = NULL; + spin_unlock_irqrestore(&tqspi->lock, flags); msg->status = ret; return ret; @@ -1280,6 +1288,7 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi, struct spi_transfer *transfer; bool is_first_msg = true; int ret = 0, val = 0; + unsigned long flags; msg->status = 0; msg->actual_length = 0; @@ -1360,7 +1369,9 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi, msg->actual_length += xfer->len + dummy_bytes; complete_xfer: + spin_lock_irqsave(&tqspi->lock, flags); tqspi->curr_xfer = NULL; + spin_unlock_irqrestore(&tqspi->lock, flags); if (ret < 0) { tegra_qspi_transfer_end(spi); @@ -1440,10 +1451,16 @@ static int tegra_qspi_transfer_one_message(struct spi_controller *host, static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi) { - struct spi_transfer *t = tqspi->curr_xfer; + struct spi_transfer *t; unsigned long flags; spin_lock_irqsave(&tqspi->lock, flags); + t = tqspi->curr_xfer; + + if 
(!t) { + spin_unlock_irqrestore(&tqspi->lock, flags); + return IRQ_HANDLED; + } if (tqspi->tx_status || tqspi->rx_status) { tegra_qspi_handle_error(tqspi); @@ -1474,7 +1491,7 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi) static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi) { - struct spi_transfer *t = tqspi->curr_xfer; + struct spi_transfer *t; unsigned int total_fifo_words; unsigned long flags; long wait_status; @@ -1513,6 +1530,12 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi) } spin_lock_irqsave(&tqspi->lock, flags); + t = tqspi->curr_xfer; + + if (!t) { + spin_unlock_irqrestore(&tqspi->lock, flags); + return IRQ_HANDLED; + } if (num_errors) { tegra_qspi_dma_unmap_xfer(tqspi, t); @@ -1552,15 +1575,33 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi) static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data) { struct tegra_qspi *tqspi = context_data; + unsigned long flags; + u32 status; + + /* + * Read transfer status to check if interrupt was triggered by transfer + * completion + */ + status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS); /* * Occasionally the IRQ thread takes a long time to wake up (usually * when the CPU that it's running on is excessively busy) and we have * already reached the timeout before and cleaned up the timed out * transfer. Avoid any processing in that case and bail out early. + * + * If no transfer is in progress, check if this was a real interrupt + * that the timeout handler already processed, or a spurious one. */ - if (!tqspi->curr_xfer) - return IRQ_NONE; + spin_lock_irqsave(&tqspi->lock, flags); + if (!tqspi->curr_xfer) { + spin_unlock_irqrestore(&tqspi->lock, flags); + /* Spurious interrupt - transfer not ready */ + if (!(status & QSPI_RDY)) + return IRQ_NONE; + /* Real interrupt, already handled by timeout path */ + return IRQ_HANDLED; + } tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS); @@ -1571,7 +1612,14 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data) tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF); tegra_qspi_mask_clear_irq(tqspi); + spin_unlock_irqrestore(&tqspi->lock, flags); + /* + * Lock is released here but handlers safely re-check curr_xfer under + * lock before dereferencing. + * DMA handler also needs to sleep in wait_for_completion_*(), which + * cannot be done while holding spinlock. 
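The QSPI interrupt-path changes above share one rule: the handler takes the driver lock and re-reads curr_xfer before touching it, so a transfer already torn down by the timeout handler is skipped rather than dereferenced. A self-contained sketch of that guard, with struct foo_ctrl and its members as illustrative names rather than the QSPI driver's:

#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

struct foo_ctrl {
	spinlock_t lock;			/* guards curr_xfer */
	struct spi_transfer *curr_xfer;		/* cleared by the timeout path */
};

static irqreturn_t foo_isr_thread(int irq, void *data)
{
	struct foo_ctrl *foo = data;
	struct spi_transfer *t;
	unsigned long flags;

	spin_lock_irqsave(&foo->lock, flags);
	t = foo->curr_xfer;
	if (!t) {
		/* The timeout handler already cleaned this transfer up. */
		spin_unlock_irqrestore(&foo->lock, flags);
		return IRQ_HANDLED;
	}

	/* ... t stays valid while the lock is held ... */

	spin_unlock_irqrestore(&foo->lock, flags);
	return IRQ_HANDLED;
}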
+ */ if (!tqspi->is_curr_dma_xfer) return handle_cpu_based_xfer(tqspi); diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 9f167ff8da7b07..09120a538a401c 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -1960,12 +1960,12 @@ static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn, container_of(wwn, struct sbp_tport, tport_wwn); struct sbp_tpg *tpg; - unsigned long tpgt; + u16 tpgt; int ret; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); - if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + if (kstrtou16(name + 5, 10, &tpgt)) return ERR_PTR(-EINVAL); if (tport->tpg) { diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c index 40543db621a136..6c454ae8a9c83a 100644 --- a/drivers/ufs/host/ufs-amd-versal2.c +++ b/drivers/ufs/host/ufs-amd-versal2.c @@ -367,7 +367,7 @@ static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba, { int ret = 0; - if (status == PRE_CHANGE) { + if (status == POST_CHANGE) { ret = ufs_versal2_phy_init(hba); if (ret) dev_err(hba->dev, "Phy init failed (%d)\n", ret); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 05c6750702b609..fa467a40949d2e 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -59,7 +59,6 @@ static struct ffs_data *__must_check ffs_data_new(const char *dev_name) __attribute__((malloc)); /* Opened counter handling. */ -static void ffs_data_opened(struct ffs_data *ffs); static void ffs_data_closed(struct ffs_data *ffs); /* Called with ffs->mutex held; take over ownership of data. */ @@ -636,23 +635,25 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf, return ret; } + +static void ffs_data_reset(struct ffs_data *ffs); + static int ffs_ep0_open(struct inode *inode, struct file *file) { struct ffs_data *ffs = inode->i_sb->s_fs_info; - int ret; - /* Acquire mutex */ - ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); - if (ret < 0) - return ret; - - ffs_data_opened(ffs); + spin_lock_irq(&ffs->eps_lock); if (ffs->state == FFS_CLOSING) { - ffs_data_closed(ffs); - mutex_unlock(&ffs->mutex); + spin_unlock_irq(&ffs->eps_lock); return -EBUSY; } - mutex_unlock(&ffs->mutex); + if (!ffs->opened++ && ffs->state == FFS_DEACTIVATED) { + ffs->state = FFS_CLOSING; + spin_unlock_irq(&ffs->eps_lock); + ffs_data_reset(ffs); + } else { + spin_unlock_irq(&ffs->eps_lock); + } file->private_data = ffs; return stream_open(inode, file); @@ -1202,15 +1203,10 @@ ffs_epfile_open(struct inode *inode, struct file *file) { struct ffs_data *ffs = inode->i_sb->s_fs_info; struct ffs_epfile *epfile; - int ret; - - /* Acquire mutex */ - ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); - if (ret < 0) - return ret; - if (!atomic_inc_not_zero(&ffs->opened)) { - mutex_unlock(&ffs->mutex); + spin_lock_irq(&ffs->eps_lock); + if (!ffs->opened) { + spin_unlock_irq(&ffs->eps_lock); return -ENODEV; } /* @@ -1220,11 +1216,11 @@ ffs_epfile_open(struct inode *inode, struct file *file) */ epfile = smp_load_acquire(&inode->i_private); if (unlikely(ffs->state != FFS_ACTIVE || !epfile)) { - mutex_unlock(&ffs->mutex); - ffs_data_closed(ffs); + spin_unlock_irq(&ffs->eps_lock); return -ENODEV; } - mutex_unlock(&ffs->mutex); + ffs->opened++; + spin_unlock_irq(&ffs->eps_lock); file->private_data = epfile; return stream_open(inode, file); @@ -2092,8 +2088,6 @@ static int ffs_fs_init_fs_context(struct fs_context *fc) return 0; } -static void ffs_data_reset(struct 
ffs_data *ffs); - static void ffs_fs_kill_sb(struct super_block *sb) { @@ -2150,15 +2144,6 @@ static void ffs_data_get(struct ffs_data *ffs) refcount_inc(&ffs->ref); } -static void ffs_data_opened(struct ffs_data *ffs) -{ - if (atomic_add_return(1, &ffs->opened) == 1 && - ffs->state == FFS_DEACTIVATED) { - ffs->state = FFS_CLOSING; - ffs_data_reset(ffs); - } -} - static void ffs_data_put(struct ffs_data *ffs) { if (refcount_dec_and_test(&ffs->ref)) { @@ -2176,28 +2161,29 @@ static void ffs_data_put(struct ffs_data *ffs) static void ffs_data_closed(struct ffs_data *ffs) { - if (atomic_dec_and_test(&ffs->opened)) { - if (ffs->no_disconnect) { - struct ffs_epfile *epfiles; - unsigned long flags; - - ffs->state = FFS_DEACTIVATED; - spin_lock_irqsave(&ffs->eps_lock, flags); - epfiles = ffs->epfiles; - ffs->epfiles = NULL; - spin_unlock_irqrestore(&ffs->eps_lock, - flags); - - if (epfiles) - ffs_epfiles_destroy(ffs->sb, epfiles, - ffs->eps_count); - - if (ffs->setup_state == FFS_SETUP_PENDING) - __ffs_ep0_stall(ffs); - } else { - ffs->state = FFS_CLOSING; - ffs_data_reset(ffs); - } + spin_lock_irq(&ffs->eps_lock); + if (--ffs->opened) { // not the last opener? + spin_unlock_irq(&ffs->eps_lock); + return; + } + if (ffs->no_disconnect) { + struct ffs_epfile *epfiles; + + ffs->state = FFS_DEACTIVATED; + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irq(&ffs->eps_lock); + + if (epfiles) + ffs_epfiles_destroy(ffs->sb, epfiles, + ffs->eps_count); + + if (ffs->setup_state == FFS_SETUP_PENDING) + __ffs_ep0_stall(ffs); + } else { + ffs->state = FFS_CLOSING; + spin_unlock_irq(&ffs->eps_lock); + ffs_data_reset(ffs); } } @@ -2214,7 +2200,7 @@ static struct ffs_data *ffs_data_new(const char *dev_name) } refcount_set(&ffs->ref, 1); - atomic_set(&ffs->opened, 0); + ffs->opened = 0; ffs->state = FFS_READ_DESCRIPTORS; mutex_init(&ffs->mutex); spin_lock_init(&ffs->eps_lock); @@ -2266,6 +2252,7 @@ static void ffs_data_reset(struct ffs_data *ffs) { ffs_data_clear(ffs); + spin_lock_irq(&ffs->eps_lock); ffs->raw_descs_data = NULL; ffs->raw_descs = NULL; ffs->raw_strings = NULL; @@ -2289,6 +2276,7 @@ static void ffs_data_reset(struct ffs_data *ffs) ffs->ms_os_descs_ext_prop_count = 0; ffs->ms_os_descs_ext_prop_name_len = 0; ffs->ms_os_descs_ext_prop_data_len = 0; + spin_unlock_irq(&ffs->eps_lock); } @@ -3756,6 +3744,7 @@ static int ffs_func_set_alt(struct usb_function *f, { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; + unsigned long flags; int ret = 0, intf; if (alt > MAX_ALT_SETTINGS) @@ -3768,12 +3757,15 @@ static int ffs_func_set_alt(struct usb_function *f, if (ffs->func) ffs_func_eps_disable(ffs->func); + spin_lock_irqsave(&ffs->eps_lock, flags); if (ffs->state == FFS_DEACTIVATED) { ffs->state = FFS_CLOSING; + spin_unlock_irqrestore(&ffs->eps_lock, flags); INIT_WORK(&ffs->reset_work, ffs_reset_work); schedule_work(&ffs->reset_work); return -ENODEV; } + spin_unlock_irqrestore(&ffs->eps_lock, flags); if (ffs->state != FFS_ACTIVE) return -ENODEV; @@ -3791,16 +3783,20 @@ static void ffs_func_disable(struct usb_function *f) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; + unsigned long flags; if (ffs->func) ffs_func_eps_disable(ffs->func); + spin_lock_irqsave(&ffs->eps_lock, flags); if (ffs->state == FFS_DEACTIVATED) { ffs->state = FFS_CLOSING; + spin_unlock_irqrestore(&ffs->eps_lock, flags); INIT_WORK(&ffs->reset_work, ffs_reset_work); schedule_work(&ffs->reset_work); return; } + spin_unlock_irqrestore(&ffs->eps_lock, flags); 
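As an editorial aside, the f_fs.c rework above replaces the ffs->mutex plus atomic_t open counter with a plain int guarded by the eps_lock spinlock. Below is a minimal, self-contained sketch of that counting pattern under stated assumptions; all identifiers are hypothetical and the code is an illustration, not part of the patch.

#include <linux/spinlock.h>

/* Hypothetical state mirroring the opened/state fields of ffs_data. */
struct example_state {
	spinlock_t lock;	/* plays the role of ffs->eps_lock */
	int opened;		/* open count, only touched under the lock */
	bool closing;		/* set once teardown has started */
};

static void example_teardown(struct example_state *st)
{
	/* heavy cleanup runs outside the spinlock, like ffs_data_reset() */
}

static int example_open(struct example_state *st)
{
	spin_lock_irq(&st->lock);
	if (st->closing) {
		spin_unlock_irq(&st->lock);
		return -EBUSY;		/* refuse new openers during teardown */
	}
	st->opened++;
	spin_unlock_irq(&st->lock);
	return 0;
}

static void example_release(struct example_state *st)
{
	bool last;

	spin_lock_irq(&st->lock);
	last = (--st->opened == 0);	/* last opener triggers teardown */
	if (last)
		st->closing = true;
	spin_unlock_irq(&st->lock);

	if (last)
		example_teardown(st);	/* expensive work after the lock is dropped */
}

The point of the pattern is that the count is cheap to update in atomic context, while the expensive reset work still happens only after the spinlock has been released.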
if (ffs->state == FFS_ACTIVE) { ffs->func = NULL; diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h index 4b3365f23fd750..6a80182aadd70d 100644 --- a/drivers/usb/gadget/function/u_fs.h +++ b/drivers/usb/gadget/function/u_fs.h @@ -176,7 +176,7 @@ struct ffs_data { /* reference counter */ refcount_t ref; /* how many files are opened (EP0 and others) */ - atomic_t opened; + int opened; /* EP0 state */ enum ffs_state state; diff --git a/drivers/vfio/pci/vfio_pci_dmabuf.c b/drivers/vfio/pci/vfio_pci_dmabuf.c index d4d0f7d08c53e2..4be4a85005cbcb 100644 --- a/drivers/vfio/pci/vfio_pci_dmabuf.c +++ b/drivers/vfio/pci/vfio_pci_dmabuf.c @@ -20,6 +20,16 @@ struct vfio_pci_dma_buf { u8 revoked : 1; }; +static int vfio_pci_dma_buf_pin(struct dma_buf_attachment *attachment) +{ + return -EOPNOTSUPP; +} + +static void vfio_pci_dma_buf_unpin(struct dma_buf_attachment *attachment) +{ + /* Do nothing */ +} + static int vfio_pci_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { @@ -76,6 +86,8 @@ static void vfio_pci_dma_buf_release(struct dma_buf *dmabuf) } static const struct dma_buf_ops vfio_pci_dmabuf_ops = { + .pin = vfio_pci_dma_buf_pin, + .unpin = vfio_pci_dma_buf_unpin, .attach = vfio_pci_dma_buf_attach, .map_dma_buf = vfio_pci_dma_buf_map, .unmap_dma_buf = vfio_pci_dma_buf_unmap, diff --git a/drivers/virt/coco/tsm-core.c b/drivers/virt/coco/tsm-core.c index f027876a2f1987..8712df8596a183 100644 --- a/drivers/virt/coco/tsm-core.c +++ b/drivers/virt/coco/tsm-core.c @@ -4,16 +4,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include -#include #include #include #include #include -#include static struct class *tsm_class; -static DECLARE_RWSEM(tsm_rwsem); static DEFINE_IDA(tsm_ida); static int match_id(struct device *dev, const void *data) @@ -108,32 +104,6 @@ void tsm_unregister(struct tsm_dev *tsm_dev) } EXPORT_SYMBOL_GPL(tsm_unregister); -/* must be invoked between tsm_register / tsm_unregister */ -int tsm_ide_stream_register(struct pci_ide *ide) -{ - struct pci_dev *pdev = ide->pdev; - struct pci_tsm *tsm = pdev->tsm; - struct tsm_dev *tsm_dev = tsm->tsm_dev; - int rc; - - rc = sysfs_create_link(&tsm_dev->dev.kobj, &pdev->dev.kobj, ide->name); - if (rc) - return rc; - - ide->tsm_dev = tsm_dev; - return 0; -} -EXPORT_SYMBOL_GPL(tsm_ide_stream_register); - -void tsm_ide_stream_unregister(struct pci_ide *ide) -{ - struct tsm_dev *tsm_dev = ide->tsm_dev; - - ide->tsm_dev = NULL; - sysfs_remove_link(&tsm_dev->dev.kobj, ide->name); -} -EXPORT_SYMBOL_GPL(tsm_ide_stream_unregister); - static void tsm_release(struct device *dev) { struct tsm_dev *tsm_dev = container_of(dev, typeof(*tsm_dev), dev); diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index e0d34e4e9076e3..af7f72abbb76aa 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -242,6 +242,7 @@ const struct file_operations v9fs_dir_operations = { .iterate_shared = v9fs_dir_readdir, .open = v9fs_file_open, .release = v9fs_dir_release, + .setlease = simple_nosetlease, }; const struct file_operations v9fs_dir_operations_dotl = { @@ -251,4 +252,5 @@ const struct file_operations v9fs_dir_operations_dotl = { .open = v9fs_file_open, .release = v9fs_dir_release, .fsync = v9fs_file_fsync_dotl, + .setlease = simple_nosetlease, }; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 89022e9f393b0a..2833b44f4b4f22 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -498,28 +498,6 @@ static int btree_migrate_folio(struct address_space *mapping, #define btree_migrate_folio NULL 
#endif -static int btree_writepages(struct address_space *mapping, - struct writeback_control *wbc) -{ - int ret; - - if (wbc->sync_mode == WB_SYNC_NONE) { - struct btrfs_fs_info *fs_info; - - if (wbc->for_kupdate) - return 0; - - fs_info = inode_to_fs_info(mapping->host); - /* this is a bit racy, but that's ok */ - ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, - BTRFS_DIRTY_METADATA_THRESH, - fs_info->dirty_metadata_batch); - if (ret < 0) - return 0; - } - return btree_write_cache_pages(mapping, wbc); -} - static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags) { if (folio_test_writeback(folio) || folio_test_dirty(folio)) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a4b74023618d30..f6cca3c97166f7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2286,8 +2286,7 @@ void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, } } -int btree_write_cache_pages(struct address_space *mapping, - struct writeback_control *wbc) +int btree_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct btrfs_eb_write_context ctx = { .wbc = wbc }; struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 02ebb2f238afc1..73571d5d3d5ad9 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -237,8 +237,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f u64 start, u64 end, struct writeback_control *wbc, bool pages_dirty); int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc); -int btree_write_cache_pages(struct address_space *mapping, - struct writeback_control *wbc); +int btree_writepages(struct address_space *mapping, struct writeback_control *wbc); void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, u64 end); void btrfs_readahead(struct readahead_control *rac); int set_folio_extent_mapped(struct folio *folio); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index f38d8305e46d7a..baadaaa189c05d 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -150,6 +150,7 @@ static void scrub_rbio_work_locked(struct work_struct *work); static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio) { bitmap_free(rbio->error_bitmap); + bitmap_free(rbio->stripe_uptodate_bitmap); kfree(rbio->stripe_pages); kfree(rbio->bio_paddrs); kfree(rbio->stripe_paddrs); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 6caba8be7c845c..10ed48d4a84665 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -139,6 +139,7 @@ static int copy_data_into_buffer(struct address_space *mapping, data_in = kmap_local_folio(folio, offset); memcpy(workspace->buf + cur - filepos, data_in, copy_length); kunmap_local(data_in); + folio_put(folio); cur += copy_length; } return 0; diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c index 0ea4db650f8585..9a115282f67da9 100644 --- a/fs/ceph/crypto.c +++ b/fs/ceph/crypto.c @@ -166,12 +166,13 @@ static struct inode *parse_longname(const struct inode *parent, struct ceph_vino vino = { .snap = CEPH_NOSNAP }; char *name_end, *inode_number; int ret = -EIO; - /* NUL-terminate */ - char *str __free(kfree) = kmemdup_nul(name, *name_len, GFP_KERNEL); + /* Snapshot name must start with an underscore */ + if (*name_len <= 0 || name[0] != '_') + return ERR_PTR(-EIO); + /* Skip initial '_' and NUL-terminate */ + char *str __free(kfree) = kmemdup_nul(name + 1, *name_len - 1, GFP_KERNEL); if (!str) return ERR_PTR(-ENOMEM); - /* Skip initial 
'_' */ - str++; name_end = strrchr(str, '_'); if (!name_end) { doutc(cl, "failed to parse long snapshot name: %s\n", str); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 86d7aa594ea993..804588524cd570 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -2214,6 +2214,7 @@ const struct file_operations ceph_dir_fops = { .fsync = ceph_fsync, .lock = ceph_lock, .flock = ceph_flock, + .setlease = simple_nosetlease, }; const struct file_operations ceph_snapdir_fops = { @@ -2221,6 +2222,7 @@ const struct file_operations ceph_snapdir_fops = { .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, + .setlease = simple_nosetlease, }; const struct inode_operations ceph_dir_iops = { diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 7e4eab824daef8..c45bd19d4b1ca7 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -5671,7 +5671,7 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc, u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid); u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid); struct ceph_client *cl = mdsc->fsc->client; - const char *fs_name = mdsc->fsc->mount_options->mds_namespace; + const char *fs_name = mdsc->mdsmap->m_fs_name; const char *spath = mdsc->fsc->mount_options->server_path; bool gid_matched = false; u32 gid, tlen, len; @@ -5679,7 +5679,8 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc, doutc(cl, "fsname check fs_name=%s match.fs_name=%s\n", fs_name, auth->match.fs_name ? auth->match.fs_name : ""); - if (auth->match.fs_name && strcmp(auth->match.fs_name, fs_name)) { + + if (!ceph_namespace_match(auth->match.fs_name, fs_name)) { /* fsname mismatch, try next one */ return 0; } diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 2c7b151a7c95cc..b228e5ecfb926d 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -353,22 +353,33 @@ struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p, __decode_and_drop_type(p, end, u8, bad_ext); } if (mdsmap_ev >= 8) { - u32 fsname_len; + size_t fsname_len; + /* enabled */ ceph_decode_8_safe(p, end, m->m_enabled, bad_ext); + /* fs_name */ - ceph_decode_32_safe(p, end, fsname_len, bad_ext); + m->m_fs_name = ceph_extract_encoded_string(p, end, + &fsname_len, + GFP_NOFS); + if (IS_ERR(m->m_fs_name)) { + m->m_fs_name = NULL; + goto nomem; + } /* validate fsname against mds_namespace */ - if (!namespace_equals(mdsc->fsc->mount_options, *p, + if (!namespace_equals(mdsc->fsc->mount_options, m->m_fs_name, fsname_len)) { - pr_warn_client(cl, "fsname %*pE doesn't match mds_namespace %s\n", - (int)fsname_len, (char *)*p, + pr_warn_client(cl, "fsname %s doesn't match mds_namespace %s\n", + m->m_fs_name, mdsc->fsc->mount_options->mds_namespace); goto bad; } - /* skip fsname after validation */ - ceph_decode_skip_n(p, end, fsname_len, bad); + } else { + m->m_enabled = false; + m->m_fs_name = kstrdup(CEPH_OLD_FS_NAME, GFP_NOFS); + if (!m->m_fs_name) + goto nomem; } /* damaged */ if (mdsmap_ev >= 9) { @@ -430,6 +441,7 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m) kfree(m->m_info); } kfree(m->m_data_pg_pools); + kfree(m->m_fs_name); kfree(m); } diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h index 1f2171dd01bfa3..d48d07c3516d44 100644 --- a/fs/ceph/mdsmap.h +++ b/fs/ceph/mdsmap.h @@ -45,6 +45,7 @@ struct ceph_mdsmap { bool m_enabled; bool m_damaged; int m_num_laggy; + char *m_fs_name; }; static inline struct ceph_entity_addr * diff --git a/fs/ceph/super.h b/fs/ceph/super.h index a1f781c46b41d3..29a980e22dc26c 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h 
@@ -104,14 +104,26 @@ struct ceph_mount_options { struct fscrypt_dummy_policy dummy_enc_policy; }; +#define CEPH_NAMESPACE_WILDCARD "*" + +static inline bool ceph_namespace_match(const char *pattern, + const char *target) +{ + if (!pattern || !pattern[0] || + !strcmp(pattern, CEPH_NAMESPACE_WILDCARD)) + return true; + + return !strcmp(pattern, target); +} + /* * Check if the mds namespace in ceph_mount_options matches * the passed in namespace string. First time match (when * ->mds_namespace is NULL) is treated specially, since * ->mds_namespace needs to be initialized by the caller. */ -static inline int namespace_equals(struct ceph_mount_options *fsopt, - const char *namespace, size_t len) +static inline bool namespace_equals(struct ceph_mount_options *fsopt, + const char *namespace, size_t len) { return !(fsopt->mds_namespace && (strlen(fsopt->mds_namespace) != len || diff --git a/fs/dcache.c b/fs/dcache.c index dc2fff4811d153..66dd1bb830d1a8 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1104,6 +1104,16 @@ struct dentry *d_find_alias_rcu(struct inode *inode) return de; } +/** + * d_dispose_if_unused - move unreferenced dentries to shrink list + * @dentry: dentry in question + * @dispose: head of shrink list + * + * If dentry has no external references, move it to shrink list. + * + * NOTE!!! The caller is responsible for preventing eviction of the dentry by + * holding dentry->d_inode->i_lock or equivalent. + */ void d_dispose_if_unused(struct dentry *dentry, struct list_head *dispose) { spin_lock(&dentry->d_lock); diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c index 6edc10958ecf55..70e13db260dba0 100644 --- a/fs/efivarfs/vars.c +++ b/fs/efivarfs/vars.c @@ -552,7 +552,7 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, err = __efivar_entry_get(entry, attributes, size, data); efivar_unlock(); - return 0; + return err; } /** diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index baa2f214114661..5444fc706ac7d6 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -2492,7 +2492,9 @@ static void wakeup_dirtytime_writeback(struct work_struct *w) wb_wakeup(wb); } rcu_read_unlock(); - schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); + if (dirtytime_expire_interval) + schedule_delayed_work(&dirtytime_work, + round_jiffies_relative(dirtytime_expire_interval * HZ)); } static int dirtytime_interval_handler(const struct ctl_table *table, int write, @@ -2501,8 +2503,12 @@ static int dirtytime_interval_handler(const struct ctl_table *table, int write, int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - if (ret == 0 && write) - mod_delayed_work(system_percpu_wq, &dirtytime_work, 0); + if (ret == 0 && write) { + if (dirtytime_expire_interval) + mod_delayed_work(system_percpu_wq, &dirtytime_work, 0); + else + cancel_delayed_work_sync(&dirtytime_work); + } return ret; } @@ -2519,7 +2525,9 @@ static const struct ctl_table vm_fs_writeback_table[] = { static int __init start_dirtytime_writeback(void) { - schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); + if (dirtytime_expire_interval) + schedule_delayed_work(&dirtytime_work, + round_jiffies_relative(dirtytime_expire_interval * HZ)); register_sysctl_init("vm", vm_fs_writeback_table); return 0; } diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 4b6b3d2758ff22..3927cb069236e9 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -32,9 +32,9 @@ struct dentry_bucket { spinlock_t lock; }; -#define HASH_BITS 5 -#define HASH_SIZE (1 << HASH_BITS) -static struct 
dentry_bucket dentry_hash[HASH_SIZE]; +#define FUSE_HASH_BITS 5 +#define FUSE_HASH_SIZE (1 << FUSE_HASH_BITS) +static struct dentry_bucket dentry_hash[FUSE_HASH_SIZE]; struct delayed_work dentry_tree_work; /* Minimum invalidation work queue frequency */ @@ -83,7 +83,7 @@ MODULE_PARM_DESC(inval_wq, static inline struct dentry_bucket *get_dentry_bucket(struct dentry *dentry) { - int i = hash_ptr(dentry, HASH_BITS); + int i = hash_ptr(dentry, FUSE_HASH_BITS); return &dentry_hash[i]; } @@ -164,25 +164,31 @@ static void fuse_dentry_tree_work(struct work_struct *work) struct rb_node *node; int i; - for (i = 0; i < HASH_SIZE; i++) { + for (i = 0; i < FUSE_HASH_SIZE; i++) { spin_lock(&dentry_hash[i].lock); node = rb_first(&dentry_hash[i].tree); while (node) { fd = rb_entry(node, struct fuse_dentry, node); - if (time_after64(get_jiffies_64(), fd->time)) { - rb_erase(&fd->node, &dentry_hash[i].tree); - RB_CLEAR_NODE(&fd->node); + if (!time_before64(fd->time, get_jiffies_64())) + break; + + rb_erase(&fd->node, &dentry_hash[i].tree); + RB_CLEAR_NODE(&fd->node); + spin_lock(&fd->dentry->d_lock); + /* If dentry is still referenced, let next dput release it */ + fd->dentry->d_flags |= DCACHE_OP_DELETE; + spin_unlock(&fd->dentry->d_lock); + d_dispose_if_unused(fd->dentry, &dispose); + if (need_resched()) { spin_unlock(&dentry_hash[i].lock); - d_dispose_if_unused(fd->dentry, &dispose); cond_resched(); spin_lock(&dentry_hash[i].lock); - } else - break; + } node = rb_first(&dentry_hash[i].tree); } spin_unlock(&dentry_hash[i].lock); - shrink_dentry_list(&dispose); } + shrink_dentry_list(&dispose); if (inval_wq) schedule_delayed_work(&dentry_tree_work, @@ -213,7 +219,7 @@ void fuse_dentry_tree_init(void) { int i; - for (i = 0; i < HASH_SIZE; i++) { + for (i = 0; i < FUSE_HASH_SIZE; i++) { spin_lock_init(&dentry_hash[i].lock); dentry_hash[i].tree = RB_ROOT; } @@ -227,7 +233,7 @@ void fuse_dentry_tree_cleanup(void) inval_wq = 0; cancel_delayed_work_sync(&dentry_tree_work); - for (i = 0; i < HASH_SIZE; i++) + for (i = 0; i < FUSE_HASH_SIZE; i++) WARN_ON_ONCE(!RB_EMPTY_ROOT(&dentry_hash[i].tree)); } @@ -479,18 +485,12 @@ static int fuse_dentry_init(struct dentry *dentry) return 0; } -static void fuse_dentry_prune(struct dentry *dentry) +static void fuse_dentry_release(struct dentry *dentry) { struct fuse_dentry *fd = dentry->d_fsdata; if (!RB_EMPTY_NODE(&fd->node)) fuse_dentry_tree_del_node(dentry); -} - -static void fuse_dentry_release(struct dentry *dentry) -{ - struct fuse_dentry *fd = dentry->d_fsdata; - kfree_rcu(fd, rcu); } @@ -527,7 +527,6 @@ const struct dentry_operations fuse_dentry_operations = { .d_revalidate = fuse_dentry_revalidate, .d_delete = fuse_dentry_delete, .d_init = fuse_dentry_init, - .d_prune = fuse_dentry_prune, .d_release = fuse_dentry_release, .d_automount = fuse_dentry_automount, }; @@ -1584,8 +1583,8 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid, { int err = -ENOTDIR; struct inode *parent; - struct dentry *dir; - struct dentry *entry; + struct dentry *dir = NULL; + struct dentry *entry = NULL; parent = fuse_ilookup(fc, parent_nodeid, NULL); if (!parent) @@ -1598,11 +1597,19 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid, dir = d_find_alias(parent); if (!dir) goto put_parent; - - entry = start_removing_noperm(dir, name); - dput(dir); - if (IS_ERR(entry)) - goto put_parent; + while (!entry) { + struct dentry *child = try_lookup_noperm(name, dir); + if (!child || IS_ERR(child)) + goto put_parent; + entry = start_removing_dentry(dir, 
child); + dput(child); + if (IS_ERR(entry)) + goto put_parent; + if (!d_same_name(entry, dir, name)) { + end_removing(entry); + entry = NULL; + } + } fuse_dir_changed(parent); if (!(flags & FUSE_EXPIRE_ONLY)) @@ -1640,6 +1647,7 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid, end_removing(entry); put_parent: + dput(dir); iput(parent); return err; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index b2d23c98c99655..86376f0dbf3a55 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1608,6 +1608,7 @@ const struct file_operations gfs2_dir_fops = { .lock = gfs2_lock, .flock = gfs2_flock, .llseek = default_llseek, + .setlease = simple_nosetlease, .fop_flags = FOP_ASYNC_LOCK, }; diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index fd9a2cf9562024..6beb876658c09b 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -851,6 +851,7 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter, } folio_get(folio); + folio_wait_stable(folio); return folio; } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8f9ea79b788230..e3654f4a1b9aab 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -66,6 +66,7 @@ const struct file_operations nfs_dir_operations = { .open = nfs_opendir, .release = nfs_closedir, .fsync = nfs_fsync_dir, + .setlease = simple_nosetlease, }; const struct address_space_operations nfs_dir_aops = { diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 7317f26892c578..7f43e890d3564a 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -431,8 +431,6 @@ void nfs42_ssc_unregister_ops(void) static int nfs4_setlease(struct file *file, int arg, struct file_lease **lease, void **priv) { - if (!S_ISREG(file_inode(file)->i_mode)) - return -EINVAL; return nfs4_proc_setlease(file, arg, lease, priv); } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 81dfc26bfae8af..26188a4ad1abd9 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -656,6 +656,7 @@ static int do_procmap_query(struct mm_struct *mm, void __user *uarg) struct proc_maps_locking_ctx lock_ctx = { .mm = mm }; struct procmap_query karg; struct vm_area_struct *vma; + struct file *vm_file = NULL; const char *name = NULL; char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL; __u64 usize; @@ -727,21 +728,6 @@ static int do_procmap_query(struct mm_struct *mm, void __user *uarg) karg.inode = 0; } - if (karg.build_id_size) { - __u32 build_id_sz; - - err = build_id_parse(vma, build_id_buf, &build_id_sz); - if (err) { - karg.build_id_size = 0; - } else { - if (karg.build_id_size < build_id_sz) { - err = -ENAMETOOLONG; - goto out; - } - karg.build_id_size = build_id_sz; - } - } - if (karg.vma_name_size) { size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size); const struct path *path; @@ -775,10 +761,34 @@ static int do_procmap_query(struct mm_struct *mm, void __user *uarg) karg.vma_name_size = name_sz; } + if (karg.build_id_size && vma->vm_file) + vm_file = get_file(vma->vm_file); + /* unlock vma or mmap_lock, and put mm_struct before copying data to user */ query_vma_teardown(&lock_ctx); mmput(mm); + if (karg.build_id_size) { + __u32 build_id_sz; + + if (vm_file) + err = build_id_parse_file(vm_file, build_id_buf, &build_id_sz); + else + err = -ENOENT; + if (err) { + karg.build_id_size = 0; + } else { + if (karg.build_id_size < build_id_sz) { + err = -ENAMETOOLONG; + goto out; + } + karg.build_id_size = build_id_sz; + } + } + + if (vm_file) + fput(vm_file); + if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr), name, karg.vma_name_size)) { 
kfree(name_buf); @@ -798,6 +808,8 @@ static int do_procmap_query(struct mm_struct *mm, void __user *uarg) out: query_vma_teardown(&lock_ctx); mmput(mm); + if (vm_file) + fput(vm_file); kfree(name_buf); return err; } diff --git a/fs/readdir.c b/fs/readdir.c index 7764b863897888..73707b6816e9aa 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -316,6 +316,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, struct getdents_callback buf = { .ctx.actor = filldir, .ctx.count = count, + .ctx.dt_flags_mask = FILLDIR_FLAG_NOINTR, .current_dir = dirent }; int error; @@ -400,6 +401,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, struct getdents_callback64 buf = { .ctx.actor = filldir64, .ctx.count = count, + .ctx.dt_flags_mask = FILLDIR_FLAG_NOINTR, .current_dir = dirent }; int error; @@ -569,6 +571,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, struct compat_getdents_callback buf = { .ctx.actor = compat_filldir, .ctx.count = count, + .ctx.dt_flags_mask = FILLDIR_FLAG_NOINTR, .current_dir = dirent, }; int error; diff --git a/fs/romfs/super.c b/fs/romfs/super.c index 360b008541154e..ac55193bf39889 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -458,7 +458,10 @@ static int romfs_fill_super(struct super_block *sb, struct fs_context *fc) #ifdef CONFIG_BLOCK if (!sb->s_mtd) { - sb_set_blocksize(sb, ROMBSIZE); + if (!sb_set_blocksize(sb, ROMBSIZE)) { + errorf(fc, "romfs: unable to set blocksize\n"); + return -EINVAL; + } } else { sb->s_blocksize = ROMBSIZE; sb->s_blocksize_bits = blksize_bits(ROMBSIZE); diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index d9664634144d3e..a3dc7cb1ab541d 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -1149,9 +1149,6 @@ cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; - if (!S_ISREG(inode->i_mode)) - return -EINVAL; - /* Check if file is oplocked if this is request for new lease */ if (arg == F_UNLCK || ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) || @@ -1712,6 +1709,7 @@ const struct file_operations cifs_dir_ops = { .remap_file_range = cifs_remap_file_range, .llseek = generic_file_llseek, .fsync = cifs_dir_fsync, + .setlease = simple_nosetlease, }; static void diff --git a/fs/smb/client/cifstransport.c b/fs/smb/client/cifstransport.c index 28d1cee9062505..98287132626e34 100644 --- a/fs/smb/client/cifstransport.c +++ b/fs/smb/client/cifstransport.c @@ -251,13 +251,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = cifs_send_recv(xid, ses, ses->server, &rqst, &resp_buf_type, flags, &resp_iov); if (rc < 0) - return rc; + goto out; if (out_buf) { *pbytes_returned = resp_iov.iov_len; if (resp_iov.iov_len) memcpy(out_buf, resp_iov.iov_base, resp_iov.iov_len); } + +out: free_rsp_buf(resp_buf_type, resp_iov.iov_base); return rc; } diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c index 7f11ae6bb785dd..2dd08388ea8733 100644 --- a/fs/smb/client/smb2file.c +++ b/fs/smb/client/smb2file.c @@ -178,6 +178,7 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, &err_buftype); if (rc == -EACCES && retry_without_read_attributes) { + free_rsp_buf(err_buftype, err_iov.iov_base); oparms->desired_access &= ~FILE_READ_ATTRIBUTES; rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, &err_buftype); diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c index 
42bedc4ec7af77..230d7589d15cc9 100644 --- a/fs/vboxsf/dir.c +++ b/fs/vboxsf/dir.c @@ -186,6 +186,7 @@ const struct file_operations vboxsf_dir_fops = { .release = vboxsf_dir_release, .read = generic_read_dir, .llseek = generic_file_llseek, + .setlease = simple_nosetlease, }; /* diff --git a/include/linux/buildid.h b/include/linux/buildid.h index 831c1b4b626c87..7acc06b22fb7d2 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -7,7 +7,10 @@ #define BUILD_ID_SIZE_MAX 20 struct vm_area_struct; +struct file; + int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); +int build_id_parse_file(struct file *file, unsigned char *build_id, __u32 *size); int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index c7f2c63b3bc3fb..08e5dbe15ca444 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -31,6 +31,12 @@ #define CEPH_INO_CEPH 2 /* hidden .ceph dir */ #define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */ +/* + * name for "old" CephFS file systems, + * see ceph.git e2b151d009640114b2565c901d6f41f6cd5ec652 + */ +#define CEPH_OLD_FS_NAME "cephfs" + /* arbitrary limit on max # of monitors (cluster of 3 is typical) */ #define CEPH_MAX_MON 31 diff --git a/include/linux/cma.h b/include/linux/cma.h index 62d9c1cf632652..2e6931735880bd 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -57,6 +57,15 @@ extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long e extern void cma_reserve_pages_on_error(struct cma *cma); +#ifdef CONFIG_DMA_CMA +extern bool cma_skip_dt_default_reserved_mem(void); +#else +static inline bool cma_skip_dt_default_reserved_mem(void) +{ + return false; +} +#endif + #ifdef CONFIG_CMA struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp); bool cma_free_folio(struct cma *cma, const struct folio *folio); diff --git a/include/linux/fs.h b/include/linux/fs.h index f5c9cf28c4dcf9..a01621fa636a60 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1855,6 +1855,8 @@ struct dir_context { * INT_MAX unlimited */ int count; + /* @actor supports these flags in d_type high bits */ + unsigned int dt_flags_mask; }; /* If OR-ed with d_type, pending signals are not checked */ @@ -3524,7 +3526,9 @@ static inline bool dir_emit(struct dir_context *ctx, const char *name, int namelen, u64 ino, unsigned type) { - return ctx->actor(ctx, name, namelen, ctx->pos, ino, type); + unsigned int dt_mask = S_DT_MASK | ctx->dt_flags_mask; + + return ctx->actor(ctx, name, namelen, ctx->pos, ino, type & dt_mask); } static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx) { diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 9c6ac4b62eb995..338a1921a50ad4 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -641,6 +641,17 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, __kasan_unpoison_vmap_areas(vms, nr_vms, flags); } +void __kasan_vrealloc(const void *start, unsigned long old_size, + unsigned long new_size); + +static __always_inline void kasan_vrealloc(const void *start, + unsigned long old_size, + unsigned long new_size) +{ + if (kasan_enabled()) + __kasan_vrealloc(start, old_size, new_size); +} + #else /* CONFIG_KASAN_VMALLOC */ static inline void kasan_populate_early_vm_area_shadow(void *start, @@ -670,6 +681,9 @@ 
kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, kasan_vmalloc_flags_t flags) { } +static inline void kasan_vrealloc(const void *start, unsigned long old_size, + unsigned long new_size) { } + #endif /* CONFIG_KASAN_VMALLOC */ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 772919e8096aaf..ba9e3988c07c31 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -175,6 +175,9 @@ int klp_enable_patch(struct klp_patch *); int klp_module_coming(struct module *mod); void klp_module_going(struct module *mod); +void *klp_find_section_by_name(const struct module *mod, const char *name, + size_t *sec_size); + void klp_copy_process(struct task_struct *child); void klp_update_patch_state(struct task_struct *task); diff --git a/include/linux/memfd.h b/include/linux/memfd.h index cc74de3dbcfe93..c328a7b356d0e5 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -17,6 +17,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx); * to by vm_flags_ptr. */ int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr); +struct file *memfd_alloc_file(const char *name, unsigned int flags); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { @@ -31,6 +32,11 @@ static inline int memfd_check_seals_mmap(struct file *file, { return 0; } + +static inline struct file *memfd_alloc_file(const char *name, unsigned int flags) +{ + return ERR_PTR(-EINVAL); +} #endif #endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 713ec0435b48ad..e3c2ccf872a8af 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -224,7 +224,8 @@ static inline bool is_fsdax_page(const struct page *page) } #ifdef CONFIG_ZONE_DEVICE -void zone_device_page_init(struct page *page, unsigned int order); +void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap, + unsigned int order); void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); @@ -234,9 +235,11 @@ bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); unsigned long memremap_compat_align(void); -static inline void zone_device_folio_init(struct folio *folio, unsigned int order) +static inline void zone_device_folio_init(struct folio *folio, + struct dev_pagemap *pgmap, + unsigned int order) { - zone_device_page_init(&folio->page, order); + zone_device_page_init(&folio->page, pgmap, order); if (order) folio_set_large_rmappable(folio); } diff --git a/include/linux/pci-ide.h b/include/linux/pci-ide.h index 37a1ad9501b05d..ae07d9f699c001 100644 --- a/include/linux/pci-ide.h +++ b/include/linux/pci-ide.h @@ -26,7 +26,7 @@ enum pci_ide_partner_select { /** * struct pci_ide_partner - Per port pair Selective IDE Stream settings * @rid_start: Partner Port Requester ID range start - * @rid_end: Partner Port Requester ID range end + * @rid_end: Partner Port Requester ID range end (inclusive) * @stream_index: Selective IDE Stream Register Block selection * @mem_assoc: PCI bus memory address association for targeting peer partner * @pref_assoc: PCI bus prefetchable memory address association for @@ -82,7 +82,6 @@ struct pci_ide_regs { * @host_bridge_stream: allocated from host bridge @ide_stream_ida pool * @stream_id: unique Stream ID (within Partner Port pairing) * @name: name of the established 
Selective IDE Stream in sysfs - * @tsm_dev: For TSM established IDE, the TSM device context * * Negative @stream_id values indicate "uninitialized" on the * expectation that with TSM established IDE the TSM owns the stream_id @@ -94,7 +93,6 @@ struct pci_ide { u8 host_bridge_stream; int stream_id; const char *name; - struct tsm_dev *tsm_dev; }; /* diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index b9c8520b4bd3d6..509c5592aab04f 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -59,7 +59,8 @@ struct davinci_mcasp_pdata { bool i2s_accurate_sck; /* McASP specific fields */ - int tdm_slots; + int tdm_slots_tx; + int tdm_slots_rx; u8 op_mode; u8 dismod; u8 num_serializer; diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h index 332dc14b81c977..ef0811379c5401 100644 --- a/include/linux/rseq_types.h +++ b/include/linux/rseq_types.h @@ -121,8 +121,7 @@ struct mm_cid_pcpu { /** * struct mm_mm_cid - Storage for per MM CID data * @pcpu: Per CPU storage for CIDs associated to a CPU - * @percpu: Set, when CIDs are in per CPU mode - * @transit: Set to MM_CID_TRANSIT during a mode change transition phase + * @mode: Indicates per CPU and transition mode * @max_cids: The exclusive maximum CID value for allocation and convergence * @irq_work: irq_work to handle the affinity mode change case * @work: Regular work to handle the affinity mode change case @@ -139,8 +138,7 @@ struct mm_cid_pcpu { struct mm_mm_cid { /* Hotpath read mostly members */ struct mm_cid_pcpu __percpu *pcpu; - unsigned int percpu; - unsigned int transit; + unsigned int mode; unsigned int max_cids; /* Rarely used. Moves @lock and @mutex into the second cacheline */ diff --git a/include/linux/sched.h b/include/linux/sched.h index da0133524d08c2..5f00b5ed0f3b7d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1776,6 +1776,11 @@ static __always_inline bool is_percpu_thread(void) (current->nr_cpus_allowed == 1); } +static __always_inline bool is_user_task(struct task_struct *task) +{ + return task->mm && !(task->flags & (PF_KTHREAD | PF_USER_WORKER)); +} + /* Per-process atomic flags. */ #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 86737076101d4a..112e48970338fa 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -4301,6 +4301,18 @@ skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) skb_headlen(skb), buffer); } +/* Variant of skb_header_pointer() where @offset is user-controlled + * and potentially negative. 
+ */ +static inline void * __must_check +skb_header_pointer_careful(const struct sk_buff *skb, int offset, + int len, void *buffer) +{ + if (unlikely(offset < 0 && -offset > skb_headroom(skb))) + return NULL; + return skb_header_pointer(skb, offset, len, buffer); +} + static inline void * __must_check skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len) { diff --git a/include/linux/tsm.h b/include/linux/tsm.h index a3b7ab668effff..22e05b2aac69cf 100644 --- a/include/linux/tsm.h +++ b/include/linux/tsm.h @@ -123,7 +123,4 @@ int tsm_report_unregister(const struct tsm_report_ops *ops); struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *ops); void tsm_unregister(struct tsm_dev *tsm_dev); struct tsm_dev *find_tsm_dev(int id); -struct pci_ide; -int tsm_ide_stream_register(struct pci_ide *ide); -void tsm_ide_stream_unregister(struct pci_ide *ide); #endif /* __TSM_H */ diff --git a/include/net/bonding.h b/include/net/bonding.h index 49edc7da05867f..46207840355709 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -521,13 +521,14 @@ static inline int bond_is_ip6_target_ok(struct in6_addr *addr) static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond, struct slave *slave) { + unsigned long tmp, ret = READ_ONCE(slave->target_last_arp_rx[0]); int i = 1; - unsigned long ret = slave->target_last_arp_rx[0]; - - for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) - if (time_before(slave->target_last_arp_rx[i], ret)) - ret = slave->target_last_arp_rx[i]; + for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) { + tmp = READ_ONCE(slave->target_last_arp_rx[i]); + if (time_before(tmp, ret)) + ret = tmp; + } return ret; } @@ -537,7 +538,7 @@ static inline unsigned long slave_last_rx(struct bonding *bond, if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL) return slave_oldest_target_arp_rx(bond, slave); - return slave->last_rx; + return READ_ONCE(slave->last_rx); } static inline void slave_update_last_tx(struct slave *slave) diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 127e6c7d910dcb..c54df042db6be2 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -219,6 +219,8 @@ static inline void nfc_free_device(struct nfc_dev *dev) int nfc_register_device(struct nfc_dev *dev); +void nfc_unregister_rfkill(struct nfc_dev *dev); +void nfc_remove_device(struct nfc_dev *dev); void nfc_unregister_device(struct nfc_dev *dev); /** diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index 5928af539c4681..ae1e1489b6710a 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -9,6 +9,7 @@ #ifndef __CS35L56_H #define __CS35L56_H +#include #include #include #include @@ -26,6 +27,9 @@ struct snd_ctl_elem_value; #define CS35L56_GLOBAL_ENABLES 0x0002014 #define CS35L56_BLOCK_ENABLES 0x0002018 #define CS35L56_BLOCK_ENABLES2 0x000201C +#define CS35L56_SYNC_GPIO1_CFG 0x0002410 +#define CS35L56_ASP2_DIO_GPIO13_CFG 0x0002440 +#define CS35L56_UPDATE_REGS 0x0002A0C #define CS35L56_REFCLK_INPUT 0x0002C04 #define CS35L56_GLOBAL_SAMPLE_RATE 0x0002C0C #define CS35L56_OTP_MEM_53 0x00300D4 @@ -65,6 +69,9 @@ struct snd_ctl_elem_value; #define CS35L56_IRQ1_MASK_8 0x000E0AC #define CS35L56_IRQ1_MASK_18 0x000E0D4 #define CS35L56_IRQ1_MASK_20 0x000E0DC +#define CS35L56_GPIO_STATUS1 0x000F000 +#define CS35L56_GPIO1_CTRL1 0x000F008 +#define CS35L56_GPIO13_CTRL1 0x000F038 #define CS35L56_MIXER_NGATE_CH1_CFG 0x0010004 #define CS35L56_MIXER_NGATE_CH2_CFG 0x0010008 #define 
CS35L56_DSP_MBOX_1_RAW 0x0011000 @@ -130,6 +137,17 @@ struct snd_ctl_elem_value; #define CS35L56_MTLREVID_MASK 0x0000000F #define CS35L56_REVID_B0 0x000000B0 +/* PAD_INTF */ +#define CS35L56_PAD_GPIO_PULL_MASK GENMASK(3, 2) +#define CS35L56_PAD_GPIO_IE BIT(0) + +#define CS35L56_PAD_PULL_NONE 0 +#define CS35L56_PAD_PULL_UP 1 +#define CS35L56_PAD_PULL_DOWN 2 + +/* UPDATE_REGS */ +#define CS35L56_UPDT_GPIO_PRES BIT(6) + /* ASP_ENABLES1 */ #define CS35L56_ASP_RX2_EN_SHIFT 17 #define CS35L56_ASP_RX1_EN_SHIFT 16 @@ -185,6 +203,12 @@ struct snd_ctl_elem_value; /* MIXER_NGATE_CHn_CFG */ #define CS35L56_AUX_NGATE_CHn_EN 0x00000001 +/* GPIOn_CTRL1 */ +#define CS35L56_GPIO_DIR_MASK BIT(31) +#define CS35L56_GPIO_FN_MASK GENMASK(2, 0) + +#define CS35L56_GPIO_FN_GPIO 0x00000001 + /* Mixer input sources */ #define CS35L56_INPUT_SRC_NONE 0x00 #define CS35L56_INPUT_SRC_ASP1RX1 0x08 @@ -279,6 +303,7 @@ struct snd_ctl_elem_value; #define CS35L56_HALO_STATE_TIMEOUT_US 250000 #define CS35L56_RESET_PULSE_MIN_US 1100 #define CS35L56_WAKE_HOLD_TIME_US 1000 +#define CS35L56_PAD_PULL_SETTLE_US 10 #define CS35L56_CALIBRATION_POLL_US (100 * USEC_PER_MSEC) #define CS35L56_CALIBRATION_TIMEOUT_US (5 * USEC_PER_SEC) @@ -289,6 +314,9 @@ struct snd_ctl_elem_value; #define CS35L56_NUM_BULK_SUPPLIES 3 #define CS35L56_NUM_DSP_REGIONS 5 +#define CS35L56_MAX_GPIO 13 +#define CS35L63_MAX_GPIO 9 + /* Additional margin for SYSTEM_RESET to control port ready on SPI */ #define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500) @@ -338,6 +366,10 @@ struct cs35l56_base { const struct cirrus_amp_cal_controls *calibration_controls; struct dentry *debugfs; u64 silicon_uid; + u8 onchip_spkid_gpios[5]; + u8 num_onchip_spkid_gpios; + u8 onchip_spkid_pulls[5]; + u8 num_onchip_spkid_pulls; }; static inline bool cs35l56_is_otp_register(unsigned int reg) @@ -413,6 +445,11 @@ void cs35l56_warn_if_firmware_missing(struct cs35l56_base *cs35l56_base); void cs35l56_log_tuning(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp); int cs35l56_hw_init(struct cs35l56_base *cs35l56_base); int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base); +int cs35l56_check_and_save_onchip_spkid_gpios(struct cs35l56_base *cs35l56_base, + const u32 *gpios, int num_gpios, + const u32 *pulls, int num_pulls); +int cs35l56_configure_onchip_spkid_pads(struct cs35l56_base *cs35l56_base); +int cs35l56_read_onchip_spkid(struct cs35l56_base *cs35l56_base); int cs35l56_get_bclk_freq_id(unsigned int freq); void cs35l56_fill_supply_names(struct regulator_bulk_data *data); diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h index 6e9391b3816c6a..79bd5a7a0f884e 100644 --- a/include/sound/sdca_function.h +++ b/include/sound/sdca_function.h @@ -798,6 +798,7 @@ struct sdca_control_range { * @sel: Identifier used for addressing. * @nbits: Number of bits used in the Control. * @values: Holds the Control value for constants and defaults. + * @reset: Defined reset value for the Control. * @cn_list: A bitmask showing the valid Control Numbers within this Control, * Control Numbers typically represent channels. * @interrupt_position: SCDA interrupt line that will alert to changes on this @@ -808,6 +809,7 @@ struct sdca_control_range { * @layers: Bitmask of access layers of the Control. * @deferrable: Indicates if the access to the Control can be deferred. * @has_default: Indicates the Control has a default value to be written. + * @has_reset: Indicates the Control has a defined reset value. 
* @has_fixed: Indicates the Control only supports a single value. */ struct sdca_control { @@ -816,6 +818,7 @@ struct sdca_control { int nbits; int *values; + int reset; u64 cn_list; int interrupt_position; @@ -827,6 +830,7 @@ struct sdca_control { bool deferrable; bool is_volatile; bool has_default; + bool has_reset; bool has_fixed; }; diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h index 9d3c54cb8223c9..7c03bdc951bb92 100644 --- a/include/sound/tas2781.h +++ b/include/sound/tas2781.h @@ -2,7 +2,7 @@ // // ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier // -// Copyright (C) 2022 - 2025 Texas Instruments Incorporated +// Copyright (C) 2022 - 2026 Texas Instruments Incorporated // https://www.ti.com // // The TAS2563/TAS2781 driver implements a flexible and configurable @@ -233,7 +233,6 @@ struct tasdevice_priv { bool playback_started; bool isacpi; bool isspi; - bool is_user_space_calidata; unsigned int global_addr; int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv, diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h index b3fef140ae155b..33e99e792f1aa2 100644 --- a/include/trace/events/dma.h +++ b/include/trace/events/dma.h @@ -275,6 +275,8 @@ TRACE_EVENT(dma_free_sgt, sizeof(u64), sizeof(u64))) ); +#define DMA_TRACE_MAX_ENTRIES 128 + TRACE_EVENT(dma_map_sg, TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents, int ents, enum dma_data_direction dir, unsigned long attrs), @@ -282,9 +284,12 @@ TRACE_EVENT(dma_map_sg, TP_STRUCT__entry( __string(device, dev_name(dev)) - __dynamic_array(u64, phys_addrs, nents) - __dynamic_array(u64, dma_addrs, ents) - __dynamic_array(unsigned int, lengths, ents) + __field(int, full_nents) + __field(int, full_ents) + __field(bool, truncated) + __dynamic_array(u64, phys_addrs, min(nents, DMA_TRACE_MAX_ENTRIES)) + __dynamic_array(u64, dma_addrs, min(ents, DMA_TRACE_MAX_ENTRIES)) + __dynamic_array(unsigned int, lengths, min(ents, DMA_TRACE_MAX_ENTRIES)) __field(enum dma_data_direction, dir) __field(unsigned long, attrs) ), @@ -292,11 +297,16 @@ TRACE_EVENT(dma_map_sg, TP_fast_assign( struct scatterlist *sg; int i; + int traced_nents = min_t(int, nents, DMA_TRACE_MAX_ENTRIES); + int traced_ents = min_t(int, ents, DMA_TRACE_MAX_ENTRIES); __assign_str(device); - for_each_sg(sgl, sg, nents, i) + __entry->full_nents = nents; + __entry->full_ents = ents; + __entry->truncated = (nents > DMA_TRACE_MAX_ENTRIES) || (ents > DMA_TRACE_MAX_ENTRIES); + for_each_sg(sgl, sg, traced_nents, i) ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg); - for_each_sg(sgl, sg, ents, i) { + for_each_sg(sgl, sg, traced_ents, i) { ((u64 *)__get_dynamic_array(dma_addrs))[i] = sg_dma_address(sg); ((unsigned int *)__get_dynamic_array(lengths))[i] = @@ -306,9 +316,12 @@ TRACE_EVENT(dma_map_sg, __entry->attrs = attrs; ), - TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s", + TP_printk("%s dir=%s nents=%d/%d ents=%d/%d%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s", __get_str(device), decode_dma_data_direction(__entry->dir), + min_t(int, __entry->full_nents, DMA_TRACE_MAX_ENTRIES), __entry->full_nents, + min_t(int, __entry->full_ents, DMA_TRACE_MAX_ENTRIES), __entry->full_ents, + __entry->truncated ? 
" [TRUNCATED]" : "", __print_array(__get_dynamic_array(dma_addrs), __get_dynamic_array_len(dma_addrs) / sizeof(u64), sizeof(u64)), diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index a87d4e26eee84e..80178b69e05a25 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -67,7 +67,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) unsigned int cq_head = READ_ONCE(r->cq.head); unsigned int cq_tail = READ_ONCE(r->cq.tail); unsigned int sq_shift = 0; - unsigned int sq_entries; + unsigned int cq_entries, sq_entries; int sq_pid = -1, sq_cpu = -1; u64 sq_total_time = 0, sq_work_time = 0; unsigned int i; @@ -146,9 +146,11 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) } } seq_printf(m, "\n"); + cond_resched(); } seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head); - while (cq_head < cq_tail) { + cq_entries = min(cq_tail - cq_head, ctx->cq_entries); + for (i = 0; i < cq_entries; i++) { struct io_uring_cqe *cqe; bool cqe32 = false; @@ -159,12 +161,15 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) cq_head & cq_mask, cqe->user_data, cqe->res, cqe->flags); if (cqe32) - seq_printf(m, ", extra1:%llu, extra2:%llu\n", + seq_printf(m, ", extra1:%llu, extra2:%llu", cqe->big_cqe[0], cqe->big_cqe[1]); seq_printf(m, "\n"); cq_head++; - if (cqe32) + if (cqe32) { cq_head++; + i++; + } + cond_resched(); } if (ctx->flags & IORING_SETUP_SQPOLL) { diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index b99cf2c6670aa8..3d398283cf3405 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -197,6 +197,7 @@ static int io_import_umem(struct io_zcrx_ifq *ifq, GFP_KERNEL_ACCOUNT); if (ret) { unpin_user_pages(pages, nr_pages); + kvfree(pages); return ret; } @@ -1068,8 +1069,6 @@ static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr, unsigned int mask = zcrx->rq_entries - 1; unsigned int i; - guard(spinlock_bh)(&zcrx->rq_lock); - nr = min(nr, io_zcrx_rqring_entries(zcrx)); for (i = 0; i < nr; i++) { struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(zcrx, mask); @@ -1114,9 +1113,11 @@ static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx, return -EINVAL; do { - nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx); + scoped_guard(spinlock_bh, &zcrx->rq_lock) { + nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx); + zcrx_return_buffers(netmems, nr); + } - zcrx_return_buffers(netmems, nr); total += nr; if (fatal_signal_pending(current)) diff --git a/kernel/cgroup/dmem.c b/kernel/cgroup/dmem.c index e12b946278b6c6..1ea6afffa985c4 100644 --- a/kernel/cgroup/dmem.c +++ b/kernel/cgroup/dmem.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -71,7 +72,9 @@ struct dmem_cgroup_pool_state { struct rcu_head rcu; struct page_counter cnt; + struct dmem_cgroup_pool_state *parent; + refcount_t ref; bool inited; }; @@ -88,6 +91,9 @@ struct dmem_cgroup_pool_state { static DEFINE_SPINLOCK(dmemcg_lock); static LIST_HEAD(dmem_cgroup_regions); +static void dmemcg_free_region(struct kref *ref); +static void dmemcg_pool_free_rcu(struct rcu_head *rcu); + static inline struct dmemcg_state * css_to_dmemcs(struct cgroup_subsys_state *css) { @@ -104,10 +110,38 @@ static struct dmemcg_state *parent_dmemcs(struct dmemcg_state *cg) return cg->css.parent ? 
css_to_dmemcs(cg->css.parent) : NULL; } +static void dmemcg_pool_get(struct dmem_cgroup_pool_state *pool) +{ + refcount_inc(&pool->ref); +} + +static bool dmemcg_pool_tryget(struct dmem_cgroup_pool_state *pool) +{ + return refcount_inc_not_zero(&pool->ref); +} + +static void dmemcg_pool_put(struct dmem_cgroup_pool_state *pool) +{ + if (!refcount_dec_and_test(&pool->ref)) + return; + + call_rcu(&pool->rcu, dmemcg_pool_free_rcu); +} + +static void dmemcg_pool_free_rcu(struct rcu_head *rcu) +{ + struct dmem_cgroup_pool_state *pool = container_of(rcu, typeof(*pool), rcu); + + if (pool->parent) + dmemcg_pool_put(pool->parent); + kref_put(&pool->region->ref, dmemcg_free_region); + kfree(pool); +} + static void free_cg_pool(struct dmem_cgroup_pool_state *pool) { list_del(&pool->region_node); - kfree(pool); + dmemcg_pool_put(pool); } static void @@ -342,6 +376,12 @@ alloc_pool_single(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *region page_counter_init(&pool->cnt, ppool ? &ppool->cnt : NULL, true); reset_all_resource_limits(pool); + refcount_set(&pool->ref, 1); + kref_get(®ion->ref); + if (ppool && !pool->parent) { + pool->parent = ppool; + dmemcg_pool_get(ppool); + } list_add_tail_rcu(&pool->css_node, &dmemcs->pools); list_add_tail(&pool->region_node, ®ion->pools); @@ -389,6 +429,10 @@ get_cg_pool_locked(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *regio /* Fix up parent links, mark as inited. */ pool->cnt.parent = &ppool->cnt; + if (ppool && !pool->parent) { + pool->parent = ppool; + dmemcg_pool_get(ppool); + } pool->inited = true; pool = ppool; @@ -423,7 +467,7 @@ static void dmemcg_free_region(struct kref *ref) */ void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region) { - struct list_head *entry; + struct dmem_cgroup_pool_state *pool, *next; if (!region) return; @@ -433,11 +477,10 @@ void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region) /* Remove from global region list */ list_del_rcu(®ion->region_node); - list_for_each_rcu(entry, ®ion->pools) { - struct dmem_cgroup_pool_state *pool = - container_of(entry, typeof(*pool), region_node); - + list_for_each_entry_safe(pool, next, ®ion->pools, region_node) { list_del_rcu(&pool->css_node); + list_del(&pool->region_node); + dmemcg_pool_put(pool); } /* @@ -518,8 +561,10 @@ static struct dmem_cgroup_region *dmemcg_get_region_by_name(const char *name) */ void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool) { - if (pool) + if (pool) { css_put(&pool->cs->css); + dmemcg_pool_put(pool); + } } EXPORT_SYMBOL_GPL(dmem_cgroup_pool_state_put); @@ -533,6 +578,8 @@ get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region) pool = find_cg_pool_locked(cg, region); if (pool && !READ_ONCE(pool->inited)) pool = NULL; + if (pool && !dmemcg_pool_tryget(pool)) + pool = NULL; rcu_read_unlock(); while (!pool) { @@ -541,6 +588,8 @@ get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region) pool = get_cg_pool_locked(cg, region, &allocpool); else pool = ERR_PTR(-ENODEV); + if (!IS_ERR(pool)) + dmemcg_pool_get(pool); spin_unlock(&dmemcg_lock); if (pool == ERR_PTR(-ENOMEM)) { @@ -576,6 +625,7 @@ void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size) page_counter_uncharge(&pool->cnt, size); css_put(&pool->cs->css); + dmemcg_pool_put(pool); } EXPORT_SYMBOL_GPL(dmem_cgroup_uncharge); @@ -627,7 +677,9 @@ int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size, if (ret_limit_pool) { *ret_limit_pool = container_of(fail, struct 
dmem_cgroup_pool_state, cnt); css_get(&(*ret_limit_pool)->cs->css); + dmemcg_pool_get(*ret_limit_pool); } + dmemcg_pool_put(pool); ret = -EAGAIN; goto err; } @@ -700,6 +752,9 @@ static ssize_t dmemcg_limit_write(struct kernfs_open_file *of, if (!region_name[0]) continue; + if (!options || !*options) + return -EINVAL; + rcu_read_lock(); region = dmemcg_get_region_by_name(region_name); rcu_read_unlock(); @@ -719,6 +774,7 @@ static ssize_t dmemcg_limit_write(struct kernfs_open_file *of, /* And commit */ apply(pool, new_limit); + dmemcg_pool_put(pool); out_put: kref_put(®ion->ref, dmemcg_free_region); diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index d8fd6f779f797f..c56004d314dc2e 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -91,6 +91,16 @@ static int __init early_cma(char *p) } early_param("cma", early_cma); +/* + * cma_skip_dt_default_reserved_mem - This is called from the + * reserved_mem framework to detect if the default cma region is being + * set by the "cma=" kernel parameter. + */ +bool __init cma_skip_dt_default_reserved_mem(void) +{ + return size_cmdline != -1; +} + #ifdef CONFIG_DMA_NUMA_CMA static struct cma *dma_contiguous_numa_area[MAX_NUMNODES]; @@ -247,10 +257,12 @@ void __init dma_contiguous_reserve(phys_addr_t limit) pr_debug("%s: reserving %ld MiB for global area\n", __func__, (unsigned long)selected_size / SZ_1M); - dma_contiguous_reserve_area(selected_size, selected_base, - selected_limit, - &dma_contiguous_default_area, - fixed); + ret = dma_contiguous_reserve_area(selected_size, selected_base, + selected_limit, + &dma_contiguous_default_area, + fixed); + if (ret) + return; ret = dma_heap_cma_register_heap(dma_contiguous_default_area); if (ret) @@ -470,12 +482,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem) struct cma *cma; int err; - if (size_cmdline != -1 && default_cma) { - pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n", - rmem->name); - return -EBUSY; - } - if (!of_get_flat_dt_prop(node, "reusable", NULL) || of_get_flat_dt_prop(node, "no-map", NULL)) return -EINVAL; diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index c5da29ad010c4d..2b2fbb7092429a 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -277,15 +277,20 @@ struct page *dma_alloc_from_pool(struct device *dev, size_t size, { struct gen_pool *pool = NULL; struct page *page; + bool pool_found = false; while ((pool = dma_guess_pool(pool, gfp))) { + pool_found = true; page = __dma_alloc_from_pool(dev, size, pool, cpu_addr, phys_addr_ok); if (page) return page; } - WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev)); + if (pool_found) + WARN(!(gfp & __GFP_NOWARN), "DMA pool exhausted for %s\n", dev_name(dev)); + else + WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev)); return NULL; } diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 1f658957870391..9d24b6e0c91f1f 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -246,7 +246,7 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user, if (user && !crosstask) { if (!user_mode(regs)) { - if (current->flags & (PF_KTHREAD | PF_USER_WORKER)) + if (!is_user_task(current)) goto exit_put; regs = task_pt_regs(current); } diff --git a/kernel/events/core.c b/kernel/events/core.c index a0fa488bce8465..8cca8009462481 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7460,7 +7460,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user, if (user_mode(regs)) { regs_user->abi 
= perf_reg_abi(current); regs_user->regs = regs; - } else if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { + } else if (is_user_task(current)) { perf_get_regs_user(regs_user, regs); } else { regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; @@ -8100,7 +8100,7 @@ static u64 perf_virt_to_phys(u64 virt) * Try IRQ-safe get_user_page_fast_only first. * If failed, leave phys_addr as 0. */ - if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { + if (is_user_task(current)) { struct page *p; pagefault_disable(); @@ -8215,7 +8215,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs) { bool kernel = !event->attr.exclude_callchain_kernel; bool user = !event->attr.exclude_callchain_user && - !(current->flags & (PF_KTHREAD | PF_USER_WORKER)); + is_user_task(current); /* Disallow cross-task user callchains. */ bool crosstask = event->ctx->task && event->ctx->task != current; bool defer_user = IS_ENABLED(CONFIG_UNWIND_USER) && user && diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 9917756dae46c8..1acbad2dbfdf54 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1356,6 +1356,25 @@ void klp_module_going(struct module *mod) mutex_unlock(&klp_mutex); } +void *klp_find_section_by_name(const struct module *mod, const char *name, + size_t *sec_size) +{ + struct klp_modinfo *info = mod->klp_info; + + for (int i = 1; i < info->hdr.e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; + + if (!strcmp(info->secstrings + shdr->sh_name, name)) { + *sec_size = shdr->sh_size; + return (void *)shdr->sh_addr; + } + } + + *sec_size = 0; + return NULL; +} +EXPORT_SYMBOL_GPL(klp_find_section_by_name); + static int __init klp_init(void) { klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj); diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c index d4482b6e3cae39..90d411a59f76d9 100644 --- a/kernel/liveupdate/kexec_handover.c +++ b/kernel/liveupdate/kexec_handover.c @@ -255,6 +255,14 @@ static struct page *kho_restore_page(phys_addr_t phys, bool is_folio) if (is_folio && info.order) prep_compound_page(page, info.order); + /* Always mark headpage's codetag as empty to avoid accounting mismatch */ + clear_page_tag_ref(page); + if (!is_folio) { + /* Also do that for the non-compound tail pages */ + for (unsigned int i = 1; i < nr_pages; i++) + clear_page_tag_ref(page + i); + } + adjust_managed_page_count(page, nr_pages); return page; } @@ -1006,8 +1014,10 @@ int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation) chunk->phys[idx++] = phys; if (idx == ARRAY_SIZE(chunk->phys)) { chunk = new_vmalloc_chunk(chunk); - if (!chunk) + if (!chunk) { + err = -ENOMEM; goto err_free; + } idx = 0; } } diff --git a/kernel/liveupdate/luo_file.c b/kernel/liveupdate/luo_file.c index a32a777f6df8f3..9f7283379ebc06 100644 --- a/kernel/liveupdate/luo_file.c +++ b/kernel/liveupdate/luo_file.c @@ -402,8 +402,6 @@ static void luo_file_unfreeze_one(struct luo_file_set *file_set, luo_file->fh->ops->unfreeze(&args); } - - luo_file->serialized_data = 0; } static void __luo_file_unfreeze(struct luo_file_set *file_set, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 045f83ad261e25..854984967fe295 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10269,7 +10269,8 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count) * Serialization rules: * * mm::mm_cid::mutex: Serializes fork() and exit() and therefore - * protects mm::mm_cid::users. 
+ * protects mm::mm_cid::users and mode switch + * transitions * * mm::mm_cid::lock: Serializes mm_update_max_cids() and * mm_update_cpus_allowed(). Nests in mm_cid::mutex @@ -10285,14 +10286,70 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count) * * A CID is either owned by a task (stored in task_struct::mm_cid.cid) or * by a CPU (stored in mm::mm_cid.pcpu::cid). CIDs owned by CPUs have the - * MM_CID_ONCPU bit set. During transition from CPU to task ownership mode, - * MM_CID_TRANSIT is set on the per task CIDs. When this bit is set the - * task needs to drop the CID into the pool when scheduling out. Both bits - * (ONCPU and TRANSIT) are filtered out by task_cid() when the CID is - * actually handed over to user space in the RSEQ memory. + * MM_CID_ONCPU bit set. + * + * During the transition of ownership mode, the MM_CID_TRANSIT bit is set + * on the CIDs. When this bit is set the tasks drop the CID back into the + * pool when scheduling out. + * + * Both bits (ONCPU and TRANSIT) are filtered out by task_cid() when the + * CID is actually handed over to user space in the RSEQ memory. * * Mode switching: * + * The ownership mode is per process and stored in mm:mm_cid::mode with the + * following possible states: + * + * 0: Per task ownership + * 0 | MM_CID_TRANSIT: Transition from per CPU to per task + * MM_CID_ONCPU: Per CPU ownership + * MM_CID_ONCPU | MM_CID_TRANSIT: Transition from per task to per CPU + * + * All transitions of ownership mode happen in two phases: + * + * 1) mm:mm_cid::mode has the MM_CID_TRANSIT bit set. This is OR'ed on the + * CIDs and denotes that the CID is only temporarily owned by a + * task. When the task schedules out it drops the CID back into the + * pool if this bit is set. + * + * 2) The initiating context walks the per CPU space or the tasks to fixup + * or drop the CIDs and after completion it clears MM_CID_TRANSIT in + * mm:mm_cid::mode. After that point the CIDs are strictly task or CPU + * owned again. + * + * This two phase transition is required to prevent CID space exhaustion + * during the transition as a direct transfer of ownership would fail: + * + * - On task to CPU mode switch if a task is scheduled in on one CPU and + * then migrated to another CPU before the fixup freed enough per task + * CIDs. + * + * - On CPU to task mode switch if two tasks are scheduled in on the same + * CPU before the fixup freed per CPU CIDs. + * + * Both scenarios can result in a live lock because sched_in() is invoked + * with runqueue lock held and loops in search of a CID and the fixup + * thread can't make progress freeing them up because it is stuck on the + * same runqueue lock. + * + * While MM_CID_TRANSIT is active during the transition phase the MM_CID + * bitmap can be contended, but that's a temporary contention bound to the + * transition period. After that everything goes back into steady state and + * nothing except fork() and exit() will touch the bitmap. This is an + * acceptable tradeoff as it completely avoids complex serialization, + * memory barriers and atomic operations for the common case. + * + * Aside from that this mechanism also ensures RT compatibility: + * + * - The task which runs the fixup is fully preemptible except for the + * short runqueue lock held sections. + * + * - The transient impact of the bitmap contention is only problematic + * when there is a thundering herd scenario of tasks scheduling in and + * out concurrently.
There is not much which can be done about that + * except for avoiding mode switching by a proper overall system + * configuration. + * * Switching to per CPU mode happens when the user count becomes greater * than the maximum number of CIDs, which is calculated by: * @@ -10306,12 +10363,13 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count) * * At the point of switching to per CPU mode the new user is not yet * visible in the system, so the task which initiated the fork() runs the - * fixup function: mm_cid_fixup_tasks_to_cpu() walks the thread list and - * either transfers each tasks owned CID to the CPU the task runs on or - * drops it into the CID pool if a task is not on a CPU at that point in - * time. Tasks which schedule in before the task walk reaches them do the - * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus() completes - * it's guaranteed that no task related to that MM owns a CID anymore. + * fixup function. mm_cid_fixup_tasks_to_cpu() walks the thread list and + * either marks each task owned CID with MM_CID_TRANSIT if the task is + * running on a CPU or drops it into the CID pool if a task is not on a + * CPU. Tasks which schedule in before the task walk reaches them do the + * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus() + * completes it is guaranteed that no task related to that MM owns a CID + * anymore. * * Switching back to task mode happens when the user count goes below the * threshold which was recorded on the per CPU mode switch: @@ -10327,28 +10385,11 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count) * run either in the deferred update function in context of a workqueue or * by a task which forks a new one or by a task which exits. Whatever * happens first. mm_cid_fixup_cpus_to_task() walks through the possible - * CPUs and either transfers the CPU owned CIDs to a related task which - * runs on the CPU or drops it into the pool. Tasks which schedule in on a - * CPU which the walk did not cover yet do the handover themself. - * - * This transition from CPU to per task ownership happens in two phases: - * - * 1) mm:mm_cid.transit contains MM_CID_TRANSIT This is OR'ed on the task - * CID and denotes that the CID is only temporarily owned by the - * task. When it schedules out the task drops the CID back into the - * pool if this bit is set. - * - * 2) The initiating context walks the per CPU space and after completion - * clears mm:mm_cid.transit. So after that point the CIDs are strictly - * task owned again. - * - * This two phase transition is required to prevent CID space exhaustion - * during the transition as a direct transfer of ownership would fail if - * two tasks are scheduled in on the same CPU before the fixup freed per - * CPU CIDs. - * - * When mm_cid_fixup_cpus_to_tasks() completes it's guaranteed that no CID - * related to that MM is owned by a CPU anymore. + * CPUs and either marks the CPU owned CIDs with MM_CID_TRANSIT if a + * related task is running on the CPU or drops it into the pool. Tasks + * which are scheduled in before the fixup covered them do the handover + * themself. When mm_cid_fixup_cpus_to_tasks() completes it is guaranteed + * that no CID related to that MM is owned by a CPU anymore. 
*/ /* @@ -10379,6 +10420,7 @@ static inline unsigned int mm_cid_calc_pcpu_thrs(struct mm_mm_cid *mc) static bool mm_update_max_cids(struct mm_struct *mm) { struct mm_mm_cid *mc = &mm->mm_cid; + bool percpu = cid_on_cpu(mc->mode); lockdep_assert_held(&mm->mm_cid.lock); @@ -10387,7 +10429,7 @@ static bool mm_update_max_cids(struct mm_struct *mm) __mm_update_max_cids(mc); /* Check whether owner mode must be changed */ - if (!mc->percpu) { + if (!percpu) { /* Enable per CPU mode when the number of users is above max_cids */ if (mc->users > mc->max_cids) mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc); @@ -10398,12 +10440,17 @@ static bool mm_update_max_cids(struct mm_struct *mm) } /* Mode change required? */ - if (!!mc->percpu == !!mc->pcpu_thrs) + if (percpu == !!mc->pcpu_thrs) return false; - /* When switching back to per TASK mode, set the transition flag */ - if (!mc->pcpu_thrs) - WRITE_ONCE(mc->transit, MM_CID_TRANSIT); - WRITE_ONCE(mc->percpu, !!mc->pcpu_thrs); + + /* Flip the mode and set the transition flag to bridge the transfer */ + WRITE_ONCE(mc->mode, mc->mode ^ (MM_CID_TRANSIT | MM_CID_ONCPU)); + /* + * Order the store against the subsequent fixups so that + * acquire(rq::lock) cannot be reordered by the CPU before the + * store. + */ + smp_mb(); return true; } @@ -10428,7 +10475,7 @@ static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpu WRITE_ONCE(mc->nr_cpus_allowed, weight); __mm_update_max_cids(mc); - if (!mc->percpu) + if (!cid_on_cpu(mc->mode)) return; /* Adjust the threshold to the wider set */ @@ -10446,6 +10493,16 @@ static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpu irq_work_queue(&mc->irq_work); } +static inline void mm_cid_complete_transit(struct mm_struct *mm, unsigned int mode) +{ + /* + * Ensure that the store removing the TRANSIT bit cannot be + * reordered by the CPU before the fixups have been completed. 
+ */ + smp_mb(); + WRITE_ONCE(mm->mm_cid.mode, mode); +} + static inline void mm_cid_transit_to_task(struct task_struct *t, struct mm_cid_pcpu *pcp) { if (cid_on_cpu(t->mm_cid.cid)) { @@ -10489,14 +10546,13 @@ static void mm_cid_fixup_cpus_to_tasks(struct mm_struct *mm) } } } - /* Clear the transition bit */ - WRITE_ONCE(mm->mm_cid.transit, 0); + mm_cid_complete_transit(mm, 0); } -static inline void mm_cid_transfer_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp) +static inline void mm_cid_transit_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp) { if (cid_on_task(t->mm_cid.cid)) { - t->mm_cid.cid = cid_to_cpu_cid(t->mm_cid.cid); + t->mm_cid.cid = cid_to_transit_cid(t->mm_cid.cid); pcp->cid = t->mm_cid.cid; } } @@ -10509,18 +10565,17 @@ static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm if (!t->mm_cid.active) return false; if (cid_on_task(t->mm_cid.cid)) { - /* If running on the CPU, transfer the CID, otherwise drop it */ + /* If running on the CPU, put the CID in transit mode, otherwise drop it */ if (task_rq(t)->curr == t) - mm_cid_transfer_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t))); + mm_cid_transit_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t))); else mm_unset_cid_on_task(t); } return true; } -static void mm_cid_fixup_tasks_to_cpus(void) +static void mm_cid_do_fixup_tasks_to_cpus(struct mm_struct *mm) { - struct mm_struct *mm = current->mm; struct task_struct *p, *t; unsigned int users; @@ -10558,6 +10613,14 @@ static void mm_cid_fixup_tasks_to_cpus(void) } } +static void mm_cid_fixup_tasks_to_cpus(void) +{ + struct mm_struct *mm = current->mm; + + mm_cid_do_fixup_tasks_to_cpus(mm); + mm_cid_complete_transit(mm, MM_CID_ONCPU); +} + static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm) { t->mm_cid.active = 1; @@ -10586,17 +10649,17 @@ void sched_mm_cid_fork(struct task_struct *t) } if (!sched_mm_cid_add_user(t, mm)) { - if (!mm->mm_cid.percpu) + if (!cid_on_cpu(mm->mm_cid.mode)) t->mm_cid.cid = mm_get_cid(mm); return; } /* Handle the mode change and transfer current's CID */ - percpu = !!mm->mm_cid.percpu; + percpu = cid_on_cpu(mm->mm_cid.mode); if (!percpu) mm_cid_transit_to_task(current, pcp); else - mm_cid_transfer_to_cpu(current, pcp); + mm_cid_transit_to_cpu(current, pcp); } if (percpu) { @@ -10631,7 +10694,7 @@ static bool __sched_mm_cid_exit(struct task_struct *t) * affinity change increased the number of allowed CPUs and the * deferred fixup did not run yet. */ - if (WARN_ON_ONCE(mm->mm_cid.percpu)) + if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode))) return false; /* * A failed fork(2) cleanup never gets here, so @current must have @@ -10664,8 +10727,14 @@ void sched_mm_cid_exit(struct task_struct *t) scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) { if (!__sched_mm_cid_exit(t)) return; - /* Mode change required. Transfer currents CID */ - mm_cid_transit_to_task(current, this_cpu_ptr(mm->mm_cid.pcpu)); + /* + * Mode change. The task has the CID unset + * already. The CPU CID is still valid and + * does not have MM_CID_TRANSIT set as the + * mode change has just taken effect under + * mm::mm_cid::lock. Drop it. 
+ */ + mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu)); } mm_cid_fixup_cpus_to_tasks(mm); return; @@ -10722,7 +10791,7 @@ static void mm_cid_work_fn(struct work_struct *work) if (!mm_update_max_cids(mm)) return; /* Affinity changes can only switch back to task mode */ - if (WARN_ON_ONCE(mm->mm_cid.percpu)) + if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode))) return; } mm_cid_fixup_cpus_to_tasks(mm); @@ -10743,8 +10812,7 @@ static void mm_cid_irq_work(struct irq_work *work) void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { mm->mm_cid.max_cids = 0; - mm->mm_cid.percpu = 0; - mm->mm_cid.transit = 0; + mm->mm_cid.mode = 0; mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed; mm->mm_cid.users = 0; mm->mm_cid.pcpu_thrs = 0; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index c509f2e7d69de2..7bcde7114f1b65 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1034,6 +1034,12 @@ static void update_dl_entity(struct sched_dl_entity *dl_se) return; } + /* + * When [4] D->A is followed by [1] A->B, dl_defer_running + * needs to be cleared, otherwise it will fail to properly + * start the zero-laxity timer. + */ + dl_se->dl_defer_running = 0; replenish_dl_new_period(dl_se, rq); } else if (dl_server(dl_se) && dl_se->dl_defer) { /* @@ -1655,6 +1661,12 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec) * dl_server_active = 1; * enqueue_dl_entity() * update_dl_entity(WAKEUP) + * if (dl_time_before() || dl_entity_overflow()) + * dl_defer_running = 0; + * replenish_dl_new_period(); + * // fwd period + * dl_throttled = 1; + * dl_defer_armed = 1; * if (!dl_defer_running) * dl_defer_armed = 1; * dl_throttled = 1; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index afe28c04d5aa73..0bb8fa927e9e9f 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -194,6 +194,7 @@ MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microsecond #include static void process_ddsp_deferred_locals(struct rq *rq); +static bool task_dead_and_done(struct task_struct *p); static u32 reenq_local(struct rq *rq); static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags); static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind, @@ -2619,6 +2620,9 @@ static void set_cpus_allowed_scx(struct task_struct *p, set_cpus_allowed_common(p, ac); + if (task_dead_and_done(p)) + return; + /* * The effective cpumask is stored in @p->cpus_ptr which may temporarily * differ from the configured one in @p->cpus_mask. Always tell the bpf @@ -3034,10 +3038,45 @@ void scx_cancel_fork(struct task_struct *p) percpu_up_read(&scx_fork_rwsem); } +/** + * task_dead_and_done - Is a task dead and done running? + * @p: target task + * + * Once sched_ext_dead() removes the dead task from scx_tasks and exits it, the + * task no longer exists from SCX's POV. However, certain sched_class ops may be + * invoked on these dead tasks leading to failures - e.g. sched_setscheduler() + * may try to switch a task which finished sched_ext_dead() back into SCX + * triggering invalid SCX task state transitions and worse. + * + * Once a task has finished the final switch, sched_ext_dead() is the only thing + * that needs to happen on the task. Use this test to short-circuit sched_class + * operations which may be called on dead tasks. + */ +static bool task_dead_and_done(struct task_struct *p) +{ + struct rq *rq = task_rq(p); + + lockdep_assert_rq_held(rq); + + /* + * In do_task_dead(), a dying task sets %TASK_DEAD with preemption + * disabled and __schedule(). 
If @p has %TASK_DEAD set and off CPU, @p + * won't ever run again. + */ + return unlikely(READ_ONCE(p->__state) == TASK_DEAD) && + !task_on_cpu(rq, p); +} + void sched_ext_dead(struct task_struct *p) { unsigned long flags; + /* + * By the time control reaches here, @p has %TASK_DEAD set, switched out + * for the last time and then dropped the rq lock - task_dead_and_done() + * should be returning %true nullifying the straggling sched_class ops. + * Remove from scx_tasks and exit @p. + */ raw_spin_lock_irqsave(&scx_tasks_lock, flags); list_del_init(&p->scx.tasks_node); raw_spin_unlock_irqrestore(&scx_tasks_lock, flags); @@ -3063,6 +3102,9 @@ static void reweight_task_scx(struct rq *rq, struct task_struct *p, lockdep_assert_rq_held(task_rq(p)); + if (task_dead_and_done(p)) + return; + p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); if (SCX_HAS_OP(sch, set_weight)) SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, @@ -3077,6 +3119,9 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p) { struct scx_sched *sch = scx_root; + if (task_dead_and_done(p)) + return; + scx_enable_task(p); /* @@ -3090,6 +3135,9 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p) static void switched_from_scx(struct rq *rq, struct task_struct *p) { + if (task_dead_and_done(p)) + return; + scx_disable_task(p); } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 93fce4bbff5eac..bd350e40859d89 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3816,7 +3816,8 @@ static __always_inline void mm_cid_update_pcpu_cid(struct mm_struct *mm, unsigne __this_cpu_write(mm->mm_cid.pcpu->cid, cid); } -static __always_inline void mm_cid_from_cpu(struct task_struct *t, unsigned int cpu_cid) +static __always_inline void mm_cid_from_cpu(struct task_struct *t, unsigned int cpu_cid, + unsigned int mode) { unsigned int max_cids, tcid = t->mm_cid.cid; struct mm_struct *mm = t->mm; @@ -3841,12 +3842,17 @@ static __always_inline void mm_cid_from_cpu(struct task_struct *t, unsigned int /* Still nothing, allocate a new one */ if (!cid_on_cpu(cpu_cid)) cpu_cid = cid_to_cpu_cid(mm_get_cid(mm)); + + /* Handle the transition mode flag if required */ + if (mode & MM_CID_TRANSIT) + cpu_cid = cpu_cid_to_cid(cpu_cid) | MM_CID_TRANSIT; } mm_cid_update_pcpu_cid(mm, cpu_cid); mm_cid_update_task_cid(t, cpu_cid); } -static __always_inline void mm_cid_from_task(struct task_struct *t, unsigned int cpu_cid) +static __always_inline void mm_cid_from_task(struct task_struct *t, unsigned int cpu_cid, + unsigned int mode) { unsigned int max_cids, tcid = t->mm_cid.cid; struct mm_struct *mm = t->mm; @@ -3872,7 +3878,7 @@ static __always_inline void mm_cid_from_task(struct task_struct *t, unsigned int if (!cid_on_task(tcid)) tcid = mm_get_cid(mm); /* Set the transition mode flag if required */ - tcid |= READ_ONCE(mm->mm_cid.transit); + tcid |= mode & MM_CID_TRANSIT; } mm_cid_update_pcpu_cid(mm, tcid); mm_cid_update_task_cid(t, tcid); @@ -3881,26 +3887,46 @@ static __always_inline void mm_cid_from_task(struct task_struct *t, unsigned int static __always_inline void mm_cid_schedin(struct task_struct *next) { struct mm_struct *mm = next->mm; - unsigned int cpu_cid; + unsigned int cpu_cid, mode; if (!next->mm_cid.active) return; cpu_cid = __this_cpu_read(mm->mm_cid.pcpu->cid); - if (likely(!READ_ONCE(mm->mm_cid.percpu))) - mm_cid_from_task(next, cpu_cid); + mode = READ_ONCE(mm->mm_cid.mode); + if (likely(!cid_on_cpu(mode))) + mm_cid_from_task(next, cpu_cid, mode); else - mm_cid_from_cpu(next, 
cpu_cid); + mm_cid_from_cpu(next, cpu_cid, mode); } static __always_inline void mm_cid_schedout(struct task_struct *prev) { + struct mm_struct *mm = prev->mm; + unsigned int mode, cid; + /* During mode transitions CIDs are temporary and need to be dropped */ if (likely(!cid_in_transit(prev->mm_cid.cid))) return; - mm_drop_cid(prev->mm, cid_from_transit_cid(prev->mm_cid.cid)); - prev->mm_cid.cid = MM_CID_UNSET; + mode = READ_ONCE(mm->mm_cid.mode); + cid = cid_from_transit_cid(prev->mm_cid.cid); + + /* + * If transition mode is done, transfer ownership when the CID is + * within the convergence range to optimize the next schedule in. + */ + if (!cid_in_transit(mode) && cid < READ_ONCE(mm->mm_cid.max_cids)) { + if (cid_on_cpu(mode)) + cid = cid_to_cpu_cid(cid); + + /* Update both so that the next schedule in goes into the fast path */ + mm_cid_update_pcpu_cid(mm, cid); + prev->mm_cid.cid = cid; + } else { + mm_drop_cid(mm, cid); + prev->mm_cid.cid = MM_CID_UNSET; + } } static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct *next) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index b6d42fe0611502..c11edec5d8f5ca 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -68,14 +68,17 @@ enum trace_type { #undef __field_fn #define __field_fn(type, item) type item; +#undef __field_packed +#define __field_packed(type, item) type item; + #undef __field_struct #define __field_struct(type, item) __field(type, item) #undef __field_desc #define __field_desc(type, container, item) -#undef __field_packed -#define __field_packed(type, container, item) +#undef __field_desc_packed +#define __field_desc_packed(type, container, item) #undef __array #define __array(type, item, size) type item[size]; diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index f6a8d29c0d7631..54417468fdeb16 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -79,8 +79,8 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry, F_STRUCT( __field_struct( struct ftrace_graph_ent, graph_ent ) - __field_packed( unsigned long, graph_ent, func ) - __field_packed( unsigned long, graph_ent, depth ) + __field_desc_packed(unsigned long, graph_ent, func ) + __field_desc_packed(unsigned long, graph_ent, depth ) __dynamic_array(unsigned long, args ) ), @@ -96,9 +96,9 @@ FTRACE_ENTRY_PACKED(fgraph_retaddr_entry, fgraph_retaddr_ent_entry, F_STRUCT( __field_struct( struct fgraph_retaddr_ent, graph_rent ) - __field_packed( unsigned long, graph_rent.ent, func ) - __field_packed( unsigned long, graph_rent.ent, depth ) - __field_packed( unsigned long, graph_rent, retaddr ) + __field_desc_packed( unsigned long, graph_rent.ent, func ) + __field_desc_packed( unsigned long, graph_rent.ent, depth ) + __field_desc_packed( unsigned long, graph_rent, retaddr ) __dynamic_array(unsigned long, args ) ), @@ -123,12 +123,12 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry, F_STRUCT( __field_struct( struct ftrace_graph_ret, ret ) - __field_packed( unsigned long, ret, func ) - __field_packed( unsigned long, ret, retval ) - __field_packed( unsigned int, ret, depth ) - __field_packed( unsigned int, ret, overrun ) - __field(unsigned long long, calltime ) - __field(unsigned long long, rettime ) + __field_desc_packed( unsigned long, ret, func ) + __field_desc_packed( unsigned long, ret, retval ) + __field_desc_packed( unsigned int, ret, depth ) + __field_desc_packed( unsigned int, ret, overrun ) + __field_packed(unsigned long long, calltime) + __field_packed(unsigned 
long long, rettime ) ), F_printk("<-- %ps (%u) (start: %llx end: %llx) over: %u retval: %lx", @@ -146,11 +146,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry, F_STRUCT( __field_struct( struct ftrace_graph_ret, ret ) - __field_packed( unsigned long, ret, func ) - __field_packed( unsigned int, ret, depth ) - __field_packed( unsigned int, ret, overrun ) - __field(unsigned long long, calltime ) - __field(unsigned long long, rettime ) + __field_desc_packed( unsigned long, ret, func ) + __field_desc_packed( unsigned int, ret, depth ) + __field_desc_packed( unsigned int, ret, overrun ) + __field_packed(unsigned long long, calltime ) + __field_packed(unsigned long long, rettime ) ), F_printk("<-- %ps (%u) (start: %llx end: %llx) over: %u", diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 1698fc22afa0a5..32a42ef31855d8 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -42,11 +42,14 @@ static int ftrace_event_register(struct trace_event_call *call, #undef __field_fn #define __field_fn(type, item) type item; +#undef __field_packed +#define __field_packed(type, item) type item; + #undef __field_desc #define __field_desc(type, container, item) type item; -#undef __field_packed -#define __field_packed(type, container, item) type item; +#undef __field_desc_packed +#define __field_desc_packed(type, container, item) type item; #undef __array #define __array(type, item, size) type item[size]; @@ -104,11 +107,14 @@ static void __always_unused ____ftrace_check_##name(void) \ #undef __field_fn #define __field_fn(_type, _item) __field_ext(_type, _item, FILTER_TRACE_FN) +#undef __field_packed +#define __field_packed(_type, _item) __field_ext_packed(_type, _item, FILTER_OTHER) + #undef __field_desc #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER) -#undef __field_packed -#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER) +#undef __field_desc_packed +#define __field_desc_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER) #undef __array #define __array(_type, _item, _len) { \ @@ -146,11 +152,14 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \ #undef __field_fn #define __field_fn(type, item) +#undef __field_packed +#define __field_packed(type, item) + #undef __field_desc #define __field_desc(type, container, item) -#undef __field_packed -#define __field_packed(type, container, item) +#undef __field_desc_packed +#define __field_desc_packed(type, container, item) #undef __array #define __array(type, item, len) diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c index fe9bf8db1922e6..e2784038bbed79 100644 --- a/kernel/vmcore_info.c +++ b/kernel/vmcore_info.c @@ -36,7 +36,11 @@ struct hwerr_info { time64_t timestamp; }; -static struct hwerr_info hwerr_data[HWERR_RECOV_MAX]; +/* + * The hwerr_data[] array is declared with global scope so that it remains + * accessible to vmcoreinfo even when Link Time Optimization (LTO) is enabled. 
+ */ +struct hwerr_info hwerr_data[HWERR_RECOV_MAX]; Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len) diff --git a/lib/buildid.c b/lib/buildid.c index 818331051afe25..c4b73764062159 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -279,7 +279,7 @@ static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *si /* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */ #define MAX_FREADER_BUF_SZ 64 -static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, +static int __build_id_parse(struct file *file, unsigned char *build_id, __u32 *size, bool may_fault) { const Elf32_Ehdr *ehdr; @@ -287,11 +287,7 @@ static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, char buf[MAX_FREADER_BUF_SZ]; int ret; - /* only works for page backed storage */ - if (!vma->vm_file) - return -EINVAL; - - freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault); + freader_init_from_file(&r, buf, sizeof(buf), file, may_fault); /* fetch first 18 bytes of ELF header for checks */ ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type)); @@ -319,8 +315,8 @@ static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, return ret; } -/* - * Parse build ID of ELF file mapped to vma +/** + * build_id_parse_nofault() - Parse build ID of ELF file mapped to vma * @vma: vma object * @build_id: buffer to store build id, at least BUILD_ID_SIZE long * @size: returns actual build id size in case of success @@ -332,11 +328,14 @@ static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, */ int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) { - return __build_id_parse(vma, build_id, size, false /* !may_fault */); + if (!vma->vm_file) + return -EINVAL; + + return __build_id_parse(vma->vm_file, build_id, size, false /* !may_fault */); } -/* - * Parse build ID of ELF file mapped to VMA +/** + * build_id_parse() - Parse build ID of ELF file mapped to VMA * @vma: vma object * @build_id: buffer to store build id, at least BUILD_ID_SIZE long * @size: returns actual build id size in case of success @@ -348,7 +347,26 @@ int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, */ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) { - return __build_id_parse(vma, build_id, size, true /* may_fault */); + if (!vma->vm_file) + return -EINVAL; + + return __build_id_parse(vma->vm_file, build_id, size, true /* may_fault */); +} + +/** + * build_id_parse_file() - Parse build ID of ELF file + * @file: file object + * @build_id: buffer to store build id, at least BUILD_ID_SIZE long + * @size: returns actual build id size in case of success + * + * Assumes faultable context and can cause page faults to bring in file data + * into page cache. + * + * Return: 0 on success; negative error, otherwise + */ +int build_id_parse_file(struct file *file, unsigned char *build_id, __u32 *size) +{ + return __build_id_parse(file, build_id, size, true /* may_fault */); } /** diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c index 84ecccddc77182..012d5614efb9ab 100644 --- a/lib/flex_proportions.c +++ b/lib/flex_proportions.c @@ -64,13 +64,14 @@ void fprop_global_destroy(struct fprop_global *p) bool fprop_new_period(struct fprop_global *p, int periods) { s64 events = percpu_counter_sum(&p->events); + unsigned long flags; /* * Don't do anything if there are no events. 
*/ if (events <= 1) return false; - preempt_disable_nested(); + local_irq_save(flags); write_seqcount_begin(&p->sequence); if (periods < 64) events -= events >> periods; @@ -78,7 +79,7 @@ bool fprop_new_period(struct fprop_global *p, int periods) percpu_counter_add(&p->events, -events); p->period += periods; write_seqcount_end(&p->sequence); - preempt_enable_nested(); + local_irq_restore(flags); return true; } diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 8af169d3873ae4..455a6862ae503b 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -662,7 +662,9 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror *dmirror, goto error; } - zone_device_folio_init(page_folio(dpage), order); + zone_device_folio_init(page_folio(dpage), + page_pgmap(folio_page(page_folio(dpage), 0)), + order); dpage->zone_device_data = rpage; return dpage; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index ed489a14dddf74..b7d05c2a6d93dd 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -606,4 +606,25 @@ void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, __kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG); } } + +void __kasan_vrealloc(const void *addr, unsigned long old_size, + unsigned long new_size) +{ + if (new_size < old_size) { + kasan_poison_last_granule(addr, new_size); + + new_size = round_up(new_size, KASAN_GRANULE_SIZE); + old_size = round_up(old_size, KASAN_GRANULE_SIZE); + if (new_size < old_size) + __kasan_poison_vmalloc(addr + new_size, + old_size - new_size); + } else if (new_size > old_size) { + old_size = round_down(old_size, KASAN_GRANULE_SIZE); + __kasan_unpoison_vmalloc(addr + old_size, + new_size - old_size, + KASAN_VMALLOC_PROT_NORMAL | + KASAN_VMALLOC_VM_ALLOC | + KASAN_VMALLOC_KEEP_TAG); + } +} #endif diff --git a/mm/kfence/core.c b/mm/kfence/core.c index da0f5b6f57446f..4f79ec72075254 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -596,7 +596,7 @@ static void rcu_guarded_free(struct rcu_head *h) static unsigned long kfence_init_pool(void) { unsigned long addr, start_pfn; - int i; + int i, rand; if (!arch_kfence_init_pool()) return (unsigned long)__kfence_pool; @@ -647,13 +647,27 @@ static unsigned long kfence_init_pool(void) INIT_LIST_HEAD(&meta->list); raw_spin_lock_init(&meta->lock); meta->state = KFENCE_OBJECT_UNUSED; - meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ - list_add_tail(&meta->list, &kfence_freelist); + /* Use addr to randomize the freelist. */ + meta->addr = i; /* Protect the right redzone. 
*/ - if (unlikely(!kfence_protect(addr + PAGE_SIZE))) + if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE))) goto reset_slab; + } + + for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) { + rand = get_random_u32_below(i); + swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr); + } + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + struct kfence_metadata *meta_1 = &kfence_metadata_init[i]; + struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr]; + + list_add_tail(&meta_2->list, &kfence_freelist); + } + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + kfence_metadata_init[i].addr = addr; addr += 2 * PAGE_SIZE; } @@ -666,6 +680,7 @@ static unsigned long kfence_init_pool(void) return 0; reset_slab: + addr += 2 * i * PAGE_SIZE; for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { struct page *page; diff --git a/mm/memfd.c b/mm/memfd.c index ab5312aff14b97..f032c60529265c 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -456,7 +456,7 @@ static char *alloc_name(const char __user *uname) return ERR_PTR(error); } -static struct file *alloc_file(const char *name, unsigned int flags) +struct file *memfd_alloc_file(const char *name, unsigned int flags) { unsigned int *file_seals; struct file *file; @@ -520,5 +520,5 @@ SYSCALL_DEFINE2(memfd_create, return PTR_ERR(name); fd_flags = (flags & MFD_CLOEXEC) ? O_CLOEXEC : 0; - return FD_ADD(fd_flags, alloc_file(name, flags)); + return FD_ADD(fd_flags, memfd_alloc_file(name, flags)); } diff --git a/mm/memfd_luo.c b/mm/memfd_luo.c index 4f6ba63b43105e..a34fccc23b6a42 100644 --- a/mm/memfd_luo.c +++ b/mm/memfd_luo.c @@ -78,6 +78,7 @@ #include #include #include +#include #include "internal.h" static int memfd_luo_preserve_folios(struct file *file, @@ -443,11 +444,11 @@ static int memfd_luo_retrieve(struct liveupdate_file_op_args *args) if (!ser) return -EINVAL; - file = shmem_file_setup("", 0, VM_NORESERVE); - + file = memfd_alloc_file("", 0); if (IS_ERR(file)) { pr_err("failed to setup file: %pe\n", file); - return PTR_ERR(file); + err = PTR_ERR(file); + goto free_ser; } vfs_setpos(file, ser->pos, MAX_LFS_FILESIZE); @@ -473,7 +474,8 @@ static int memfd_luo_retrieve(struct liveupdate_file_op_args *args) put_file: fput(file); - +free_ser: + kho_restore_free(ser); return err; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index c80c2907da3332..9fd8355176eb9f 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -692,6 +692,8 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, unsigned long poisoned_pfn, struct to_kill *tk) { unsigned long pfn = 0; + unsigned long hwpoison_vaddr; + unsigned long mask; if (pte_present(pte)) { pfn = pte_pfn(pte); @@ -702,10 +704,12 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, pfn = softleaf_to_pfn(entry); } - if (!pfn || pfn != poisoned_pfn) + mask = ~((1UL << (shift - PAGE_SHIFT)) - 1); + if (!pfn || pfn != (poisoned_pfn & mask)) return 0; - set_to_kill(tk, addr, shift); + hwpoison_vaddr = addr + ((poisoned_pfn - pfn) << PAGE_SHIFT); + set_to_kill(tk, hwpoison_vaddr, shift); return 1; } @@ -1883,12 +1887,22 @@ static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag) return count; } -static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) +#define MF_HUGETLB_FREED 0 /* freed hugepage */ +#define MF_HUGETLB_IN_USED 1 /* in-use hugepage */ +#define MF_HUGETLB_NON_HUGEPAGE 2 /* not a hugepage */ +#define MF_HUGETLB_FOLIO_PRE_POISONED 3 /* folio already poisoned */ 
+#define MF_HUGETLB_PAGE_PRE_POISONED 4 /* exact page already poisoned */ +#define MF_HUGETLB_RETRY 5 /* hugepage is busy, retry */ +/* + * Set hugetlb folio as hwpoisoned, update folio private raw hwpoison list + * to keep track of the poisoned pages. + */ +static int hugetlb_update_hwpoison(struct folio *folio, struct page *page) { struct llist_head *head; struct raw_hwp_page *raw_hwp; struct raw_hwp_page *p; - int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0; + int ret = folio_test_set_hwpoison(folio) ? MF_HUGETLB_FOLIO_PRE_POISONED : 0; /* * Once the hwpoison hugepage has lost reliable raw error info, @@ -1896,20 +1910,17 @@ static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) * so skip to add additional raw error info. */ if (folio_test_hugetlb_raw_hwp_unreliable(folio)) - return -EHWPOISON; + return MF_HUGETLB_FOLIO_PRE_POISONED; head = raw_hwp_list_head(folio); llist_for_each_entry(p, head->first, node) { if (p->page == page) - return -EHWPOISON; + return MF_HUGETLB_PAGE_PRE_POISONED; } raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC); if (raw_hwp) { raw_hwp->page = page; llist_add(&raw_hwp->node, head); - /* the first error event will be counted in action_result(). */ - if (ret) - num_poisoned_pages_inc(page_to_pfn(page)); } else { /* * Failed to save raw error info. We no longer trace all @@ -1957,42 +1968,39 @@ void folio_clear_hugetlb_hwpoison(struct folio *folio) /* * Called from hugetlb code with hugetlb_lock held. - * - * Return values: - * 0 - free hugepage - * 1 - in-use hugepage - * 2 - not a hugepage - * -EBUSY - the hugepage is busy (try to retry) - * -EHWPOISON - the hugepage is already hwpoisoned */ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared) { struct page *page = pfn_to_page(pfn); struct folio *folio = page_folio(page); - int ret = 2; /* fallback to normal page handling */ bool count_increased = false; + int ret, rc; - if (!folio_test_hugetlb(folio)) + if (!folio_test_hugetlb(folio)) { + ret = MF_HUGETLB_NON_HUGEPAGE; goto out; - - if (flags & MF_COUNT_INCREASED) { - ret = 1; + } else if (flags & MF_COUNT_INCREASED) { + ret = MF_HUGETLB_IN_USED; count_increased = true; } else if (folio_test_hugetlb_freed(folio)) { - ret = 0; + ret = MF_HUGETLB_FREED; } else if (folio_test_hugetlb_migratable(folio)) { - ret = folio_try_get(folio); - if (ret) + if (folio_try_get(folio)) { + ret = MF_HUGETLB_IN_USED; count_increased = true; + } else { + ret = MF_HUGETLB_FREED; + } } else { - ret = -EBUSY; + ret = MF_HUGETLB_RETRY; if (!(flags & MF_NO_RETRY)) goto out; } - if (folio_set_hugetlb_hwpoison(folio, page)) { - ret = -EHWPOISON; + rc = hugetlb_update_hwpoison(folio, page); + if (rc >= MF_HUGETLB_FOLIO_PRE_POISONED) { + ret = rc; goto out; } @@ -2017,10 +2025,16 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, * with basic operations like hugepage allocation/free/demotion. * So some of prechecks for hwpoison (pinning, and testing/setting * PageHWPoison) should be done in single hugetlb_lock range. 
+ * Returns: + * 0 - not hugetlb, or recovered + * -EBUSY - not recovered + * -EOPNOTSUPP - hwpoison_filter'ed + * -EHWPOISON - folio or exact page already poisoned + * -EFAULT - kill_accessing_process finds current->mm null */ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) { - int res; + int res, rv; struct page *p = pfn_to_page(pfn); struct folio *folio; unsigned long page_flags; @@ -2029,22 +2043,29 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb *hugetlb = 1; retry: res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared); - if (res == 2) { /* fallback to normal page handling */ + switch (res) { + case MF_HUGETLB_NON_HUGEPAGE: /* fallback to normal page handling */ *hugetlb = 0; return 0; - } else if (res == -EHWPOISON) { - if (flags & MF_ACTION_REQUIRED) { - folio = page_folio(p); - res = kill_accessing_process(current, folio_pfn(folio), flags); - } - action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED); - return res; - } else if (res == -EBUSY) { + case MF_HUGETLB_RETRY: if (!(flags & MF_NO_RETRY)) { flags |= MF_NO_RETRY; goto retry; } return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED); + case MF_HUGETLB_FOLIO_PRE_POISONED: + case MF_HUGETLB_PAGE_PRE_POISONED: + rv = -EHWPOISON; + if (flags & MF_ACTION_REQUIRED) + rv = kill_accessing_process(current, pfn, flags); + if (res == MF_HUGETLB_PAGE_PRE_POISONED) + action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED); + else + action_result(pfn, MF_MSG_HUGE, MF_FAILED); + return rv; + default: + WARN_ON((res != MF_HUGETLB_FREED) && (res != MF_HUGETLB_IN_USED)); + break; } folio = page_folio(p); @@ -2055,7 +2076,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb if (migratable_cleared) folio_set_hugetlb_migratable(folio); folio_unlock(folio); - if (res == 1) + if (res == MF_HUGETLB_IN_USED) folio_put(folio); return -EOPNOTSUPP; } @@ -2064,7 +2085,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb * Handling free hugepage. The possible race with hugepage allocation * or demotion can be prevented by PageHWPoison flag. */ - if (res == 0) { + if (res == MF_HUGETLB_FREED) { folio_unlock(folio); if (__page_handle_poison(p) > 0) { page_ref_inc(p); @@ -2390,31 +2411,29 @@ int memory_failure(unsigned long pfn, int flags) * In fact it's dangerous to directly bump up page count from 0, * that may make page_ref_freeze()/page_ref_unfreeze() mismatch. 
*/ - if (!(flags & MF_COUNT_INCREASED)) { - res = get_hwpoison_page(p, flags); - if (!res) { - if (is_free_buddy_page(p)) { - if (take_page_off_buddy(p)) { - page_ref_inc(p); - res = MF_RECOVERED; - } else { - /* We lost the race, try again */ - if (retry) { - ClearPageHWPoison(p); - retry = false; - goto try_again; - } - res = MF_FAILED; - } - res = action_result(pfn, MF_MSG_BUDDY, res); + res = get_hwpoison_page(p, flags); + if (!res) { + if (is_free_buddy_page(p)) { + if (take_page_off_buddy(p)) { + page_ref_inc(p); + res = MF_RECOVERED; } else { - res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED); + /* We lost the race, try again */ + if (retry) { + ClearPageHWPoison(p); + retry = false; + goto try_again; + } + res = MF_FAILED; } - goto unlock_mutex; - } else if (res < 0) { - res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED); - goto unlock_mutex; + res = action_result(pfn, MF_MSG_BUDDY, res); + } else { + res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED); } + goto unlock_mutex; + } else if (res < 0) { + res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED); + goto unlock_mutex; } folio = page_folio(p); diff --git a/mm/memremap.c b/mm/memremap.c index 63c6ab4fdf082c..ac7be07e3361ae 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -477,10 +477,43 @@ void free_zone_device_folio(struct folio *folio) } } -void zone_device_page_init(struct page *page, unsigned int order) +void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap, + unsigned int order) { + struct page *new_page = page; + unsigned int i; + VM_WARN_ON_ONCE(order > MAX_ORDER_NR_PAGES); + for (i = 0; i < (1UL << order); ++i, ++new_page) { + struct folio *new_folio = (struct folio *)new_page; + + /* + * new_page could have been part of a previous higher order folio + * which encodes the order, in page + 1, in the flags bits. We + * blindly clear bits which could have set the order field here, + * including page head. + */ + new_page->flags.f &= ~0xffUL; /* Clear possible order, page head */ + +#ifdef NR_PAGES_IN_LARGE_FOLIO + /* + * This pointer math looks odd, but new_page could have been + * part of a previous higher order folio, which sets _nr_pages + * in page + 1 (new_page). Therefore, we use pointer casting to + * correctly locate the _nr_pages bits within new_page which + * could have been modified by a previous higher order folio. + */ + ((struct folio *)(new_page - 1))->_nr_pages = 0; +#endif + + new_folio->mapping = NULL; + new_folio->pgmap = pgmap; /* Also clear compound head */ + new_folio->share = 0; /* fsdax only, unused for device private */ + VM_WARN_ON_FOLIO(folio_ref_count(new_folio), new_folio); + VM_WARN_ON_FOLIO(!folio_is_zone_device(new_folio), new_folio); + } + /* * Drivers shouldn't be allocating pages after calling * memunmap_pages().
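The memory-failure.c hunk above changes check_hwpoisoned_entry() so that a poisoned pfn no longer has to match the leaf's base pfn exactly: it only has to fall within the leaf mapping, and the precise faulting address is derived from the pfn offset before being handed to set_to_kill(). A minimal user-space sketch of that arithmetic follows; the helper name poisoned_vaddr() and the sample values are illustrative only, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL

/*
 * Given a leaf mapping of 2^(shift - PAGE_SHIFT) pages that starts at
 * base_pfn and is mapped at vaddr, report whether poisoned_pfn lies
 * inside it and, if so, the virtual address of the poisoned subpage.
 */
static bool poisoned_vaddr(unsigned long vaddr, unsigned long base_pfn,
			   unsigned short shift, unsigned long poisoned_pfn,
			   unsigned long *hwpoison_vaddr)
{
	/* Keep only the bits that identify the leaf mapping. */
	unsigned long mask = ~((1UL << (shift - PAGE_SHIFT)) - 1);

	if (!base_pfn || base_pfn != (poisoned_pfn & mask))
		return false;

	/* Offset of the poisoned base page within the leaf mapping. */
	*hwpoison_vaddr = vaddr + ((poisoned_pfn - base_pfn) << PAGE_SHIFT);
	return true;
}

int main(void)
{
	unsigned long va;

	/* 2 MiB leaf (shift 21) at pfn 0x1000, poisoned pfn 0x1005. */
	if (poisoned_vaddr(0x7f0000000000UL, 0x1000, 21, 0x1005, &va))
		printf("poisoned page mapped at %#lx\n", va);
	return 0;
}

For the example values above the sketch prints 0x7f0000005000, i.e. the fifth 4 KiB page inside the 2 MiB mapping, which is the address the patched kernel code reports to set_to_kill() instead of the mapping's start.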
diff --git a/mm/mm_init.c b/mm/mm_init.c index fc2a6f1e518f1a..2a809cd8e7fae0 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2059,7 +2059,7 @@ static unsigned long __init deferred_init_pages(struct zone *zone, */ static unsigned long __init deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, - struct zone *zone) + struct zone *zone, bool can_resched) { int nid = zone_to_nid(zone); unsigned long nr_pages = 0; @@ -2085,10 +2085,10 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, spfn = chunk_end; - if (irqs_disabled()) - touch_nmi_watchdog(); - else + if (can_resched) cond_resched(); + else + touch_nmi_watchdog(); } } @@ -2101,7 +2101,7 @@ deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn, { struct zone *zone = arg; - deferred_init_memmap_chunk(start_pfn, end_pfn, zone); + deferred_init_memmap_chunk(start_pfn, end_pfn, zone, true); } static unsigned int __init @@ -2216,7 +2216,7 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order) for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1); nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone); spfn = epfn, epfn += PAGES_PER_SECTION) { - nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone); + nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone, false); } /* diff --git a/mm/shmem.c b/mm/shmem.c index ec6c01378e9d2b..79af5f9f8b908d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -962,17 +962,29 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) * being freed). */ static long shmem_free_swap(struct address_space *mapping, - pgoff_t index, void *radswap) + pgoff_t index, pgoff_t end, void *radswap) { - int order = xa_get_order(&mapping->i_pages, index); - void *old; + XA_STATE(xas, &mapping->i_pages, index); + unsigned int nr_pages = 0; + pgoff_t base; + void *entry; - old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); - if (old != radswap) - return 0; - free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order); + xas_lock_irq(&xas); + entry = xas_load(&xas); + if (entry == radswap) { + nr_pages = 1 << xas_get_order(&xas); + base = round_down(xas.xa_index, nr_pages); + if (base < index || base + nr_pages - 1 > end) + nr_pages = 0; + else + xas_store(&xas, NULL); + } + xas_unlock_irq(&xas); - return 1 << order; + if (nr_pages) + free_swap_and_cache_nr(radix_to_swp_entry(radswap), nr_pages); + + return nr_pages; } /* @@ -1124,8 +1136,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (xa_is_value(folio)) { if (unfalloc) continue; - nr_swaps_freed += shmem_free_swap(mapping, - indices[i], folio); + nr_swaps_freed += shmem_free_swap(mapping, indices[i], + end - 1, folio); continue; } @@ -1191,14 +1203,30 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, folio = fbatch.folios[i]; if (xa_is_value(folio)) { + int order; long swaps_freed; if (unfalloc) continue; - swaps_freed = shmem_free_swap(mapping, indices[i], folio); + swaps_freed = shmem_free_swap(mapping, indices[i], + end - 1, folio); if (!swaps_freed) { - /* Swap was replaced by page: retry */ - index = indices[i]; + pgoff_t base = indices[i]; + + order = shmem_confirm_swap(mapping, indices[i], + radix_to_swp_entry(folio)); + /* + * If we found a large swap entry crossing the end or start + * border, skip it as the truncate_inode_partial_folio + * above should have at least zeroed its content once.
+ */ + if (order > 0) { + base = round_down(base, 1 << order); + if (base < start || base + (1 << order) > end) + continue; + } + /* Swap was replaced by page or extended, retry */ + index = base; break; } nr_swaps_freed += swaps_freed; diff --git a/mm/slub.c b/mm/slub.c index f77b7407c51bca..cdc1e652ec52fe 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -6689,8 +6689,12 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object, static noinline void memcg_alloc_abort_single(struct kmem_cache *s, void *object) { + struct slab *slab = virt_to_slab(object); + + alloc_tagging_slab_free_hook(s, slab, &object, 1); + if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false))) - do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_); + do_slab_free(s, slab, object, object, 1, _RET_IP_); } #endif diff --git a/mm/swap.h b/mm/swap.h index d034c13d8dd260..1bd466da303939 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -198,7 +198,7 @@ int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug); void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug); /* linux/mm/swap_state.c */ -extern struct address_space swap_space __ro_after_init; +extern struct address_space swap_space __read_mostly; static inline struct address_space *swap_address_space(swp_entry_t entry) { return &swap_space; diff --git a/mm/swap_state.c b/mm/swap_state.c index 5f97c6ae70a2f7..44d228982521ea 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -37,8 +37,7 @@ static const struct address_space_operations swap_aops = { #endif }; -/* Set swap_space as read only as swap cache is handled by swap table */ -struct address_space swap_space __ro_after_init = { +struct address_space swap_space __read_mostly = { .a_ops = &swap_aops, }; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 628f96e83b1187..e286c2d2068cbd 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -4322,7 +4322,7 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align if (want_init_on_free() || want_init_on_alloc(flags)) memset((void *)p + size, 0, old_size - size); vm->requested_size = size; - kasan_poison_vmalloc(p + size, old_size - size); + kasan_vrealloc(p, old_size, size); return (void *)p; } @@ -4330,16 +4330,13 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align * We already have the bytes available in the allocation; use them. */ if (size <= alloced_size) { - kasan_unpoison_vmalloc(p + old_size, size - old_size, - KASAN_VMALLOC_PROT_NORMAL | - KASAN_VMALLOC_VM_ALLOC | - KASAN_VMALLOC_KEEP_TAG); /* * No need to zero memory here, as unused memory will have * already been zeroed at initial allocation time or during * realloc shrink time. 
*/ vm->requested_size = size; + kasan_vrealloc(p, old_size, size); return (void *)p; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 5be9b8c919490d..0e46f9e08b1067 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1966,6 +1966,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) } mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); + mgmt_pending_free(cmd); return; } @@ -1984,6 +1985,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) sock_put(match.sk); hci_update_eir_sync(hdev); + mgmt_pending_free(cmd); } static int set_ssp_sync(struct hci_dev *hdev, void *data) @@ -6438,6 +6440,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) hci_dev_clear_flag(hdev, HCI_ADVERTISING); settings_rsp(cmd, &match); + mgmt_pending_free(cmd); new_settings(hdev, match.sk); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index e355a15bf5ab13..1405f1061a5493 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -274,7 +274,7 @@ static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb) int ret; net = dev_net(skb->dev); -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING])) goto frame_finish; #endif diff --git a/net/core/filter.c b/net/core/filter.c index 616e0520a0bb81..029e560e32ce3e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2289,12 +2289,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, err = bpf_out_neigh_v6(net, skb, dev, nh); if (unlikely(net_xmit_eval(err))) - DEV_STATS_INC(dev, tx_errors); + dev_core_stats_tx_dropped_inc(dev); else ret = NET_XMIT_SUCCESS; goto out_xmit; out_drop: - DEV_STATS_INC(dev, tx_errors); + dev_core_stats_tx_dropped_inc(dev); kfree_skb(skb); out_xmit: return ret; @@ -2396,12 +2396,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, err = bpf_out_neigh_v4(net, skb, dev, nh); if (unlikely(net_xmit_eval(err))) - DEV_STATS_INC(dev, tx_errors); + dev_core_stats_tx_dropped_inc(dev); else ret = NET_XMIT_SUCCESS; goto out_xmit; out_drop: - DEV_STATS_INC(dev, tx_errors); + dev_core_stats_tx_dropped_inc(dev); kfree_skb(skb); out_xmit: return ret; @@ -3353,6 +3353,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) shinfo->gso_type &= ~SKB_GSO_TCPV4; shinfo->gso_type |= SKB_GSO_TCPV6; } + shinfo->gso_type |= SKB_GSO_DODGY; } bpf_skb_change_protocol(skb, ETH_P_IPV6); @@ -3383,6 +3384,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) shinfo->gso_type &= ~SKB_GSO_TCPV6; shinfo->gso_type |= SKB_GSO_TCPV4; } + shinfo->gso_type |= SKB_GSO_DODGY; } bpf_skb_change_protocol(skb, ETH_P_IP); diff --git a/net/core/gro.c b/net/core/gro.c index 76f9c371242210..482fa7d7f5981a 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -265,6 +265,8 @@ static void gro_complete(struct gro_node *gro, struct sk_buff *skb) goto out; } + /* NICs can feed encapsulated packets into GRO */ + skb->encapsulation = 0; rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type != type || !ptype->callbacks.gro_complete) diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 212cde35affa7b..25c455c10a01cf 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -185,10 +185,6 @@ static void linkwatch_do_dev(struct net_device *dev) netif_state_change(dev); } - /* Note: our callers are responsible for calling netdev_tracker_free(). 
- * This is the reason we use __dev_put() instead of dev_put(). - */ - __dev_put(dev); } static void __linkwatch_run_queue(int urgent_only) @@ -243,6 +239,11 @@ static void __linkwatch_run_queue(int urgent_only) netdev_lock_ops(dev); linkwatch_do_dev(dev); netdev_unlock_ops(dev); + /* Use __dev_put() because netdev_tracker_free() was already + * called above. Must be after netdev_unlock_ops() to prevent + * netdev_run_todo() from freeing the device while still in use. + */ + __dev_put(dev); do_dev--; spin_lock_irq(&lweventlist_lock); } @@ -278,8 +279,13 @@ void __linkwatch_sync_dev(struct net_device *dev) { netdev_ops_assert_locked(dev); - if (linkwatch_clean_dev(dev)) + if (linkwatch_clean_dev(dev)) { linkwatch_do_dev(dev); + /* Use __dev_put() because netdev_tracker_free() was already + * called inside linkwatch_clean_dev(). + */ + __dev_put(dev); + } } void linkwatch_sync_dev(struct net_device *dev) @@ -288,6 +294,10 @@ void linkwatch_sync_dev(struct net_device *dev) netdev_lock_ops(dev); linkwatch_do_dev(dev); netdev_unlock_ops(dev); + /* Use __dev_put() because netdev_tracker_free() was already + * called inside linkwatch_clean_dev(). + */ + __dev_put(dev); } } diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index 70e0e9a3b650c0..7dbfa6109f0b87 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -170,8 +170,14 @@ static const struct seq_operations softnet_seq_ops = { .show = softnet_seq_show, }; +struct ptype_iter_state { + struct seq_net_private p; + struct net_device *dev; +}; + static void *ptype_get_idx(struct seq_file *seq, loff_t pos) { + struct ptype_iter_state *iter = seq->private; struct list_head *ptype_list = NULL; struct packet_type *pt = NULL; struct net_device *dev; @@ -181,12 +187,16 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos) for_each_netdev_rcu(seq_file_net(seq), dev) { ptype_list = &dev->ptype_all; list_for_each_entry_rcu(pt, ptype_list, list) { - if (i == pos) + if (i == pos) { + iter->dev = dev; return pt; + } ++i; } } + iter->dev = NULL; + list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) { if (i == pos) return pt; @@ -218,6 +228,7 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct ptype_iter_state *iter = seq->private; struct net *net = seq_file_net(seq); struct net_device *dev; struct packet_type *pt; @@ -229,19 +240,21 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) return ptype_get_idx(seq, 0); pt = v; - nxt = pt->list.next; - if (pt->dev) { - if (nxt != &pt->dev->ptype_all) + nxt = READ_ONCE(pt->list.next); + dev = iter->dev; + if (dev) { + if (nxt != &dev->ptype_all) goto found; - dev = pt->dev; for_each_netdev_continue_rcu(seq_file_net(seq), dev) { - if (!list_empty(&dev->ptype_all)) { - nxt = dev->ptype_all.next; + nxt = READ_ONCE(dev->ptype_all.next); + if (nxt != &dev->ptype_all) { + iter->dev = dev; goto found; } } - nxt = net->ptype_all.next; + iter->dev = NULL; + nxt = READ_ONCE(net->ptype_all.next); goto net_ptype_all; } @@ -252,20 +265,20 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (nxt == &net->ptype_all) { /* continue with ->ptype_specific if it's not empty */ - nxt = net->ptype_specific.next; + nxt = READ_ONCE(net->ptype_specific.next); if (nxt != &net->ptype_specific) goto found; } hash = 0; - nxt = ptype_base[0].next; + nxt = READ_ONCE(ptype_base[0].next); } else hash = ntohs(pt->type) & PTYPE_HASH_MASK; while (nxt == 
&ptype_base[hash]) { if (++hash >= PTYPE_HASH_SIZE) return NULL; - nxt = ptype_base[hash].next; + nxt = READ_ONCE(ptype_base[hash].next); } found: return list_entry(nxt, struct packet_type, list); @@ -279,19 +292,24 @@ static void ptype_seq_stop(struct seq_file *seq, void *v) static int ptype_seq_show(struct seq_file *seq, void *v) { + struct ptype_iter_state *iter = seq->private; struct packet_type *pt = v; + struct net_device *dev; - if (v == SEQ_START_TOKEN) + if (v == SEQ_START_TOKEN) { seq_puts(seq, "Type Device Function\n"); - else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && - (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { + return 0; + } + dev = iter->dev; + if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && + (!dev || net_eq(dev_net(dev), seq_file_net(seq)))) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else seq_printf(seq, "%04x", ntohs(pt->type)); seq_printf(seq, " %-8s %ps\n", - pt->dev ? pt->dev->name : "", pt->func); + dev ? dev->name : "", pt->func); } return 0; @@ -315,7 +333,7 @@ static int __net_init dev_proc_net_init(struct net *net) &softnet_seq_ops)) goto out_dev; if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops, - sizeof(struct seq_net_private))) + sizeof(struct ptype_iter_state))) goto out_softnet; if (wext_proc_init(net)) diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 369c05cf8163c1..d47a279eb8b999 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -862,9 +862,6 @@ ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops, ctx->key_off = key_off; ctx->priv_size = ops->rxfh_priv_size; - ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; - ctx->input_xfrm = RXH_XFRM_NO_CHANGE; - return ctx; } diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c index 4dced53be4b3bb..da5934cceb0757 100644 --- a/net/ethtool/rss.c +++ b/net/ethtool/rss.c @@ -824,8 +824,8 @@ rss_set_ctx_update(struct ethtool_rxfh_context *ctx, struct nlattr **tb, static int ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info) { - bool indir_reset = false, indir_mod, xfrm_sym = false; struct rss_req_info *request = RSS_REQINFO(req_info); + bool indir_reset = false, indir_mod, xfrm_sym; struct ethtool_rxfh_context *ctx = NULL; struct net_device *dev = req_info->dev; bool mod = false, fields_mod = false; @@ -860,12 +860,7 @@ ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info) rxfh.input_xfrm = data.input_xfrm; ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod); - /* For drivers which don't support input_xfrm it will be set to 0xff - * in the RSS context info. In all other case input_xfrm != 0 means - * symmetric hashing is requested. 
- */ - if (!request->rss_context || ops->rxfh_per_ctx_key) - xfrm_sym = rxfh.input_xfrm || data.input_xfrm; + xfrm_sym = rxfh.input_xfrm || data.input_xfrm; if (rxfh.input_xfrm == data.input_xfrm) rxfh.input_xfrm = RXH_XFRM_NO_CHANGE; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index fdda18b1abda0f..942a948f1a3109 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -107,7 +107,8 @@ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { struct tcphdr *th = tcp_hdr(skb); - if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) + if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) return __tcp4_gso_segment_list(skb, features); skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 19d0b5b09ffae6..589456bd8b5f1b 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -514,7 +514,8 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) { /* Detect modified geometry and pass those to skb_segment. */ - if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) + if ((skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) && + !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_DODGY)) return __udp_gso_segment_list(gso_skb, features, is_ipv6); ret = __skb_linearize(gso_skb); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 5d2f90babaa5f1..9d37e7711bc2b8 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -965,7 +965,9 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb) fl6.daddr = ipv6_hdr(skb)->saddr; if (saddr) fl6.saddr = *saddr; - fl6.flowi6_oif = icmp6_iif(skb); + fl6.flowi6_oif = ipv6_addr_loopback(&fl6.daddr) ? 
+ skb->dev->ifindex : + icmp6_iif(skb); fl6.fl6_icmp_type = type; fl6.flowi6_mark = mark; fl6.flowi6_uid = sock_net_uid(net, NULL); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 2111af022d946d..c6439e30e892af 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1138,7 +1138,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, fib6_set_expires(iter, rt->expires); fib6_add_gc_list(iter); } - if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT))) { + if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT)) && + !iter->fib6_nh->fib_nh_gw_family) { iter->fib6_flags &= ~RTF_ADDRCONF; iter->fib6_flags &= ~RTF_PREFIX_RT; } diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index effeba58630b5a..5670d32c27f80e 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -170,7 +170,8 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb, if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { struct tcphdr *th = tcp_hdr(skb); - if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) + if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) return __tcp6_gso_segment_list(skb, features); skb->ip_summed = CHECKSUM_NONE; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b72345c779c0f3..73f57b9e0ebf76 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -8,7 +8,7 @@ * Copyright 2007, Michael Wu * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2025 Intel Corporation + * Copyright (C) 2018 - 2026 Intel Corporation */ #include @@ -6190,8 +6190,10 @@ ieee80211_parse_adv_t2l(struct ieee80211_sub_if_data *sdata, return -EINVAL; } - link_map_presence = *pos; - pos++; + if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) { + link_map_presence = *pos; + pos++; + } if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT) { ttlm_info->switch_time = get_unaligned_le16(pos); diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c index 57570a44e41853..b26675054b0dc7 100644 --- a/net/mptcp/pm_kernel.c +++ b/net/mptcp/pm_kernel.c @@ -1294,16 +1294,26 @@ static void __reset_counters(struct pm_nl_pernet *pernet) int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) { struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - LIST_HEAD(free_list); + struct list_head free_list; spin_lock_bh(&pernet->lock); - list_splice_init(&pernet->endp_list, &free_list); + free_list = pernet->endp_list; + INIT_LIST_HEAD_RCU(&pernet->endp_list); __reset_counters(pernet); pernet->next_id = 1; bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); spin_unlock_bh(&pernet->lock); - mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); + + if (free_list.next == &pernet->endp_list) + return 0; + synchronize_rcu(); + + /* Adjust the pointers to free_list instead of pernet->endp_list */ + free_list.prev->next = &free_list; + free_list.next->prev = &free_list; + + mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); __flush_addrs(&free_list); return 0; } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index f505b780f7139b..8d323366741810 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -821,11 +821,8 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk) static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) { - int err = sock_error(ssk); int ssk_state; - - if (!err) - return false; + int err; /* only propagate errors 
on fallen-back sockets or * on MPC connect @@ -833,6 +830,10 @@ static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk))) return false; + err = sock_error(ssk); + if (!err) + return false; + /* We need to propagate only transition to CLOSE state. * Orphaned socket will see such state change via * subflow_sched_work_if_closed() and that path will properly @@ -2598,8 +2599,8 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk, struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *skb; - /* The first subflow can already be closed and still in the list */ - if (subflow->close_event_done) + /* The first subflow can already be closed or disconnected */ + if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0) return; subflow->close_event_done = true; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 729a92781a1a47..be92750e2af3ad 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5914,7 +5914,7 @@ static void nft_map_catchall_activate(const struct nft_ctx *ctx, list_for_each_entry(catchall, &set->catchall_list, list) { ext = nft_set_elem_ext(set, catchall->elem); - if (!nft_set_elem_active(ext, genmask)) + if (nft_set_elem_active(ext, genmask)) continue; nft_clear(ctx->net, ext); diff --git a/net/nfc/core.c b/net/nfc/core.c index 82f023f377541b..f50e5bab35d8ec 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -1147,14 +1147,14 @@ int nfc_register_device(struct nfc_dev *dev) EXPORT_SYMBOL(nfc_register_device); /** - * nfc_unregister_device - unregister a nfc device in the nfc subsystem + * nfc_unregister_rfkill - unregister a nfc device in the rfkill subsystem * * @dev: The nfc device to unregister */ -void nfc_unregister_device(struct nfc_dev *dev) +void nfc_unregister_rfkill(struct nfc_dev *dev) { - int rc; struct rfkill *rfk = NULL; + int rc; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); @@ -1175,7 +1175,16 @@ void nfc_unregister_device(struct nfc_dev *dev) rfkill_unregister(rfk); rfkill_destroy(rfk); } +} +EXPORT_SYMBOL(nfc_unregister_rfkill); +/** + * nfc_remove_device - remove a nfc device in the nfc subsystem + * + * @dev: The nfc device to remove + */ +void nfc_remove_device(struct nfc_dev *dev) +{ if (dev->ops->check_presence) { timer_delete_sync(&dev->check_pres_timer); cancel_work_sync(&dev->check_pres_work); @@ -1188,6 +1197,18 @@ void nfc_unregister_device(struct nfc_dev *dev) device_del(&dev->dev); mutex_unlock(&nfc_devlist_mutex); } +EXPORT_SYMBOL(nfc_remove_device); + +/** + * nfc_unregister_device - unregister a nfc device in the nfc subsystem + * + * @dev: The nfc device to unregister + */ +void nfc_unregister_device(struct nfc_dev *dev) +{ + nfc_unregister_rfkill(dev); + nfc_remove_device(dev); +} EXPORT_SYMBOL(nfc_unregister_device); static int __init nfc_init(void) diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index e2680a3bef7995..b652323bc2c12b 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -778,8 +778,23 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, if (likely(frag_len > 0)) skb_put_data(pdu, msg_ptr, frag_len); + spin_lock(&local->tx_queue.lock); + + if (list_empty(&local->list)) { + spin_unlock(&local->tx_queue.lock); + + kfree_skb(pdu); + + len -= remaining_len; + if (len == 0) + len = -ENXIO; + break; + } + /* No need to check for the peer RW for UI frames */ - skb_queue_tail(&local->tx_queue, pdu); + 
__skb_queue_tail(&local->tx_queue, pdu); + + spin_unlock(&local->tx_queue.lock); remaining_len -= frag_len; msg_ptr += frag_len; diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index beeb3b4d28caba..444a3774c8e806 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -316,7 +316,9 @@ static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev) spin_lock(&llcp_devices_lock); list_for_each_entry_safe(local, tmp, &llcp_devices, list) if (local->dev == dev) { - list_del(&local->list); + spin_lock(&local->tx_queue.lock); + list_del_init(&local->list); + spin_unlock(&local->tx_queue.lock); spin_unlock(&llcp_devices_lock); return local; } diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index fc921cd2cdffa5..e419e020a70a33 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -1303,6 +1303,8 @@ void nci_unregister_device(struct nci_dev *ndev) { struct nci_conn_info *conn_info, *n; + nfc_unregister_rfkill(ndev->nfc_dev); + /* This set_bit is not protected with specialized barrier, * However, it is fine because the mutex_lock(&ndev->req_lock); * in nci_close_device() will help to emit one. @@ -1320,7 +1322,7 @@ void nci_unregister_device(struct nci_dev *ndev) /* conn_info is allocated with devm_kzalloc */ } - nfc_unregister_device(ndev->nfc_dev); + nfc_remove_device(ndev->nfc_dev); } EXPORT_SYMBOL(nci_unregister_device); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 2a1c00048fd6f4..58e849c0acf412 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -161,10 +161,8 @@ TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb, int toff = off + key->off + (off2 & key->offmask); __be32 *data, hdata; - if (skb_headroom(skb) + toff > INT_MAX) - goto out; - - data = skb_header_pointer(skb, toff, 4, &hdata); + data = skb_header_pointer_careful(skb, toff, 4, + &hdata); if (!data) goto out; if ((*data ^ key->val) & key->mask) { @@ -214,8 +212,9 @@ TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb, if (ht->divisor) { __be32 *data, hdata; - data = skb_header_pointer(skb, off + n->sel.hoff, 4, - &hdata); + data = skb_header_pointer_careful(skb, + off + n->sel.hoff, + 4, &hdata); if (!data) goto out; sel = ht->divisor & u32_hash_fold(*data, &n->sel, @@ -229,7 +228,7 @@ TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb, if (n->sel.flags & TC_U32_VAROFFSET) { __be16 *data, hdata; - data = skb_header_pointer(skb, + data = skb_header_pointer_careful(skb, off + n->sel.offoff, 2, &hdata); if (!data) diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index 751904f10aab00..970db62bd029b2 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -1219,7 +1219,7 @@ void tipc_crypto_key_flush(struct tipc_crypto *c) rx = c; tx = tipc_net(rx->net)->crypto_tx; if (cancel_delayed_work(&rx->work)) { - kfree(rx->skey); + kfree_sensitive(rx->skey); rx->skey = NULL; atomic_xchg(&rx->key_distr, 0); tipc_node_put(rx->node); @@ -2394,7 +2394,7 @@ static void tipc_crypto_work_rx(struct work_struct *work) break; default: synchronize_rcu(); - kfree(rx->skey); + kfree_sensitive(rx->skey); rx->skey = NULL; break; } diff --git a/rust/Makefile b/rust/Makefile index 5d357dce1704d1..4dcc2eff51cb26 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -383,6 +383,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ -fno-inline-functions-called-once -fsanitize=bounds-strict \ -fstrict-flex-arrays=% -fmin-function-alignment=% \ -fzero-init-padding-bits=% -mno-fdpic \ + -fdiagnostics-show-context -fdiagnostics-show-context=% \ --param=% --param asan-% 
-fno-isolate-erroneous-paths-dereference # Derived from `scripts/Makefile.clang`. diff --git a/rust/kernel/bits.rs b/rust/kernel/bits.rs index 553d5026588329..2daead125626ae 100644 --- a/rust/kernel/bits.rs +++ b/rust/kernel/bits.rs @@ -27,7 +27,8 @@ macro_rules! impl_bit_fn { /// /// This version is the default and should be used if `n` is known at /// compile time. - #[inline] + // Always inline to optimize out error path of `build_assert`. + #[inline(always)] pub const fn [](n: u32) -> $ty { build_assert!(n < <$ty>::BITS); (1 as $ty) << n @@ -75,7 +76,8 @@ macro_rules! impl_genmask_fn { /// This version is the default and should be used if the range is known /// at compile time. $(#[$genmask_ex])* - #[inline] + // Always inline to optimize out error path of `build_assert`. + #[inline(always)] pub const fn [](range: RangeInclusive) -> $ty { let start = *range.start(); let end = *range.end(); diff --git a/rust/kernel/fmt.rs b/rust/kernel/fmt.rs index 84d634201d90a7..1e8725eb44ed7d 100644 --- a/rust/kernel/fmt.rs +++ b/rust/kernel/fmt.rs @@ -6,7 +6,7 @@ pub use core::fmt::{Arguments, Debug, Error, Formatter, Result, Write}; -/// Internal adapter used to route allow implementations of formatting traits for foreign types. +/// Internal adapter used to route and allow implementations of formatting traits for foreign types. /// /// It is inserted automatically by the [`fmt!`] macro and is not meant to be used directly. /// diff --git a/rust/kernel/num/bounded.rs b/rust/kernel/num/bounded.rs index f870080af8ac0a..fa81acbdc8c258 100644 --- a/rust/kernel/num/bounded.rs +++ b/rust/kernel/num/bounded.rs @@ -40,11 +40,11 @@ fn fits_within(value: T, num_bits: u32) -> bool { fits_within!(value, T, num_bits) } -/// An integer value that requires only the `N` less significant bits of the wrapped type to be +/// An integer value that requires only the `N` least significant bits of the wrapped type to be /// encoded. /// /// This limits the number of usable bits in the wrapped integer type, and thus the stored value to -/// a narrower range, which provides guarantees that can be useful when working with in e.g. +/// a narrower range, which provides guarantees that can be useful when working within e.g. /// bitfields. /// /// # Invariants @@ -56,7 +56,7 @@ fn fits_within(value: T, num_bits: u32) -> bool { /// # Examples /// /// The preferred way to create values is through constants and the [`Bounded::new`] family of -/// constructors, as they trigger a build error if the type invariants cannot be withheld. +/// constructors, as they trigger a build error if the type invariants cannot be upheld. /// /// ``` /// use kernel::num::Bounded; @@ -82,7 +82,7 @@ fn fits_within(value: T, num_bits: u32) -> bool { /// ``` /// use kernel::num::Bounded; /// -/// // This succeeds because `15` can be represented with 4 unsigned bits. +/// // This succeeds because `15` can be represented with 4 unsigned bits. /// assert!(Bounded::::try_new(15).is_some()); /// /// // This fails because `16` cannot be represented with 4 unsigned bits. @@ -221,7 +221,7 @@ fn fits_within(value: T, num_bits: u32) -> bool { /// let v: Option> = 128u32.try_into_bounded(); /// assert_eq!(v.as_deref().copied(), Some(128)); /// -/// // Fails because `128` doesn't fits into 6 bits. +/// // Fails because `128` doesn't fit into 6 bits. /// let v: Option> = 128u32.try_into_bounded(); /// assert_eq!(v, None); /// ``` @@ -259,9 +259,9 @@ macro_rules! 
impl_const_new { assert!(fits_within!(VALUE, $type, N)); } - // INVARIANT: `fits_within` confirmed that `VALUE` can be represented within + // SAFETY: `fits_within` confirmed that `VALUE` can be represented within // `N` bits. - Self::__new(VALUE) + unsafe { Self::__new(VALUE) } } } )* @@ -282,9 +282,10 @@ where /// All instances of [`Bounded`] must be created through this method as it enforces most of the /// type invariants. /// - /// The caller remains responsible for checking, either statically or dynamically, that `value` - /// can be represented as a `T` using at most `N` bits. - const fn __new(value: T) -> Self { + /// # Safety + /// + /// The caller must ensure that `value` can be represented within `N` bits. + const unsafe fn __new(value: T) -> Self { // Enforce the type invariants. const { // `N` cannot be zero. @@ -293,6 +294,7 @@ where assert!(N <= T::BITS); } + // INVARIANT: The caller ensures `value` fits within `N` bits. Self(value) } @@ -328,8 +330,8 @@ where /// ``` pub fn try_new(value: T) -> Option { fits_within(value, N).then(|| { - // INVARIANT: `fits_within` confirmed that `value` can be represented within `N` bits. - Self::__new(value) + // SAFETY: `fits_within` confirmed that `value` can be represented within `N` bits. + unsafe { Self::__new(value) } }) } @@ -363,6 +365,7 @@ where /// assert_eq!(Bounded::::from_expr(1).get(), 1); /// assert_eq!(Bounded::::from_expr(0xff).get(), 0xff); /// ``` + // Always inline to optimize out error path of `build_assert`. #[inline(always)] pub fn from_expr(expr: T) -> Self { crate::build_assert!( @@ -370,8 +373,8 @@ where "Requested value larger than maximal representable value." ); - // INVARIANT: `fits_within` confirmed that `expr` can be represented within `N` bits. - Self::__new(expr) + // SAFETY: `fits_within` confirmed that `expr` can be represented within `N` bits. + unsafe { Self::__new(expr) } } /// Returns the wrapped value as the backing type. @@ -410,9 +413,9 @@ where ); } - // INVARIANT: The value did fit within `N` bits, so it will all the more fit within + // SAFETY: The value did fit within `N` bits, so it will all the more fit within // the larger `M` bits. - Bounded::__new(self.0) + unsafe { Bounded::__new(self.0) } } /// Attempts to shrink the number of bits usable for `self`. @@ -466,9 +469,9 @@ where // `U` and `T` have the same sign, hence this conversion cannot fail. let value = unsafe { U::try_from(self.get()).unwrap_unchecked() }; - // INVARIANT: Although the backing type has changed, the value is still represented within + // SAFETY: Although the backing type has changed, the value is still represented within // `N` bits, and with the same signedness. - Bounded::__new(value) + unsafe { Bounded::__new(value) } } } @@ -501,7 +504,7 @@ where /// let v: Option> = 128u32.try_into_bounded(); /// assert_eq!(v.as_deref().copied(), Some(128)); /// -/// // Fails because `128` doesn't fits into 6 bits. +/// // Fails because `128` doesn't fit into 6 bits. /// let v: Option> = 128u32.try_into_bounded(); /// assert_eq!(v, None); /// ``` @@ -944,9 +947,9 @@ macro_rules! impl_from_primitive { Self: AtLeastXBits<{ <$type as Integer>::BITS as usize }>, { fn from(value: $type) -> Self { - // INVARIANT: The trait bound on `Self` guarantees that `N` bits is + // SAFETY: The trait bound on `Self` guarantees that `N` bits is // enough to hold any value of the source type. 
- Self::__new(T::from(value)) + unsafe { Self::__new(T::from(value)) } } } )* @@ -1051,8 +1054,8 @@ where T: Integer + From, { fn from(value: bool) -> Self { - // INVARIANT: A boolean can be represented using a single bit, and thus fits within any + // SAFETY: A boolean can be represented using a single bit, and thus fits within any // integer type for any `N` > 0. - Self::__new(T::from(value)) + unsafe { Self::__new(T::from(value)) } } } diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs index 4729eb56827a12..312cecab72e780 100644 --- a/rust/kernel/rbtree.rs +++ b/rust/kernel/rbtree.rs @@ -985,7 +985,7 @@ impl<'a, K, V> CursorMut<'a, K, V> { self.peek(Direction::Prev) } - /// Access the previous node without moving the cursor. + /// Access the next node without moving the cursor. pub fn peek_next(&self) -> Option<(&K, &V)> { self.peek(Direction::Next) } @@ -1130,7 +1130,7 @@ pub struct IterMut<'a, K, V> { } // SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`. -// The iterator only gives out immutable references to the keys, but since the iterator has excusive access to those same +// The iterator only gives out immutable references to the keys, but since the iterator has exclusive access to those same // keys, `Send` is sufficient. `Sync` would be okay, but it is more restrictive to the user. unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {} diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs index 45a17985cda45e..0fca1ba3c2dbd1 100644 --- a/rust/kernel/sync/atomic/predefine.rs +++ b/rust/kernel/sync/atomic/predefine.rs @@ -35,12 +35,23 @@ unsafe impl super::AtomicAdd for i64 { // as `isize` and `usize`, and `isize` and `usize` are always bi-directional transmutable to // `isize_atomic_repr`, which also always implements `AtomicImpl`. #[allow(non_camel_case_types)] +#[cfg(not(testlib))] #[cfg(not(CONFIG_64BIT))] type isize_atomic_repr = i32; #[allow(non_camel_case_types)] +#[cfg(not(testlib))] #[cfg(CONFIG_64BIT)] type isize_atomic_repr = i64; +#[allow(non_camel_case_types)] +#[cfg(testlib)] +#[cfg(target_pointer_width = "32")] +type isize_atomic_repr = i32; +#[allow(non_camel_case_types)] +#[cfg(testlib)] +#[cfg(target_pointer_width = "64")] +type isize_atomic_repr = i64; + // Ensure size and alignment requirements are checked. static_assert!(size_of::() == size_of::()); static_assert!(align_of::() == align_of::()); diff --git a/rust/kernel/sync/refcount.rs b/rust/kernel/sync/refcount.rs index 19236a5bccdeb0..6c7ae8b05a0b85 100644 --- a/rust/kernel/sync/refcount.rs +++ b/rust/kernel/sync/refcount.rs @@ -23,7 +23,8 @@ impl Refcount { /// Construct a new [`Refcount`] from an initial value. /// /// The initial value should be non-saturated. - #[inline] + // Always inline to optimize out error path of `build_assert`. + #[inline(always)] pub fn new(value: i32) -> Self { build_assert!(value >= 0, "initial value saturated"); // SAFETY: There are no safety requirements for this FFI call. 
diff --git a/rust/macros/fmt.rs b/rust/macros/fmt.rs index 2f4b9f6e221104..8354abd5450222 100644 --- a/rust/macros/fmt.rs +++ b/rust/macros/fmt.rs @@ -67,7 +67,7 @@ pub(crate) fn fmt(input: TokenStream) -> TokenStream { } (None, acc) })(); - args.extend(quote_spanned!(first_span => #lhs #adapter(&#rhs))); + args.extend(quote_spanned!(first_span => #lhs #adapter(&(#rhs)))); } }; diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs index b38002151871a3..33f66e86418ab8 100644 --- a/rust/macros/lib.rs +++ b/rust/macros/lib.rs @@ -59,7 +59,7 @@ use proc_macro::TokenStream; /// /// # Examples /// -/// ``` +/// ```ignore /// use kernel::prelude::*; /// /// module!{ diff --git a/rust/proc-macro2/lib.rs b/rust/proc-macro2/lib.rs index 7b78d065d51ce8..5d408943fa0d4e 100644 --- a/rust/proc-macro2/lib.rs +++ b/rust/proc-macro2/lib.rs @@ -1,5 +1,9 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT +// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT` +// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is +// touched by Kconfig when the version string from the compiler changes. + //! [![github]](https://github.com/dtolnay/proc-macro2) [![crates-io]](https://crates.io/crates/proc-macro2) [![docs-rs]](crate) //! //! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 5037f4715d7491..32e209bc7985cb 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -166,11 +166,13 @@ else ifeq ($(KBUILD_CHECKSRC),2) cmd_force_checksrc = $(CHECK) $(CHECKFLAGS) $(c_flags) $< endif +ifeq ($(KBUILD_EXTMOD),) ifneq ($(KBUILD_EXTRA_WARN),) cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(KDOCFLAGS) \ $(if $(findstring 2, $(KBUILD_EXTRA_WARN)), -Wall) \ $< endif +endif # Compile C sources (.c) # --------------------------------------------------------------------------- @@ -356,7 +358,7 @@ $(obj)/%.o: $(obj)/%.rs FORCE quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@ cmd_rustc_rsi_rs = \ $(rust_common_cmd) -Zunpretty=expanded $< >$@; \ - command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) $@ + command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) --config-path $(srctree)/.rustfmt.toml $@ $(obj)/%.rsi: $(obj)/%.rs FORCE +$(call if_changed_dep,rustc_rsi_rs) diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux index cd788cac9d91da..276c3134a563ad 100644 --- a/scripts/Makefile.vmlinux +++ b/scripts/Makefile.vmlinux @@ -113,7 +113,8 @@ vmlinux: vmlinux.unstripped FORCE # what kmod expects to parse. quiet_cmd_modules_builtin_modinfo = GEN $@ cmd_modules_builtin_modinfo = $(cmd_objcopy); \ - sed -i 's/\x00\+$$/\x00/g' $@ + sed -i 's/\x00\+$$/\x00/g' $@; \ + chmod -x $@ OBJCOPYFLAGS_modules.builtin.modinfo := -j .modinfo -O binary diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py index 147d0cc9406814..766c2d91cd8111 100755 --- a/scripts/generate_rust_analyzer.py +++ b/scripts/generate_rust_analyzer.py @@ -61,7 +61,6 @@ def append_sysroot_crate( display_name, deps, cfg=[], - edition="2021", ): append_crate( display_name, @@ -69,13 +68,37 @@ def append_sysroot_crate( deps, cfg, is_workspace_member=False, - edition=edition, + # Miguel Ojeda writes: + # + # > ... in principle even the sysroot crates may have different + # > editions. 
+ # > + # > For instance, in the move to 2024, it seems all happened at once + # > in 1.87.0 in these upstream commits: + # > + # > 0e071c2c6a58 ("Migrate core to Rust 2024") + # > f505d4e8e380 ("Migrate alloc to Rust 2024") + # > 0b2489c226c3 ("Migrate proc_macro to Rust 2024") + # > 993359e70112 ("Migrate std to Rust 2024") + # > + # > But in the previous move to 2021, `std` moved in 1.59.0, while + # > the others in 1.60.0: + # > + # > b656384d8398 ("Update stdlib to the 2021 edition") + # > 06a1c14d52a8 ("Switch all libraries to the 2021 edition") + # + # Link: https://lore.kernel.org/all/CANiq72kd9bHdKaAm=8xCUhSHMy2csyVed69bOc4dXyFAW4sfuw@mail.gmail.com/ + # + # At the time of writing all rust versions we support build the + # sysroot crates with the same edition. We may need to relax this + # assumption if future edition moves span multiple rust versions. + edition=core_edition, ) # NB: sysroot crates reexport items from one another so setting up our transitive dependencies # here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth # for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`. - append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", []), edition=core_edition) + append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", [])) append_sysroot_crate("alloc", ["core"]) append_sysroot_crate("std", ["alloc", "core"]) append_sysroot_crate("proc_macro", ["core", "std"]) @@ -83,7 +106,7 @@ def append_sysroot_crate( append_crate( "compiler_builtins", srctree / "rust" / "compiler_builtins.rs", - [], + ["core"], ) append_crate( @@ -96,14 +119,15 @@ def append_sysroot_crate( append_crate( "quote", srctree / "rust" / "quote" / "lib.rs", - ["alloc", "proc_macro", "proc_macro2"], + ["core", "alloc", "std", "proc_macro", "proc_macro2"], cfg=crates_cfgs["quote"], + edition="2018", ) append_crate( "syn", srctree / "rust" / "syn" / "lib.rs", - ["proc_macro", "proc_macro2", "quote"], + ["std", "proc_macro", "proc_macro2", "quote"], cfg=crates_cfgs["syn"], ) @@ -123,7 +147,7 @@ def append_sysroot_crate( append_crate( "pin_init_internal", srctree / "rust" / "pin-init" / "internal" / "src" / "lib.rs", - [], + ["std", "proc_macro"], cfg=["kernel"], is_proc_macro=True, ) @@ -131,7 +155,7 @@ def append_sysroot_crate( append_crate( "pin_init", srctree / "rust" / "pin-init" / "src" / "lib.rs", - ["core", "pin_init_internal", "macros"], + ["core", "compiler_builtins", "pin_init_internal", "macros"], cfg=["kernel"], ) @@ -190,7 +214,7 @@ def is_root_crate(build_file, target): append_crate( name, path, - ["core", "kernel"], + ["core", "kernel", "pin_init"], cfg=cfg, ) @@ -213,9 +237,6 @@ def main(): level=logging.INFO if args.verbose else logging.WARNING ) - # Making sure that the `sysroot` and `sysroot_src` belong to the same toolchain. 
- assert args.sysroot in args.sysroot_src.parents - rust_project = { "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs, args.core_edition), "sysroot": str(args.sysroot), diff --git a/scripts/livepatch/init.c b/scripts/livepatch/init.c index 2274d8f5a4826c..638c95cffe76de 100644 --- a/scripts/livepatch/init.c +++ b/scripts/livepatch/init.c @@ -9,19 +9,19 @@ #include #include -extern struct klp_object_ext __start_klp_objects[]; -extern struct klp_object_ext __stop_klp_objects[]; - static struct klp_patch *patch; static int __init livepatch_mod_init(void) { + struct klp_object_ext *obj_exts; + size_t obj_exts_sec_size; struct klp_object *objs; unsigned int nr_objs; int ret; - nr_objs = __stop_klp_objects - __start_klp_objects; - + obj_exts = klp_find_section_by_name(THIS_MODULE, ".init.klp_objects", + &obj_exts_sec_size); + nr_objs = obj_exts_sec_size / sizeof(*obj_exts); if (!nr_objs) { pr_err("nothing to patch!\n"); ret = -EINVAL; @@ -41,7 +41,7 @@ static int __init livepatch_mod_init(void) } for (int i = 0; i < nr_objs; i++) { - struct klp_object_ext *obj_ext = __start_klp_objects + i; + struct klp_object_ext *obj_ext = obj_exts + i; struct klp_func_ext *funcs_ext = obj_ext->funcs; unsigned int nr_funcs = obj_ext->nr_funcs; struct klp_func *funcs = objs[i].funcs; @@ -90,12 +90,10 @@ static int __init livepatch_mod_init(void) static void __exit livepatch_mod_exit(void) { - unsigned int nr_objs; - - nr_objs = __stop_klp_objects - __start_klp_objects; + struct klp_object *obj; - for (int i = 0; i < nr_objs; i++) - kfree(patch->objs[i].funcs); + klp_for_each_object_static(patch, obj) + kfree(obj->funcs); kfree(patch->objs); kfree(patch); diff --git a/scripts/livepatch/klp-build b/scripts/livepatch/klp-build index 882272120c9ef1..809e198a561d54 100755 --- a/scripts/livepatch/klp-build +++ b/scripts/livepatch/klp-build @@ -249,6 +249,10 @@ validate_config() { [[ -v CONFIG_GCC_PLUGIN_RANDSTRUCT ]] && \ die "kernel option 'CONFIG_GCC_PLUGIN_RANDSTRUCT' not supported" + [[ -v CONFIG_AS_IS_LLVM ]] && \ + [[ "$CONFIG_AS_VERSION" -lt 200000 ]] && \ + die "Clang assembler version < 20 not supported" + return 0 } @@ -555,13 +559,11 @@ copy_orig_objects() { local file_dir="$(dirname "$file")" local orig_file="$ORIG_DIR/$rel_file" local orig_dir="$(dirname "$orig_file")" - local cmd_file="$file_dir/.$(basename "$file").cmd" [[ ! 
-f "$file" ]] && die "missing $(basename "$file") for $_file" mkdir -p "$orig_dir" cp -f "$file" "$orig_dir" - [[ -e "$cmd_file" ]] && cp -f "$cmd_file" "$orig_dir" done xtrace_restore @@ -740,15 +742,17 @@ build_patch_module() { local orig_dir="$(dirname "$orig_file")" local kmod_file="$KMOD_DIR/$rel_file" local kmod_dir="$(dirname "$kmod_file")" - local cmd_file="$orig_dir/.$(basename "$file").cmd" + local cmd_file="$kmod_dir/.$(basename "$file").cmd" mkdir -p "$kmod_dir" cp -f "$file" "$kmod_dir" - [[ -e "$cmd_file" ]] && cp -f "$cmd_file" "$kmod_dir" # Tell kbuild this is a prebuilt object cp -f "$file" "${kmod_file}_shipped" + # Make modpost happy + touch "$cmd_file" + echo -n " $rel_file" >> "$makefile" done diff --git a/scripts/module.lds.S b/scripts/module.lds.S index 3037d5e5527c07..054ef99e828867 100644 --- a/scripts/module.lds.S +++ b/scripts/module.lds.S @@ -34,13 +34,8 @@ SECTIONS { __patchable_function_entries : { *(__patchable_function_entries) } - __klp_funcs 0: ALIGN(8) { KEEP(*(__klp_funcs)) } - - __klp_objects 0: ALIGN(8) { - __start_klp_objects = .; - KEEP(*(__klp_objects)) - __stop_klp_objects = .; - } + .init.klp_funcs 0 : ALIGN(8) { KEEP(*(.init.klp_funcs)) } + .init.klp_objects 0 : ALIGN(8) { KEEP(*(.init.klp_objects)) } #ifdef CONFIG_ARCH_USES_CFI_TRAPS __kcfi_traps : { KEEP(*(.kcfi_traps)) } diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec index 98f206cb7c6079..0f1c8de1bd95f8 100644 --- a/scripts/package/kernel.spec +++ b/scripts/package/kernel.spec @@ -2,6 +2,8 @@ %{!?_arch: %define _arch dummy} %{!?make: %define make make} %define makeflags %{?_smp_mflags} ARCH=%{ARCH} +%define __spec_install_post /usr/lib/rpm/brp-compress || : +%define debug_package %{nil} Name: kernel Summary: The Linux Kernel @@ -46,34 +48,12 @@ against the %{version} kernel package. %endif %if %{with_debuginfo} -# list of debuginfo-related options taken from distribution kernel.spec -# files -%undefine _include_minidebuginfo -%undefine _find_debuginfo_dwz_opts -%undefine _unique_build_ids -%undefine _unique_debug_names -%undefine _unique_debug_srcs -%undefine _debugsource_packages -%undefine _debuginfo_subpackages -%global _find_debuginfo_opts -r -%global _missing_build_ids_terminate_build 1 -%global _no_recompute_build_ids 1 -%{debug_package} +%package debuginfo +Summary: Debug information package for the Linux kernel +%description debuginfo +This package provides debug information for the kernel image and modules from the +%{version} package. %endif -# some (but not all) versions of rpmbuild emit %%debug_package with -# %%install. since we've already emitted it manually, that would cause -# a package redefinition error. ensure that doesn't happen -%define debug_package %{nil} - -# later, we make all modules executable so that find-debuginfo.sh strips -# them up. but they don't actually need to be executable, so remove the -# executable bit, taking care to do it _after_ find-debuginfo.sh has run -%define __spec_install_post \ - %{?__debug_package:%{__debug_install_post}} \ - %{__arch_install_post} \ - %{__os_install_post} \ - find %{buildroot}/lib/modules/%{KERNELRELEASE} -name "*.ko" -type f \\\ - | xargs --no-run-if-empty chmod u-x %prep %setup -q -n linux @@ -87,7 +67,7 @@ patch -p1 < %{SOURCE2} mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE} cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz # DEPMOD=true makes depmod no-op. We do not package depmod-generated files. 
-%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} DEPMOD=true modules_install +%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} INSTALL_MOD_STRIP=1 DEPMOD=true modules_install %{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE} cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config @@ -118,22 +98,31 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA echo "%exclude /lib/modules/%{KERNELRELEASE}/build" } > %{buildroot}/kernel.list -# make modules executable so that find-debuginfo.sh strips them. this -# will be undone later in %%__spec_install_post -find %{buildroot}/lib/modules/%{KERNELRELEASE} -name "*.ko" -type f \ - | xargs --no-run-if-empty chmod u+x - %if %{with_debuginfo} # copying vmlinux directly to the debug directory means it will not get # stripped (but its source paths will still be collected + fixed up) mkdir -p %{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE} cp vmlinux %{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE} + +echo /usr/lib/debug/lib/modules/%{KERNELRELEASE}/vmlinux > %{buildroot}/debuginfo.list + +while read -r mod; do + mod="${mod%.o}.ko" + dbg="%{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE}/kernel/${mod}" + buildid=$("${READELF}" -n "${mod}" | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p') + link="%{buildroot}/usr/lib/debug/.build-id/${buildid}.debug" + + mkdir -p "${dbg%/*}" "${link%/*}" + "${OBJCOPY}" --only-keep-debug "${mod}" "${dbg}" + ln -sf --relative "${dbg}" "${link}" + + echo "${dbg#%{buildroot}}" >> %{buildroot}/debuginfo.list + echo "${link#%{buildroot}}" >> %{buildroot}/debuginfo.list +done < modules.order %endif %clean rm -rf %{buildroot} -rm -f debugfiles.list debuglinks.list debugsourcefiles.list debugsources.list \ - elfbins.list %post if [ -x /usr/bin/kernel-install ]; then @@ -172,3 +161,9 @@ fi /usr/src/kernels/%{KERNELRELEASE} /lib/modules/%{KERNELRELEASE}/build %endif + +%if %{with_debuginfo} +%files -f %{buildroot}/debuginfo.list debuginfo +%defattr (-, root, root) +%exclude /debuginfo.list +%endif diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs index be05610496605b..6fd9f5c84e2e4c 100644 --- a/scripts/rustdoc_test_gen.rs +++ b/scripts/rustdoc_test_gen.rs @@ -206,7 +206,7 @@ pub extern "C" fn {kunit_name}(__kunit_test: *mut ::kernel::bindings::kunit) {{ /// The anchor where the test code body starts. #[allow(unused)] - static __DOCTEST_ANCHOR: i32 = ::core::line!() as i32 + {body_offset} + 1; + static __DOCTEST_ANCHOR: i32 = ::core::line!() as i32 + {body_offset} + 2; {{ #![allow(unreachable_pub, clippy::disallowed_names)] {body} diff --git a/security/lsm.h b/security/lsm.h index 81aadbc61685cd..db77cc83e1582b 100644 --- a/security/lsm.h +++ b/security/lsm.h @@ -37,15 +37,6 @@ int lsm_task_alloc(struct task_struct *task); /* LSM framework initializers */ -#ifdef CONFIG_MMU -int min_addr_init(void); -#else -static inline int min_addr_init(void) -{ - return 0; -} -#endif /* CONFIG_MMU */ - #ifdef CONFIG_SECURITYFS int securityfs_init(void); #else diff --git a/security/lsm_init.c b/security/lsm_init.c index 05bd52e6b1f25d..573e2a7250c416 100644 --- a/security/lsm_init.c +++ b/security/lsm_init.c @@ -489,12 +489,7 @@ int __init security_init(void) */ static int __init security_initcall_pure(void) { - int rc_adr, rc_lsm; - - rc_adr = min_addr_init(); - rc_lsm = lsm_initcall(pure); - - return (rc_adr ? 
rc_adr : rc_lsm); + return lsm_initcall(pure); } pure_initcall(security_initcall_pure); diff --git a/security/min_addr.c b/security/min_addr.c index 0fde5ec9abc8a0..56e4f9d25929b0 100644 --- a/security/min_addr.c +++ b/security/min_addr.c @@ -5,8 +5,6 @@ #include #include -#include "lsm.h" - /* amount of vm to protect from userspace access by both DAC and the LSM*/ unsigned long mmap_min_addr; /* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */ @@ -54,10 +52,11 @@ static const struct ctl_table min_addr_sysctl_table[] = { }, }; -int __init min_addr_init(void) +static int __init mmap_min_addr_init(void) { register_sysctl_init("vm", min_addr_sysctl_table); update_mmap_min_addr(); return 0; } +pure_initcall(mmap_min_addr_init); diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c index 69422ab2d80861..f4ad0bfb4dac60 100644 --- a/sound/core/oss/mixer_oss.c +++ b/sound/core/oss/mixer_oss.c @@ -523,6 +523,8 @@ static void snd_mixer_oss_get_volume1_vol(struct snd_mixer_oss_file *fmixer, if (numid == ID_UNKNOWN) return; guard(rwsem_read)(&card->controls_rwsem); + if (card->shutdown) + return; kctl = snd_ctl_find_numid(card, numid); if (!kctl) return; @@ -557,6 +559,8 @@ static void snd_mixer_oss_get_volume1_sw(struct snd_mixer_oss_file *fmixer, if (numid == ID_UNKNOWN) return; guard(rwsem_read)(&card->controls_rwsem); + if (card->shutdown) + return; kctl = snd_ctl_find_numid(card, numid); if (!kctl) return; @@ -618,6 +622,8 @@ static void snd_mixer_oss_put_volume1_vol(struct snd_mixer_oss_file *fmixer, if (numid == ID_UNKNOWN) return; guard(rwsem_read)(&card->controls_rwsem); + if (card->shutdown) + return; kctl = snd_ctl_find_numid(card, numid); if (!kctl) return; @@ -656,6 +662,8 @@ static void snd_mixer_oss_put_volume1_sw(struct snd_mixer_oss_file *fmixer, if (numid == ID_UNKNOWN) return; guard(rwsem_read)(&card->controls_rwsem); + if (card->shutdown) + return; kctl = snd_ctl_find_numid(card, numid); if (!kctl) return; @@ -792,10 +800,12 @@ static int snd_mixer_oss_get_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned struct snd_ctl_elem_info *uinfo __free(kfree) = kzalloc(sizeof(*uinfo), GFP_KERNEL); struct snd_ctl_elem_value *uctl __free(kfree) = - uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); + kzalloc(sizeof(*uctl), GFP_KERNEL); if (uinfo == NULL || uctl == NULL) return -ENOMEM; guard(rwsem_read)(&card->controls_rwsem); + if (card->shutdown) + return -ENODEV; kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0); if (!kctl) return -ENOENT; @@ -839,6 +849,8 @@ static int snd_mixer_oss_put_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned if (uinfo == NULL || uctl == NULL) return -ENOMEM; guard(rwsem_read)(&card->controls_rwsem); + if (card->shutdown) + return -ENODEV; kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0); if (!kctl) return -ENOENT; @@ -885,6 +897,8 @@ static int snd_mixer_oss_build_test(struct snd_mixer_oss *mixer, struct slot *sl if (!info) return -ENOMEM; scoped_guard(rwsem_read, &card->controls_rwsem) { + if (card->shutdown) + return -ENODEV; kcontrol = snd_mixer_oss_test_id(mixer, name, index); if (kcontrol == NULL) return 0; @@ -1006,6 +1020,8 @@ static int snd_mixer_oss_build_input(struct snd_mixer_oss *mixer, if (snd_mixer_oss_build_test_all(mixer, ptr, &slot)) return 0; guard(rwsem_read)(&mixer->card->controls_rwsem); + if (mixer->card->shutdown) + return -ENODEV; kctl = NULL; if (!ptr->index) kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0); diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 
64ef03b2d579c9..aa0d2fcb1a180c 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -336,37 +336,43 @@ static bool is_access_interleaved(snd_pcm_access_t access) static int loopback_check_format(struct loopback_cable *cable, int stream) { + struct loopback_pcm *dpcm_play, *dpcm_capt; struct snd_pcm_runtime *runtime, *cruntime; struct loopback_setup *setup; struct snd_card *card; + bool stop_capture = false; int check; - if (cable->valid != CABLE_VALID_BOTH) { - if (stream == SNDRV_PCM_STREAM_PLAYBACK) - goto __notify; - return 0; - } - runtime = cable->streams[SNDRV_PCM_STREAM_PLAYBACK]-> - substream->runtime; - cruntime = cable->streams[SNDRV_PCM_STREAM_CAPTURE]-> - substream->runtime; - check = runtime->format != cruntime->format || - runtime->rate != cruntime->rate || - runtime->channels != cruntime->channels || - is_access_interleaved(runtime->access) != - is_access_interleaved(cruntime->access); - if (!check) - return 0; - if (stream == SNDRV_PCM_STREAM_CAPTURE) { - return -EIO; - } else { - snd_pcm_stop(cable->streams[SNDRV_PCM_STREAM_CAPTURE]-> - substream, SNDRV_PCM_STATE_DRAINING); - __notify: - runtime = cable->streams[SNDRV_PCM_STREAM_PLAYBACK]-> - substream->runtime; - setup = get_setup(cable->streams[SNDRV_PCM_STREAM_PLAYBACK]); - card = cable->streams[SNDRV_PCM_STREAM_PLAYBACK]->loopback->card; + scoped_guard(spinlock_irqsave, &cable->lock) { + dpcm_play = cable->streams[SNDRV_PCM_STREAM_PLAYBACK]; + dpcm_capt = cable->streams[SNDRV_PCM_STREAM_CAPTURE]; + + if (cable->valid != CABLE_VALID_BOTH) { + if (stream == SNDRV_PCM_STREAM_CAPTURE || !dpcm_play) + return 0; + } else { + if (!dpcm_play || !dpcm_capt) + return -EIO; + runtime = dpcm_play->substream->runtime; + cruntime = dpcm_capt->substream->runtime; + if (!runtime || !cruntime) + return -EIO; + check = runtime->format != cruntime->format || + runtime->rate != cruntime->rate || + runtime->channels != cruntime->channels || + is_access_interleaved(runtime->access) != + is_access_interleaved(cruntime->access); + if (!check) + return 0; + if (stream == SNDRV_PCM_STREAM_CAPTURE) + return -EIO; + else if (cruntime->state == SNDRV_PCM_STATE_RUNNING) + stop_capture = true; + } + + setup = get_setup(dpcm_play); + card = dpcm_play->loopback->card; + runtime = dpcm_play->substream->runtime; if (setup->format != runtime->format) { snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &setup->format_id); @@ -389,6 +395,10 @@ static int loopback_check_format(struct loopback_cable *cable, int stream) setup->access = runtime->access; } } + + if (stop_capture) + snd_pcm_stop(dpcm_capt->substream, SNDRV_PCM_STATE_DRAINING); + return 0; } diff --git a/sound/hda/codecs/conexant.c b/sound/hda/codecs/conexant.c index 2384e64eada369..5623d8c0a0f7c9 100644 --- a/sound/hda/codecs/conexant.c +++ b/sound/hda/codecs/conexant.c @@ -1081,6 +1081,7 @@ static const struct hda_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x826b, "HP ZBook Studio G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), diff --git a/sound/hda/codecs/generic.c b/sound/hda/codecs/generic.c index 443500a3518f41..b75a5e9470df0c 100644 --- 
a/sound/hda/codecs/generic.c +++ b/sound/hda/codecs/generic.c @@ -1517,7 +1517,7 @@ static int count_multiio_pins(struct hda_codec *codec, hda_nid_t reference_pin) /* * multi-io helper * - * When hardwired is set, try to fill ony hardwired pins, and returns + * When hardwired is set, try to fill only hardwired pins, and returns * zero if any pins are filled, non-zero if nothing found. * When hardwired is off, try to fill possible input pins, and returns * the badness value. @@ -4889,7 +4889,7 @@ static int check_auto_mic_availability(struct hda_codec *codec) * snd_hda_gen_path_power_filter - power_filter hook to make inactive widgets * into power down * @codec: the HDA codec - * @nid: NID to evalute + * @nid: NID to evaluate * @power_state: target power state */ unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec, diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c index 0da00f73500c98..23b35b50f910d1 100644 --- a/sound/hda/codecs/realtek/alc269.c +++ b/sound/hda/codecs/realtek/alc269.c @@ -1660,6 +1660,13 @@ static void alc285_fixup_hp_spectre_x360_mute_led(struct hda_codec *codec, alc285_fixup_hp_gpio_micmute_led(codec, fix, action); } +static void alc245_fixup_hp_envy_x360_mute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + alc245_fixup_hp_mute_led_v1_coefbit(codec, fix, action); + alc245_fixup_hp_gpio_led(codec, fix, action); +} + static void alc236_fixup_hp_mute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -3610,11 +3617,22 @@ static void alc287_alc1318_playback_pcm_hook(struct hda_pcm_stream *hinfo, struct snd_pcm_substream *substream, int action) { + static const struct coef_fw dis_coefs[] = { + WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC203), + WRITE_COEF(0x28, 0x0004), WRITE_COEF(0x29, 0xb023), + }; /* Disable AMP silence detection */ + static const struct coef_fw en_coefs[] = { + WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC203), + WRITE_COEF(0x28, 0x0084), WRITE_COEF(0x29, 0xb023), + }; /* Enable AMP silence detection */ + switch (action) { case HDA_GEN_PCM_ACT_OPEN: + alc_process_coef_fw(codec, dis_coefs); alc_write_coefex_idx(codec, 0x5a, 0x00, 0x954f); /* write gpio3 to high */ break; case HDA_GEN_PCM_ACT_CLOSE: + alc_process_coef_fw(codec, en_coefs); alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */ break; } @@ -3908,6 +3926,7 @@ enum { ALC285_FIXUP_HP_GPIO_LED, ALC285_FIXUP_HP_MUTE_LED, ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED, + ALC245_FIXUP_HP_ENVY_X360_MUTE_LED, ALC285_FIXUP_HP_BEEP_MICMUTE_LED, ALC236_FIXUP_HP_MUTE_LED_COEFBIT2, ALC236_FIXUP_HP_GPIO_LED, @@ -4035,6 +4054,7 @@ enum { ALC288_FIXUP_SURFACE_SWAP_DACS, ALC236_FIXUP_HP_MUTE_LED_MICMUTE_GPIO, ALC233_FIXUP_LENOVO_GPIO2_MIC_HOTKEY, + ALC245_FIXUP_BASS_HP_DAC, }; /* A special fixup for Lenovo C940 and Yoga Duet 7; @@ -5563,6 +5583,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_spectre_x360_mute_led, }, + [ALC245_FIXUP_HP_ENVY_X360_MUTE_LED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc245_fixup_hp_envy_x360_mute_led, + }, [ALC285_FIXUP_HP_BEEP_MICMUTE_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_beep, @@ -6537,6 +6561,11 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_fixup_lenovo_gpio2_mic_hotkey, }, + [ALC245_FIXUP_BASS_HP_DAC] = { + .type = HDA_FIXUP_FUNC, + /* Borrow the DAC routing selected for those 
Thinkpads */ + .v.func = alc285_fixup_thinkpad_x1_gen7, + }, }; static const struct hda_quirk alc269_fixup_tbl[] = { @@ -6584,6 +6613,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC), SND_PCI_QUIRK(0x1025, 0x1534, "Acer Predator PH315-54", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1539, "Acer Nitro 5 AN515-57", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x159c, "Acer Nitro 5 AN515-58", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1597, "Acer Nitro 5 AN517-55", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x169a, "Acer Swift SFG16", ALC256_FIXUP_ACER_SFG16_MICMUTE_LED), @@ -6830,6 +6860,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED), SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x103c, 0x88b3, "HP ENVY x360 Convertible 15-es0xxx", ALC245_FIXUP_HP_ENVY_X360_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x88dd, "HP Pavilion 15z-ec200", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x88eb, "HP Victus 16-e0xxx", ALC245_FIXUP_HP_MUTE_LED_V2_COEFBIT), @@ -6982,6 +7013,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c8d, "HP ProBook 440 G11", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c8e, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c8f, "HP EliteBook 630 G11", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), @@ -7586,6 +7618,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1d05, 0x1409, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1d05, 0x300f, "TongFang X6AR5xxY", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1d05, 0x3019, "TongFang X6FR5xxY", ALC2XX_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1d05, 0x3031, "TongFang X6AR55xU", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -7594,8 +7627,10 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x1e39, 0xca14, "MEDION NM14LNL", ALC233_FIXUP_MEDION_MTL_SPK), SND_PCI_QUIRK(0x1ee7, 0x2078, "HONOR BRB-X M1010", ALC2XX_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1f4c, 0xe001, "Minisforum V3 (SE)", ALC245_FIXUP_BASS_HP_DAC), SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x2039, 0x0001, "Inspur S14-G1", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 
SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13), SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), @@ -7808,6 +7843,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"}, {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"}, {.id = ALC2XX_FIXUP_HEADSET_MIC, .name = "alc2xx-fixup-headset-mic"}, + {.id = ALC245_FIXUP_BASS_HP_DAC, .name = "alc245-fixup-bass-hp-dac"}, {} }; #define ALC225_STANDARD_PINS \ @@ -8055,10 +8091,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60140}, {0x19, 0x04a11030}, {0x21, 0x04211020}), - SND_HDA_PIN_QUIRK(0x10ec0274, 0x1d05, "TongFang", ALC274_FIXUP_HP_HEADSET_MIC, - {0x17, 0x90170110}, - {0x19, 0x03a11030}, - {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x1025, "Acer", ALC282_FIXUP_ACER_DISABLE_LINEOUT, ALC282_STANDARD_PINS, {0x12, 0x90a609c0}, diff --git a/sound/hda/codecs/side-codecs/tas2781_hda.c b/sound/hda/codecs/side-codecs/tas2781_hda.c index 96e6d82dc69eb1..b22f93424c6211 100644 --- a/sound/hda/codecs/side-codecs/tas2781_hda.c +++ b/sound/hda/codecs/side-codecs/tas2781_hda.c @@ -2,7 +2,7 @@ // // TAS2781 HDA Shared Lib for I2C&SPI driver // -// Copyright 2025 Texas Instruments, Inc. +// Copyright 2025 - 2026 Texas Instruments, Inc. // // Author: Shenghao Ding @@ -159,7 +159,6 @@ static void tas2781_apply_calib(struct tasdevice_priv *p) r->tlimit_reg = cali_reg[4]; } - p->is_user_space_calidata = true; cali_data->total_sz = p->ndev * (cali_data->cali_dat_sz_per_dev + 1); } @@ -216,6 +215,12 @@ int tas2781_save_calibration(struct tas2781_hda *hda) status = -ENOMEM; continue; } + /* + * Set to an invalid value before the calibrated data + * is stored into it, for the default value is 0, which + * means the first device. + */ + data[0] = 0xff; /* Get variable contents into buffer */ status = efi.get_variable(efi_name[i], &efi_guid, &attr, &cali_data->total_sz, data); diff --git a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c index 49c80babb500dd..74c3cf1e45e18b 100644 --- a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c +++ b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c @@ -393,19 +393,6 @@ static int tas2563_save_calibration(struct tas2781_hda *h) r->pow_reg = TAS2563_CAL_POWER; r->tlimit_reg = TAS2563_CAL_TLIM; - /* - * TAS2781_FMWLIB supports two solutions of calibrated data. One is - * from the driver itself: driver reads the calibrated files directly - * during probe; The other from user space: during init of audio hal, - * the audio hal will pass the calibrated data via kcontrol interface. - * Driver will store this data in "struct calidata" for use. For hda - * device, calibrated data are usunally saved into UEFI. So Hda side - * codec driver use the mixture of these two solutions, driver reads - * the data from UEFI, then store this data in "struct calidata" for - * use. 
- */ - p->is_user_space_calidata = true; - return 0; } diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index 227d8c8490e1f5..a25a599fc5bec7 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -52,18 +52,19 @@ static const struct snd_pci_quirk subsys_20k1_list[] = { static const struct snd_pci_quirk subsys_20k2_list[] = { SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB0760, "SB0760", CTSB0760), - SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB1270, - "SB1270", CTSB1270), SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB08801, "SB0880", CTSB0880), SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB08802, "SB0880", CTSB0880), SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB08803, "SB0880", CTSB0880), + SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB1270, + "SB1270", CTSB1270), + SND_PCI_QUIRK(0x160b, 0x0101, "OK0010", CTOK0010), + SND_PCI_QUIRK(0x160b, 0x0102, "OK0010", CTOK0010), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_CREATIVE, 0xf000, PCI_SUBDEVICE_ID_CREATIVE_HENDRIX, "HENDRIX", CTHENDRIX), - SND_PCI_QUIRK(0x160b, 0x0101, "OK0010", CTOK0010), { } /* terminator */ }; @@ -78,8 +79,8 @@ static const char *ct_subsys_name[NUM_CTCARDS] = { [CTSB0760] = "SB076x", [CTHENDRIX] = "Hendrix", [CTSB0880] = "SB0880", - [CTSB1270] = "SB1270", - [CTOK0010] = "OK0010", + [CTSB1270] = "SB1270", + [CTOK0010] = "OK0010", [CT20K2_UNKNOWN] = "Unknown", }; diff --git a/sound/soc/amd/acp/amd-acp70-acpi-match.c b/sound/soc/amd/acp/amd-acp70-acpi-match.c index c5f42bd7954889..dd2b010efdaa1a 100644 --- a/sound/soc/amd/acp/amd-acp70-acpi-match.c +++ b/sound/soc/amd/acp/amd-acp70-acpi-match.c @@ -187,7 +187,106 @@ static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = { }, }; -static const struct snd_soc_acpi_adr_device cs42l43_0_adr[] = { +static const struct snd_soc_acpi_adr_device cs35l56x4_l1u3210_adr[] = { + { + .adr = 0x00013301FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + .name_prefix = "AMP1" + }, + { + .adr = 0x00013201FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "AMP2" + }, + { + .adr = 0x00013101FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_2_endpoint, + .name_prefix = "AMP3" + }, + { + .adr = 0x00013001FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_3_endpoint, + .name_prefix = "AMP4" + }, +}; + +static const struct snd_soc_acpi_adr_device cs35l63x2_l0u01_adr[] = { + { + .adr = 0x00003001FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + .name_prefix = "AMP1" + }, + { + .adr = 0x00003101FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "AMP2" + }, +}; + +static const struct snd_soc_acpi_adr_device cs35l63x2_l1u01_adr[] = { + { + .adr = 0x00013001FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + .name_prefix = "AMP1" + }, + { + .adr = 0x00013101FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "AMP2" + }, +}; + +static const struct snd_soc_acpi_adr_device cs35l63x2_l1u13_adr[] = { + { + .adr = 0x00013101FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + .name_prefix = "AMP1" + }, + { + .adr = 0x00013301FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "AMP2" + }, +}; + +static const struct snd_soc_acpi_adr_device cs35l63x4_l0u0246_adr[] = { + { + .adr = 0x00003001FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + 
.name_prefix = "AMP1" + }, + { + .adr = 0x00003201FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "AMP2" + }, + { + .adr = 0x00003401FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_2_endpoint, + .name_prefix = "AMP3" + }, + { + .adr = 0x00003601FA356301ull, + .num_endpoints = 1, + .endpoints = &spk_3_endpoint, + .name_prefix = "AMP4" + }, +}; + +static const struct snd_soc_acpi_adr_device cs42l43_l0u0_adr[] = { { .adr = 0x00003001FA424301ull, .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), @@ -196,7 +295,25 @@ static const struct snd_soc_acpi_adr_device cs42l43_0_adr[] = { } }; -static const struct snd_soc_acpi_adr_device cs42l43_1_cs35l56x4_1_adr[] = { +static const struct snd_soc_acpi_adr_device cs42l43_l0u1_adr[] = { + { + .adr = 0x00003101FA424301ull, + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), + .endpoints = cs42l43_endpoints, + .name_prefix = "cs42l43" + } +}; + +static const struct snd_soc_acpi_adr_device cs42l43b_l0u1_adr[] = { + { + .adr = 0x00003101FA2A3B01ull, + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), + .endpoints = cs42l43_endpoints, + .name_prefix = "cs42l43" + } +}; + +static const struct snd_soc_acpi_adr_device cs42l43_l1u0_cs35l56x4_l1u0123_adr[] = { { .adr = 0x00013001FA424301ull, .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), @@ -229,61 +346,155 @@ static const struct snd_soc_acpi_adr_device cs42l43_1_cs35l56x4_1_adr[] = { }, }; -static const struct snd_soc_acpi_adr_device cs35l56x4_1_adr[] = { +static const struct snd_soc_acpi_adr_device cs42l45_l0u0_adr[] = { { - .adr = 0x00013301FA355601ull, - .num_endpoints = 1, - .endpoints = &spk_l_endpoint, - .name_prefix = "AMP1" + .adr = 0x00003001FA424501ull, + /* Re-use endpoints, but cs42l45 has no speaker */ + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints) - 1, + .endpoints = cs42l43_endpoints, + .name_prefix = "cs42l45" + } +}; + +static const struct snd_soc_acpi_adr_device cs42l45_l1u0_adr[] = { + { + .adr = 0x00013001FA424501ull, + /* Re-use endpoints, but cs42l45 has no speaker */ + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints) - 1, + .endpoints = cs42l43_endpoints, + .name_prefix = "cs42l45" + } +}; + +static const struct snd_soc_acpi_link_adr acp70_cs35l56x4_l1u3210[] = { + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(cs35l56x4_l1u3210_adr), + .adr_d = cs35l56x4_l1u3210_adr, }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs35l63x4_l0u0246[] = { { - .adr = 0x00013201FA355601ull, - .num_endpoints = 1, - .endpoints = &spk_r_endpoint, - .name_prefix = "AMP2" + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs35l63x4_l0u0246_adr), + .adr_d = cs35l63x4_l0u0246_adr, }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l43_l0u1[] = { { - .adr = 0x00013101FA355601ull, - .num_endpoints = 1, - .endpoints = &spk_2_endpoint, - .name_prefix = "AMP3" + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs42l43_l0u1_adr), + .adr_d = cs42l43_l0u1_adr, }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l43b_l0u1[] = { { - .adr = 0x00013001FA355601ull, - .num_endpoints = 1, - .endpoints = &spk_3_endpoint, - .name_prefix = "AMP4" + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs42l43b_l0u1_adr), + .adr_d = cs42l43b_l0u1_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l43_l0u0_cs35l56x4_l1u3210[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs42l43_l0u0_adr), + .adr_d = cs42l43_l0u0_adr, + }, + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(cs35l56x4_l1u3210_adr), + .adr_d = cs35l56x4_l1u3210_adr, + }, + {} +}; + +static const struct 
snd_soc_acpi_link_adr acp70_cs42l43_l1u0_cs35l56x4_l1u0123[] = { + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(cs42l43_l1u0_cs35l56x4_l1u0123_adr), + .adr_d = cs42l43_l1u0_cs35l56x4_l1u0123_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l45_l0u0[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs42l45_l0u0_adr), + .adr_d = cs42l45_l0u0_adr, }, + {} }; -static const struct snd_soc_acpi_link_adr acp70_cs42l43_l1_cs35l56x4_l1[] = { +static const struct snd_soc_acpi_link_adr acp70_cs42l45_l0u0_cs35l63x2_l1u01[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs42l45_l0u0_adr), + .adr_d = cs42l45_l0u0_adr, + }, { .mask = BIT(1), - .num_adr = ARRAY_SIZE(cs42l43_1_cs35l56x4_1_adr), - .adr_d = cs42l43_1_cs35l56x4_1_adr, + .num_adr = ARRAY_SIZE(cs35l63x2_l1u01_adr), + .adr_d = cs35l63x2_l1u01_adr, }, {} }; -static const struct snd_soc_acpi_link_adr acp70_cs42l43_l0_cs35l56x4_l1[] = { +static const struct snd_soc_acpi_link_adr acp70_cs42l45_l0u0_cs35l63x2_l1u13[] = { { .mask = BIT(0), - .num_adr = ARRAY_SIZE(cs42l43_0_adr), - .adr_d = cs42l43_0_adr, + .num_adr = ARRAY_SIZE(cs42l45_l0u0_adr), + .adr_d = cs42l45_l0u0_adr, }, { .mask = BIT(1), - .num_adr = ARRAY_SIZE(cs35l56x4_1_adr), - .adr_d = cs35l56x4_1_adr, + .num_adr = ARRAY_SIZE(cs35l63x2_l1u13_adr), + .adr_d = cs35l63x2_l1u13_adr, }, {} }; -static const struct snd_soc_acpi_link_adr acp70_cs35l56x4_l1[] = { +static const struct snd_soc_acpi_link_adr acp70_cs42l45_l1u0[] = { { .mask = BIT(1), - .num_adr = ARRAY_SIZE(cs35l56x4_1_adr), - .adr_d = cs35l56x4_1_adr, + .num_adr = ARRAY_SIZE(cs42l45_l1u0_adr), + .adr_d = cs42l45_l1u0_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l45_l1u0_cs35l63x2_l0u01[] = { + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(cs42l45_l1u0_adr), + .adr_d = cs42l45_l1u0_adr, + }, + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs35l63x2_l0u01_adr), + .adr_d = cs35l63x2_l0u01_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l45_l1u0_cs35l63x4_l0u0246[] = { + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(cs42l45_l1u0_adr), + .adr_d = cs42l45_l1u0_adr, + }, + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs35l63x4_l0u0246_adr), + .adr_d = cs35l63x4_l0u0246_adr, }, {} }; @@ -327,28 +538,73 @@ struct snd_soc_acpi_mach snd_soc_acpi_amd_acp70_sdw_machines[] = { .drv_name = "amd_sdw", }, { - .link_mask = BIT(0), - .links = acp70_rt722_only, + .link_mask = BIT(0) | BIT(1), + .links = acp70_4_in_1_sdca, .drv_name = "amd_sdw", }, { .link_mask = BIT(0) | BIT(1), - .links = acp70_4_in_1_sdca, + .links = acp70_cs42l43_l0u0_cs35l56x4_l1u3210, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(0) | BIT(1), + .links = acp70_cs42l45_l1u0_cs35l63x4_l0u0246, .drv_name = "amd_sdw", }, { .link_mask = BIT(0) | BIT(1), - .links = acp70_cs42l43_l0_cs35l56x4_l1, + .links = acp70_cs42l45_l0u0_cs35l63x2_l1u01, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(0) | BIT(1), + .links = acp70_cs42l45_l0u0_cs35l63x2_l1u13, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(0) | BIT(1), + .links = acp70_cs42l45_l1u0_cs35l63x2_l0u01, .drv_name = "amd_sdw", }, { .link_mask = BIT(1), - .links = acp70_cs42l43_l1_cs35l56x4_l1, + .links = acp70_cs42l43_l1u0_cs35l56x4_l1u0123, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(1), + .links = acp70_cs35l56x4_l1u3210, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(0), + .links = acp70_cs35l63x4_l0u0246, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(0), + .links = acp70_rt722_only, + .drv_name = "amd_sdw", + }, + { + 
.link_mask = BIT(0), + .links = acp70_cs42l43_l0u1, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(0), + .links = acp70_cs42l43b_l0u1, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(0), + .links = acp70_cs42l45_l0u0, .drv_name = "amd_sdw", }, { .link_mask = BIT(1), - .links = acp70_cs35l56x4_l1, + .links = acp70_cs42l45_l1u0, .drv_name = "amd_sdw", }, { diff --git a/sound/soc/amd/renoir/acp3x-pdm-dma.c b/sound/soc/amd/renoir/acp3x-pdm-dma.c index 95ac8c68003750..a560d06097d5e3 100644 --- a/sound/soc/amd/renoir/acp3x-pdm-dma.c +++ b/sound/soc/amd/renoir/acp3x-pdm-dma.c @@ -301,9 +301,11 @@ static int acp_pdm_dma_close(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct pdm_dev_data *adata = dev_get_drvdata(component->dev); + struct pdm_stream_instance *rtd = substream->runtime->private_data; disable_pdm_interrupts(adata->acp_base); adata->capture_stream = NULL; + kfree(rtd); return 0; } diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index c18da0915baad8..67f2fee1939804 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -640,6 +640,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_BOARD_NAME, "8BD6"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + DMI_MATCH(DMI_BOARD_NAME, "8EE4"), + } + }, { .driver_data = &acp6x_card, .matches = { diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index e78ac302da15f8..adb3fb923be394 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -936,6 +936,20 @@ config SND_SOC_CS35L56_TEST help This builds KUnit tests for the Cirrus Logic cs35l56 codec driver. + + For more information on KUnit and unit tests in general, + please refer to the KUnit documentation in + Documentation/dev-tools/kunit/. + If in doubt, say "N". + +config SND_SOC_CS35L56_SHARED_TEST + tristate "KUnit test for Cirrus Logic cs35l56-shared" if !KUNIT_ALL_TESTS + depends on SND_SOC_CS35L56_SHARED && KUNIT + default KUNIT_ALL_TESTS + help + This builds KUnit tests for the Cirrus Logic cs35l56-shared + module. + For more information on KUnit and unit tests in general, please refer to the KUnit documentation in Documentation/dev-tools/kunit/. 
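The Kconfig hunk above adds SND_SOC_CS35L56_SHARED_TEST next to the existing cs35l56 codec test option: it is only built when KUNIT is enabled and defaults to KUNIT_ALL_TESTS. As a rough sketch of how the new suites could be exercised (this is not part of the patch; SND_SOC_CS35L56_SHARED may be a promptless symbol selected by the bus-specific drivers, and the exact dependency symbols needed under the KUnit UML kernel are an assumption):

    # Hypothetical .kunitconfig fragment -- additional SND/SND_SOC/regmap
    # dependency symbols may be needed, depending on the tree.
    CONFIG_KUNIT=y
    CONFIG_SND_SOC_CS35L56_SHARED=y
    CONFIG_SND_SOC_CS35L56_SHARED_TEST=y

    # Run with the in-tree KUnit wrapper:
    ./tools/testing/kunit/kunit.py run --kunitconfig=<path-to-fragment>
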
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index c9e3b813653d90..3ddee529872197 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -77,6 +77,7 @@ snd-soc-cs35l45-spi-y := cs35l45-spi.o snd-soc-cs35l45-i2c-y := cs35l45-i2c.o snd-soc-cs35l56-y := cs35l56.o snd-soc-cs35l56-shared-y := cs35l56-shared.o +snd-soc-cs35l56-shared-test-y := cs35l56-shared-test.o snd-soc-cs35l56-i2c-y := cs35l56-i2c.o snd-soc-cs35l56-spi-y := cs35l56-spi.o snd-soc-cs35l56-sdw-y := cs35l56-sdw.o @@ -512,6 +513,7 @@ obj-$(CONFIG_SND_SOC_CS35L45_SPI) += snd-soc-cs35l45-spi.o obj-$(CONFIG_SND_SOC_CS35L45_I2C) += snd-soc-cs35l45-i2c.o obj-$(CONFIG_SND_SOC_CS35L56) += snd-soc-cs35l56.o obj-$(CONFIG_SND_SOC_CS35L56_SHARED) += snd-soc-cs35l56-shared.o +obj-$(CONFIG_SND_SOC_CS35L56_SHARED_TEST) += snd-soc-cs35l56-shared-test.o obj-$(CONFIG_SND_SOC_CS35L56_I2C) += snd-soc-cs35l56-i2c.o obj-$(CONFIG_SND_SOC_CS35L56_SPI) += snd-soc-cs35l56-spi.o obj-$(CONFIG_SND_SOC_CS35L56_SDW) += snd-soc-cs35l56-sdw.o diff --git a/sound/soc/codecs/aw87390.c b/sound/soc/codecs/aw87390.c index d7fd865c349fbe..613daccca3af8b 100644 --- a/sound/soc/codecs/aw87390.c +++ b/sound/soc/codecs/aw87390.c @@ -314,6 +314,45 @@ static int aw87390_drv_event(struct snd_soc_dapm_widget *w, return ret; } +static int aw87391_rgds_drv_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct aw87390 *aw87390 = snd_soc_component_get_drvdata(component); + struct aw_device *aw_dev = aw87390->aw_pa; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + if (!IS_ERR(aw87390->vdd_reg)) { + if (regulator_enable(aw87390->vdd_reg)) + dev_warn(aw_dev->dev, "Failed to enable vdd\n"); + } + break; + case SND_SOC_DAPM_POST_PMU: + regmap_write(aw_dev->regmap, AW87391_SYSCTRL_REG, + AW87391_REG_VER_SEL_LOW | AW87391_REG_EN_ADAP | + AW87391_REG_EN_2X | AW87391_EN_SPK | + AW87391_EN_PA | AW87391_REG_EN_CP | + AW87391_EN_SW); + break; + case SND_SOC_DAPM_PRE_PMD: + regmap_write(aw_dev->regmap, AW87390_SYSCTRL_REG, + AW87390_POWER_DOWN_VALUE); + break; + case SND_SOC_DAPM_POST_PMD: + if (!IS_ERR(aw87390->vdd_reg)) { + if (regulator_disable(aw87390->vdd_reg)) + dev_warn(aw_dev->dev, "Failed to disable vdd\n"); + } + break; + default: + dev_err(aw_dev->dev, "%s: invalid event %d\n", __func__, event); + return -EINVAL; + } + + return 0; +} + static const struct snd_soc_dapm_widget aw87390_dapm_widgets[] = { SND_SOC_DAPM_INPUT("IN"), SND_SOC_DAPM_PGA_E("SPK PA", SND_SOC_NOPM, 0, 0, NULL, 0, aw87390_drv_event, @@ -321,6 +360,14 @@ static const struct snd_soc_dapm_widget aw87390_dapm_widgets[] = { SND_SOC_DAPM_OUTPUT("OUT"), }; +static const struct snd_soc_dapm_widget aw87391_rgds_dapm_widgets[] = { + SND_SOC_DAPM_INPUT("IN"), + SND_SOC_DAPM_PGA_E("SPK PA", SND_SOC_NOPM, 0, 0, NULL, 0, aw87391_rgds_drv_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_OUTPUT("OUT"), +}; + static const struct snd_soc_dapm_route aw87390_dapm_routes[] = { { "SPK PA", NULL, "IN" }, { "OUT", NULL, "SPK PA" }, @@ -339,6 +386,80 @@ static int aw87390_codec_probe(struct snd_soc_component *component) return 0; } +/* + * Firmware typically is used to load the sequence of init commands, + * however for the Anbernic RG-DS we don't have a firmware file just + * a list of registers and values. Most of these values are undocumented + * in the AW87391 datasheet. 
+ */ +static void aw87391_rgds_codec_init(struct aw87390 *aw87390) +{ + struct aw_device *aw_dev = aw87390->aw_pa; + + /* Undocumented command per datasheet. */ + regmap_write(aw_dev->regmap, 0x64, 0x3a); + + /* Bits 7:4 are undocumented but provided by manufacturer. */ + regmap_write(aw_dev->regmap, AW87391_CP_REG, + (5 << 4) | AW87391_REG_CP_OVP_8_50V); + + regmap_write(aw_dev->regmap, AW87391_AGCPO_REG, + AW87391_AK1_S_016 | AW87391_AGC2PO_MW(500)); + + regmap_write(aw_dev->regmap, AW87391_AGC2PA_REG, + AW87391_RK_S_20_48 | AW87391_AK2_S_41 | AW87391_AK2F_S_41); + + /* Undocumented commands per datasheet. */ + regmap_write(aw_dev->regmap, 0x5d, 0x00); + regmap_write(aw_dev->regmap, 0x5e, 0xb4); + regmap_write(aw_dev->regmap, 0x5f, 0x30); + regmap_write(aw_dev->regmap, 0x60, 0x39); + regmap_write(aw_dev->regmap, 0x61, 0x10); + regmap_write(aw_dev->regmap, 0x62, 0x03); + regmap_write(aw_dev->regmap, 0x63, 0x7d); + regmap_write(aw_dev->regmap, 0x65, 0xa0); + regmap_write(aw_dev->regmap, 0x66, 0x21); + regmap_write(aw_dev->regmap, 0x67, 0x41); + regmap_write(aw_dev->regmap, 0x68, 0x3b); + regmap_write(aw_dev->regmap, 0x6e, 0x00); + regmap_write(aw_dev->regmap, 0x6f, 0x00); + regmap_write(aw_dev->regmap, 0x70, 0x00); + regmap_write(aw_dev->regmap, 0x71, 0x00); + regmap_write(aw_dev->regmap, 0x72, 0x34); + regmap_write(aw_dev->regmap, 0x73, 0x06); + regmap_write(aw_dev->regmap, 0x74, 0x10); + regmap_write(aw_dev->regmap, 0x75, 0x00); + regmap_write(aw_dev->regmap, 0x7a, 0x00); + regmap_write(aw_dev->regmap, 0x7b, 0x00); + regmap_write(aw_dev->regmap, 0x7c, 0x00); + regmap_write(aw_dev->regmap, 0x7d, 0x00); + + regmap_write(aw_dev->regmap, AW87391_PAG_REG, AW87391_GAIN_12DB); + regmap_write(aw_dev->regmap, AW87391_SYSCTRL_REG, + AW87391_EN_PA | AW87391_REG_EN_CP | AW87391_EN_SW); + regmap_write(aw_dev->regmap, AW87391_SYSCTRL_REG, + AW87391_REG_VER_SEL_LOW | AW87391_REG_EN_ADAP | + AW87391_REG_EN_2X | AW87391_EN_SPK | AW87391_EN_PA | + AW87391_REG_EN_CP | AW87391_EN_SW); + regmap_write(aw_dev->regmap, AW87391_PAG_REG, AW87391_GAIN_15DB); +} + +static int aw87391_rgds_codec_probe(struct snd_soc_component *component) +{ + struct aw87390 *aw87390 = snd_soc_component_get_drvdata(component); + + aw87390->vdd_reg = devm_regulator_get_optional(aw87390->aw_pa->dev, + "vdd"); + if (IS_ERR(aw87390->vdd_reg) && PTR_ERR(aw87390->vdd_reg) != -ENODEV) + return dev_err_probe(aw87390->aw_pa->dev, + PTR_ERR(aw87390->vdd_reg), + "Could not get vdd regulator\n"); + + aw87391_rgds_codec_init(aw87390); + + return 0; +} + static const struct snd_soc_component_driver soc_codec_dev_aw87390 = { .probe = aw87390_codec_probe, .dapm_widgets = aw87390_dapm_widgets, @@ -349,6 +470,14 @@ static const struct snd_soc_component_driver soc_codec_dev_aw87390 = { .num_controls = ARRAY_SIZE(aw87390_controls), }; +static const struct snd_soc_component_driver soc_codec_dev_anbernic_rgds = { + .probe = aw87391_rgds_codec_probe, + .dapm_widgets = aw87391_rgds_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(aw87391_rgds_dapm_widgets), + .dapm_routes = aw87390_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(aw87390_dapm_routes), +}; + static void aw87390_parse_channel_dt(struct aw87390 *aw87390) { struct aw_device *aw_dev = aw87390->aw_pa; @@ -366,6 +495,10 @@ static int aw87390_init(struct aw87390 *aw87390, struct i2c_client *i2c, struct unsigned int chip_id; int ret; + aw_dev = devm_kzalloc(&i2c->dev, sizeof(*aw_dev), GFP_KERNEL); + if (!aw_dev) + return -ENOMEM; + /* read chip id */ ret = regmap_read(regmap, AW87390_ID_REG, &chip_id); if 
(ret) { @@ -373,22 +506,24 @@ static int aw87390_init(struct aw87390 *aw87390, struct i2c_client *i2c, struct return ret; } - if (chip_id != AW87390_CHIP_ID) { + switch (chip_id) { + case AW87390_CHIP_ID: + aw_dev->chip_id = AW87390_CHIP_ID; + break; + case AW87391_CHIP_ID: + aw_dev->chip_id = AW87391_CHIP_ID; + break; + default: dev_err(&i2c->dev, "unsupported device\n"); return -ENXIO; } dev_dbg(&i2c->dev, "chip id = 0x%x\n", chip_id); - aw_dev = devm_kzalloc(&i2c->dev, sizeof(*aw_dev), GFP_KERNEL); - if (!aw_dev) - return -ENOMEM; - aw87390->aw_pa = aw_dev; aw_dev->i2c = i2c; aw_dev->regmap = regmap; aw_dev->dev = &i2c->dev; - aw_dev->chip_id = AW87390_CHIP_ID; aw_dev->acf = NULL; aw_dev->prof_info.prof_desc = NULL; aw_dev->prof_info.count = 0; @@ -406,6 +541,7 @@ static int aw87390_init(struct aw87390 *aw87390, struct i2c_client *i2c, struct static int aw87390_i2c_probe(struct i2c_client *i2c) { struct aw87390 *aw87390; + const struct snd_soc_component_driver *priv; int ret; ret = i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C); @@ -434,16 +570,38 @@ static int aw87390_i2c_probe(struct i2c_client *i2c) if (ret) return ret; - ret = devm_snd_soc_register_component(&i2c->dev, - &soc_codec_dev_aw87390, NULL, 0); + switch (aw87390->aw_pa->chip_id) { + case AW87390_CHIP_ID: + ret = devm_snd_soc_register_component(&i2c->dev, + &soc_codec_dev_aw87390, NULL, 0); + break; + case AW87391_CHIP_ID: + priv = of_device_get_match_data(&i2c->dev); + if (!priv) + return dev_err_probe(&i2c->dev, -EINVAL, + "aw87391 not currently supported\n"); + ret = devm_snd_soc_register_component(&i2c->dev, priv, NULL, 0); + break; + default: + return -ENXIO; + } + if (ret) dev_err(&i2c->dev, "failed to register aw87390: %d\n", ret); return ret; } +static const struct of_device_id aw87390_of_match[] = { + { .compatible = "awinic,aw87390" }, + { .compatible = "anbernic,rgds-amp", .data = &soc_codec_dev_anbernic_rgds }, + {}, +}; +MODULE_DEVICE_TABLE(of, aw87390_of_match); + static const struct i2c_device_id aw87390_i2c_id[] = { { AW87390_I2C_NAME }, + { AW87391_I2C_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, aw87390_i2c_id); @@ -451,6 +609,7 @@ MODULE_DEVICE_TABLE(i2c, aw87390_i2c_id); static struct i2c_driver aw87390_i2c_driver = { .driver = { .name = AW87390_I2C_NAME, + .of_match_table = of_match_ptr(aw87390_of_match), }, .probe = aw87390_i2c_probe, .id_table = aw87390_i2c_id, diff --git a/sound/soc/codecs/aw87390.h b/sound/soc/codecs/aw87390.h index d0d049e65991f9..f48b207e4bb447 100644 --- a/sound/soc/codecs/aw87390.h +++ b/sound/soc/codecs/aw87390.h @@ -52,6 +52,90 @@ #define AW87390_I2C_NAME "aw87390" #define AW87390_ACF_FILE "aw87390_acf.bin" +#define AW87391_SYSCTRL_REG (0x01) +#define AW87391_REG_VER_SEL_LOW (0 << 6) +#define AW87391_REG_VER_SEL_NORMAL (1 << 6) +#define AW87391_REG_VER_SEL_SUPER (2 << 6) +#define AW87391_REG_EN_ADAP BIT(5) +#define AW87391_REG_EN_2X BIT(4) +#define AW87391_EN_SPK BIT(3) +#define AW87391_EN_PA BIT(2) +#define AW87391_REG_EN_CP BIT(1) +#define AW87391_EN_SW BIT(0) + +#define AW87391_CP_REG (0x02) +#define AW87391_REG_CP_OVP_6_50V 0 +#define AW87391_REG_CP_OVP_6_75V 1 +#define AW87391_REG_CP_OVP_7_00V 2 +#define AW87391_REG_CP_OVP_7_25V 3 +#define AW87391_REG_CP_OVP_7_50V 4 +#define AW87391_REG_CP_OVP_7_75V 5 +#define AW87391_REG_CP_OVP_8_00V 6 +#define AW87391_REG_CP_OVP_8_25V 7 +#define AW87391_REG_CP_OVP_8_50V 8 + +#define AW87391_PAG_REG (0x03) +#define AW87391_GAIN_12DB 0 +#define AW87391_GAIN_15DB 1 +#define AW87391_GAIN_18DB 2 +#define AW87391_GAIN_21DB 3 +#define 
AW87391_GAIN_24DB 4 + +#define AW87391_AGCPO_REG (0x04) +#define AW87391_AK1_S_016 (2 << 5) +#define AW87391_AK1_S_032 (3 << 5) +#define AW87391_PD_AGC1_PWRDN BIT(4) +/* AGC2PO supports values between 500mW (0000) to 1600mW (1011) */ +#define AW87391_AGC2PO_MW(n) ((n / 100) - 5) + +#define AW87391_AGC2PA_REG (0x05) +#define AW87391_RK_S_5_12 (0 << 5) +#define AW87391_RK_S_10_24 (1 << 5) +#define AW87391_RK_S_20_48 (2 << 5) +#define AW87391_RK_S_41 (3 << 5) +#define AW87391_RK_S_82 (4 << 5) +#define AW87391_RK_S_164 (5 << 5) +#define AW87391_RK_S_328 (6 << 5) +#define AW87391_RK_S_656 (7 << 5) +#define AW87391_AK2_S_1_28 (0 << 2) +#define AW87391_AK2_S_2_56 (1 << 2) +#define AW87391_AK2_S_10_24 (2 << 2) +#define AW87391_AK2_S_41 (3 << 2) +#define AW87391_AK2_S_82 (4 << 2) +#define AW87391_AK2_S_164 (5 << 2) +#define AW87391_AK2_S_328 (6 << 2) +#define AW87391_AK2_S_656 (7 << 2) +#define AW87391_AK2F_S_10_24 0 +#define AW87391_AK2F_S_20_48 1 +#define AW87391_AK2F_S_41 2 +#define AW87391_AK2F_S_82 3 + +#define AW87391_SYSST_REG (0x06) +#define AW87391_UVLO BIT(7) +#define AW87391_OTN BIT(6) +#define AW87391_OC_FLAG BIT(5) +#define AW87391_ADAP_CP BIT(4) +#define AW87391_STARTOK BIT(3) +#define AW87391_CP_OVP BIT(2) +#define AW87391_PORN BIT(1) + +#define AW87391_SYSINT_REG (0x07) +#define AW87391_UVLOI BIT(7) +#define AW87391_ONTI BIT(6) +#define AW87391_OC_FLAGI BIT(5) +#define AW87391_ADAP_CPI BIT(4) +#define AW87391_STARTOKI BIT(3) +#define AW87391_CP_OVPI BIT(2) +#define AW87391_PORNI BIT(1) + +#define AW87391_DFT_THGEN0_REG (0x63) +#define AW87391_ADAPVTH_01W (0 << 2) +#define AW87391_ADAPVTH_02W (1 << 2) +#define AW87391_ADAPVTH_03W (2 << 2) +#define AW87391_ADAPVTH_04W (3 << 2) + +#define AW87391_I2C_NAME "aw87391" + #define AW87390_PROFILE_EXT(xname, profile_info, profile_get, profile_set) \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ @@ -63,6 +147,7 @@ enum aw87390_id { AW87390_CHIP_ID = 0x76, + AW87391_CHIP_ID = 0xc1, }; enum { @@ -80,6 +165,7 @@ struct aw87390 { struct mutex lock; struct regmap *regmap; struct aw_container *aw_cfg; + struct regulator *vdd_reg; }; #endif diff --git a/sound/soc/codecs/cs35l56-shared-test.c b/sound/soc/codecs/cs35l56-shared-test.c new file mode 100644 index 00000000000000..94db02aef7dc42 --- /dev/null +++ b/sound/soc/codecs/cs35l56-shared-test.c @@ -0,0 +1,680 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit test for the Cirrus Logic cs35l56-shared module. +// +// Copyright (C) 2026 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct cs35l56_shared_test_priv { + struct kunit *test; + struct faux_device *amp_dev; + struct regmap *registers; + struct cs35l56_base *cs35l56_base; + u8 applied_pad_pull_state[CS35L56_MAX_GPIO]; +}; + +struct cs35l56_shared_test_param { + int spkid_gpios[4]; + int spkid_pulls[4]; + unsigned long gpio_status; + int spkid; +}; + +KUNIT_DEFINE_ACTION_WRAPPER(faux_device_destroy_wrapper, faux_device_destroy, + struct faux_device *) + +KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_wrapper, regmap_exit, struct regmap *) + +static const struct regmap_config cs35l56_shared_test_mock_registers_regmap = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = CS35L56_DSP1_PMEM_5114, + .cache_type = REGCACHE_MAPLE, +}; + +static const struct regmap_bus cs35l56_shared_test_mock_registers_regmap_bus = { + /* No handlers because it is always in cache-only */ +}; + +static unsigned int cs35l56_shared_test_read_gpio_status(struct cs35l56_shared_test_priv *priv) +{ + const struct cs35l56_shared_test_param *param = priv->test->param_value; + unsigned int reg_offs, pad_cfg, val; + unsigned int status = 0; + unsigned int mask = 1; + + for (reg_offs = 0; reg_offs < CS35L56_MAX_GPIO * sizeof(u32); reg_offs += sizeof(u32)) { + regmap_read(priv->registers, CS35L56_SYNC_GPIO1_CFG + reg_offs, &pad_cfg); + regmap_read(priv->registers, CS35L56_GPIO1_CTRL1 + reg_offs, &val); + + /* Only read a value if set as an input pin and as a GPIO */ + val &= (CS35L56_GPIO_DIR_MASK | CS35L56_GPIO_FN_MASK); + if ((pad_cfg & CS35L56_PAD_GPIO_IE) && + (val == (CS35L56_GPIO_DIR_MASK | CS35L56_GPIO_FN_GPIO))) + status |= (param->gpio_status & mask); + + mask <<= 1; + } + + return status; +} + +static int cs35l56_shared_test_updt_gpio_pres(struct cs35l56_shared_test_priv *priv, + unsigned int reg, unsigned int val) +{ + int i, ret; + + ret = regmap_write(priv->registers, reg, val); + if (ret) + return ret; + + if (val & CS35L56_UPDT_GPIO_PRES) { + /* Simulate transferring register state to internal latches */ + for (i = 0; i < ARRAY_SIZE(priv->applied_pad_pull_state); i++) { + reg = CS35L56_SYNC_GPIO1_CFG + (i * sizeof(u32)); + regmap_read(priv->registers, reg, &val); + val = FIELD_GET(CS35L56_PAD_GPIO_PULL_MASK, val); + priv->applied_pad_pull_state[i] = val; + } + } + + return 0; +} + +static int cs35l56_shared_test_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + struct cs35l56_shared_test_priv *priv = context; + + switch (reg) { + case CS35L56_SYNC_GPIO1_CFG ... CS35L56_ASP2_DIO_GPIO13_CFG: + case CS35L56_GPIO1_CTRL1 ... CS35L56_GPIO13_CTRL1: + return regmap_read(priv->registers, reg, val); + case CS35L56_UPDATE_REGS: + *val = 0; + return 0; + case CS35L56_GPIO_STATUS1: + *val = cs35l56_shared_test_read_gpio_status(priv); + return 0; + default: + kunit_fail_current_test("Bad regmap read address %#x\n", reg); + return -EINVAL; + } +} + +static int cs35l56_shared_test_reg_write(void *context, unsigned int reg, unsigned int val) +{ + struct cs35l56_shared_test_priv *priv = context; + + switch (reg) { + case CS35L56_UPDATE_REGS: + return cs35l56_shared_test_updt_gpio_pres(priv, reg, val); + case CS35L56_SYNC_GPIO1_CFG ... CS35L56_ASP2_DIO_GPIO13_CFG: + case CS35L56_GPIO1_CTRL1 ... 
CS35L56_GPIO13_CTRL1: + return regmap_write(priv->registers, reg, val); + default: + kunit_fail_current_test("Bad regmap write address %#x\n", reg); + return -EINVAL; + } +} + +static const struct regmap_bus cs35l56_shared_test_regmap_bus = { + .reg_read = cs35l56_shared_test_reg_read, + .reg_write = cs35l56_shared_test_reg_write, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +/* + * Self-test that the mock GPIO registers obey the configuration bits. + * Other tests rely on the mocked registers only returning a GPIO state + * if the pin is correctly set as a GPIO input. + */ +static void cs35l56_shared_test_mock_gpio_status_selftest(struct kunit *test) +{ + const struct cs35l56_shared_test_param *param = test->param_value; + struct cs35l56_shared_test_priv *priv = test->priv; + struct cs35l56_base *cs35l56_base = priv->cs35l56_base; + unsigned int reg, val; + + KUNIT_ASSERT_NOT_NULL(test, param); + + /* Set all pins non-GPIO and output. Mock GPIO_STATUS should read 0 */ + for (reg = CS35L56_GPIO1_CTRL1; reg <= CS35L56_GPIO13_CTRL1; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, 0)); + + /* Set all pads as inputs */ + for (reg = CS35L56_SYNC_GPIO1_CFG; reg <= CS35L56_ASP2_DIO_GPIO13_CFG; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, CS35L56_PAD_GPIO_IE)); + + KUNIT_ASSERT_EQ(test, 0, regmap_read(cs35l56_base->regmap, CS35L56_GPIO_STATUS1, &val)); + KUNIT_EXPECT_EQ(test, val, 0); + + /* Set all pins as GPIO outputs. Mock GPIO_STATUS should read 0 */ + for (reg = CS35L56_GPIO1_CTRL1; reg <= CS35L56_GPIO13_CTRL1; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, CS35L56_GPIO_FN_GPIO)); + + KUNIT_ASSERT_EQ(test, 0, regmap_read(cs35l56_base->regmap, CS35L56_GPIO_STATUS1, &val)); + KUNIT_EXPECT_EQ(test, val, 0); + + /* Set all pins as non-GPIO inputs. Mock GPIO_STATUS should read 0 */ + for (reg = CS35L56_GPIO1_CTRL1; reg <= CS35L56_GPIO13_CTRL1; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, CS35L56_GPIO_DIR_MASK)); + + KUNIT_ASSERT_EQ(test, 0, regmap_read(cs35l56_base->regmap, CS35L56_GPIO_STATUS1, &val)); + KUNIT_EXPECT_EQ(test, val, 0); + + /* Set all pins as GPIO inputs. Mock GPIO_STATUS should match param->gpio_status */ + for (reg = CS35L56_GPIO1_CTRL1; reg <= CS35L56_GPIO13_CTRL1; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, + regmap_write(priv->registers, reg, + CS35L56_GPIO_DIR_MASK | CS35L56_GPIO_FN_GPIO)); + + KUNIT_ASSERT_EQ(test, 0, regmap_read(cs35l56_base->regmap, CS35L56_GPIO_STATUS1, &val)); + KUNIT_EXPECT_EQ(test, val, param->gpio_status); + + /* Set all pads as outputs. Mock GPIO_STATUS should read 0 */ + for (reg = CS35L56_SYNC_GPIO1_CFG; reg <= CS35L56_ASP2_DIO_GPIO13_CFG; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, 0)); + + KUNIT_ASSERT_EQ(test, 0, regmap_read(cs35l56_base->regmap, CS35L56_GPIO_STATUS1, &val)); + KUNIT_EXPECT_EQ(test, val, 0); +} + +/* Test that the listed chip pins are assembled into a speaker ID integer. 
*/ +static void cs35l56_shared_test_get_onchip_speaker_id(struct kunit *test) +{ + const struct cs35l56_shared_test_param *param = test->param_value; + struct cs35l56_shared_test_priv *priv = test->priv; + struct cs35l56_base *cs35l56_base = priv->cs35l56_base; + unsigned int i, reg; + + /* Set all pins non-GPIO and output */ + for (reg = CS35L56_GPIO1_CTRL1; reg <= CS35L56_GPIO13_CTRL1; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, 0)); + + for (reg = CS35L56_SYNC_GPIO1_CFG; reg <= CS35L56_ASP2_DIO_GPIO13_CFG; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, 0)); + + /* Init GPIO array */ + for (i = 0; i < ARRAY_SIZE(param->spkid_gpios); i++) { + if (param->spkid_gpios[i] < 0) + break; + + cs35l56_base->onchip_spkid_gpios[i] = param->spkid_gpios[i] - 1; + cs35l56_base->num_onchip_spkid_gpios++; + } + + cs35l56_base->num_onchip_spkid_pulls = 0; + + KUNIT_EXPECT_EQ(test, cs35l56_configure_onchip_spkid_pads(cs35l56_base), 0); + KUNIT_EXPECT_EQ(test, cs35l56_read_onchip_spkid(cs35l56_base), param->spkid); +} + +/* Test that the listed chip pins and the corresponding pads are configured correctly. */ +static void cs35l56_shared_test_onchip_speaker_id_pad_config(struct kunit *test) +{ + const struct cs35l56_shared_test_param *param = test->param_value; + struct cs35l56_shared_test_priv *priv = test->priv; + struct cs35l56_base *cs35l56_base = priv->cs35l56_base; + unsigned int i, reg, val; + + /* Init values in all pin registers */ + for (reg = CS35L56_GPIO1_CTRL1; reg <= CS35L56_GPIO13_CTRL1; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, 0)); + + for (reg = CS35L56_SYNC_GPIO1_CFG; reg <= CS35L56_ASP2_DIO_GPIO13_CFG; reg += sizeof(u32)) + KUNIT_ASSERT_EQ(test, 0, regmap_write(priv->registers, reg, 0)); + + /* Init GPIO array */ + for (i = 0; i < ARRAY_SIZE(param->spkid_gpios); i++) { + if (param->spkid_gpios[i] < 0) + break; + + cs35l56_base->onchip_spkid_gpios[i] = param->spkid_gpios[i] - 1; + cs35l56_base->num_onchip_spkid_gpios++; + } + + /* Init pulls array */ + for (i = 0; i < ARRAY_SIZE(param->spkid_pulls); i++) { + if (param->spkid_pulls[i] < 0) + break; + + cs35l56_base->onchip_spkid_pulls[i] = param->spkid_pulls[i]; + cs35l56_base->num_onchip_spkid_pulls++; + } + + KUNIT_EXPECT_EQ(test, cs35l56_configure_onchip_spkid_pads(cs35l56_base), 0); + + for (i = 0; i < ARRAY_SIZE(param->spkid_gpios); i++) { + if (param->spkid_gpios[i] < 0) + break; + + /* Pad should be an input */ + reg = CS35L56_SYNC_GPIO1_CFG + ((param->spkid_gpios[i] - 1) * sizeof(u32)); + KUNIT_EXPECT_EQ(test, regmap_read(priv->registers, reg, &val), 0); + KUNIT_EXPECT_EQ(test, val & CS35L56_PAD_GPIO_IE, CS35L56_PAD_GPIO_IE); + + /* Specified pulls should be set, others should be none */ + if (i < cs35l56_base->num_onchip_spkid_pulls) { + KUNIT_EXPECT_EQ(test, val & CS35L56_PAD_GPIO_PULL_MASK, + FIELD_PREP(CS35L56_PAD_GPIO_PULL_MASK, + param->spkid_pulls[i])); + } else { + KUNIT_EXPECT_EQ(test, val & CS35L56_PAD_GPIO_PULL_MASK, + CS35L56_PAD_PULL_NONE); + } + + /* Pulls for all specfied GPIOs should have been transferred to AO latch */ + if (i < cs35l56_base->num_onchip_spkid_pulls) { + KUNIT_EXPECT_EQ(test, + priv->applied_pad_pull_state[param->spkid_gpios[i] - 1], + param->spkid_pulls[i]); + } else { + KUNIT_EXPECT_EQ(test, + priv->applied_pad_pull_state[param->spkid_gpios[i] - 1], + CS35L56_PAD_PULL_NONE); + } + } +} + +/* Test that the listed chip pins are stashed correctly. 
*/ +static void cs35l56_shared_test_stash_onchip_spkid_pins(struct kunit *test) +{ + const struct cs35l56_shared_test_param *param = test->param_value; + struct cs35l56_shared_test_priv *priv = test->priv; + struct cs35l56_base *cs35l56_base = priv->cs35l56_base; + u32 gpios[5], pulls[5]; + int i, num_gpios, num_pulls; + + static_assert(ARRAY_SIZE(gpios) >= ARRAY_SIZE(param->spkid_gpios)); + static_assert(ARRAY_SIZE(pulls) >= ARRAY_SIZE(param->spkid_pulls)); + + num_gpios = 0; + for (i = 0; i < ARRAY_SIZE(param->spkid_gpios); i++) { + if (param->spkid_gpios[i] < 0) + break; + + gpios[i] = (u32)param->spkid_gpios[i]; + num_gpios++; + } + + num_pulls = 0; + for (i = 0; i < ARRAY_SIZE(param->spkid_pulls); i++) { + if (param->spkid_pulls[i] < 0) + break; + + pulls[i] = (u32)param->spkid_pulls[i]; + num_pulls++; + } + + cs35l56_base->num_onchip_spkid_gpios = 0; + cs35l56_base->num_onchip_spkid_pulls = 0; + + KUNIT_ASSERT_LE(test, num_gpios, ARRAY_SIZE(cs35l56_base->onchip_spkid_gpios)); + KUNIT_ASSERT_LE(test, num_pulls, ARRAY_SIZE(cs35l56_base->onchip_spkid_pulls)); + + KUNIT_EXPECT_EQ(test, + cs35l56_check_and_save_onchip_spkid_gpios(cs35l56_base, + gpios, num_gpios, + pulls, num_pulls), + 0); + + KUNIT_EXPECT_EQ(test, cs35l56_base->num_onchip_spkid_gpios, num_gpios); + KUNIT_EXPECT_EQ(test, cs35l56_base->num_onchip_spkid_pulls, num_pulls); + + /* GPIO numbers are adjusted from 1-based to 0-based */ + for (i = 0; i < num_gpios; i++) + KUNIT_EXPECT_EQ(test, cs35l56_base->onchip_spkid_gpios[i], gpios[i] - 1); + + for (i = 0; i < num_pulls; i++) + KUNIT_EXPECT_EQ(test, cs35l56_base->onchip_spkid_pulls[i], pulls[i]); +} + +/* Test that illegal GPIO numbers are rejected. */ +static void cs35l56_shared_test_stash_onchip_spkid_pins_reject_invalid(struct kunit *test) +{ + struct cs35l56_shared_test_priv *priv = test->priv; + struct cs35l56_base *cs35l56_base = priv->cs35l56_base; + u32 gpios[8] = { }, pulls[8] = { }; + + KUNIT_EXPECT_LE(test, + cs35l56_check_and_save_onchip_spkid_gpios(cs35l56_base, + gpios, 1, + pulls, 0), + 0); + + switch (cs35l56_base->type) { + case 0x54: + case 0x56: + case 0x57: + gpios[0] = CS35L56_MAX_GPIO + 1; + break; + case 0x63: + gpios[0] = CS35L63_MAX_GPIO + 1; + break; + default: + kunit_fail_current_test("Unsupported type:%#x\n", cs35l56_base->type); + return; + } + KUNIT_EXPECT_LE(test, + cs35l56_check_and_save_onchip_spkid_gpios(cs35l56_base, + gpios, 1, + pulls, 0), + 0); + + gpios[0] = 1; + pulls[0] = 3; + KUNIT_EXPECT_LE(test, + cs35l56_check_and_save_onchip_spkid_gpios(cs35l56_base, + gpios, 1, + pulls, 1), + 0); + + static_assert(ARRAY_SIZE(gpios) > ARRAY_SIZE(cs35l56_base->onchip_spkid_gpios)); + static_assert(ARRAY_SIZE(pulls) > ARRAY_SIZE(cs35l56_base->onchip_spkid_pulls)); + KUNIT_EXPECT_EQ(test, + cs35l56_check_and_save_onchip_spkid_gpios(cs35l56_base, + gpios, ARRAY_SIZE(gpios), + pulls, 0), + -EOVERFLOW); + KUNIT_EXPECT_EQ(test, + cs35l56_check_and_save_onchip_spkid_gpios(cs35l56_base, + gpios, 1, + pulls, ARRAY_SIZE(pulls)), + -EOVERFLOW); +} + +static void cs35l56_shared_test_onchip_speaker_id_not_defined(struct kunit *test) +{ + struct cs35l56_shared_test_priv *priv = test->priv; + struct cs35l56_base *cs35l56_base = priv->cs35l56_base; + + memset(cs35l56_base->onchip_spkid_gpios, 0, sizeof(cs35l56_base->onchip_spkid_gpios)); + memset(cs35l56_base->onchip_spkid_pulls, 0, sizeof(cs35l56_base->onchip_spkid_pulls)); + cs35l56_base->num_onchip_spkid_gpios = 0; + cs35l56_base->num_onchip_spkid_pulls = 0; + KUNIT_EXPECT_EQ(test, 
cs35l56_configure_onchip_spkid_pads(cs35l56_base), 0); + KUNIT_EXPECT_EQ(test, cs35l56_read_onchip_spkid(cs35l56_base), -ENOENT); +} + +static int cs35l56_shared_test_case_regmap_init(struct kunit *test, + const struct regmap_config *regmap_config) +{ + struct cs35l56_shared_test_priv *priv = test->priv; + struct cs35l56_base *cs35l56_base; + + /* + * Create a dummy regmap to simulate a register map by holding the + * values of all simulated registers in the regmap cache. + */ + priv->registers = regmap_init(&priv->amp_dev->dev, + &cs35l56_shared_test_mock_registers_regmap_bus, + priv, + &cs35l56_shared_test_mock_registers_regmap); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->registers); + KUNIT_ASSERT_EQ(test, 0, + kunit_add_action_or_reset(test, regmap_exit_wrapper, + priv->registers)); + regcache_cache_only(priv->registers, true); + + /* Create dummy regmap for cs35l56 driver */ + cs35l56_base = priv->cs35l56_base; + cs35l56_base->regmap = regmap_init(cs35l56_base->dev, + &cs35l56_shared_test_regmap_bus, + priv, + regmap_config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cs35l56_base->regmap); + KUNIT_ASSERT_EQ(test, 0, + kunit_add_action_or_reset(test, regmap_exit_wrapper, + cs35l56_base->regmap)); + + return 0; +} + +static int cs35l56_shared_test_case_base_init(struct kunit *test, u8 type, u8 rev, + const struct regmap_config *regmap_config) +{ + struct cs35l56_shared_test_priv *priv; + int ret; + + KUNIT_ASSERT_NOT_NULL(test, cs_amp_test_hooks); + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + test->priv = priv; + priv->test = test; + + /* Create dummy amp driver dev */ + priv->amp_dev = faux_device_create("cs35l56_shared_test_drv", NULL, NULL); + KUNIT_ASSERT_NOT_NULL(test, priv->amp_dev); + KUNIT_ASSERT_EQ(test, 0, + kunit_add_action_or_reset(test, + faux_device_destroy_wrapper, + priv->amp_dev)); + + priv->cs35l56_base = kunit_kzalloc(test, sizeof(*priv->cs35l56_base), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, priv->cs35l56_base); + priv->cs35l56_base->dev = &priv->amp_dev->dev; + priv->cs35l56_base->type = type; + priv->cs35l56_base->rev = rev; + + if (regmap_config) { + ret = cs35l56_shared_test_case_regmap_init(test, regmap_config); + if (ret) + return ret; + } + + return 0; +} + +static int cs35l56_shared_test_case_regmap_init_L56_B0_sdw(struct kunit *test) +{ + return cs35l56_shared_test_case_base_init(test, 0x56, 0xb0, &cs35l56_regmap_sdw); +} + +static int cs35l56_shared_test_case_regmap_init_L56_B0_spi(struct kunit *test) +{ + return cs35l56_shared_test_case_base_init(test, 0x56, 0xb0, &cs35l56_regmap_spi); +} + +static int cs35l56_shared_test_case_regmap_init_L56_B0_i2c(struct kunit *test) +{ + return cs35l56_shared_test_case_base_init(test, 0x56, 0xb0, &cs35l56_regmap_i2c); +} + +static int cs35l56_shared_test_case_regmap_init_L56_B2_sdw(struct kunit *test) +{ + return cs35l56_shared_test_case_base_init(test, 0x56, 0xb2, &cs35l56_regmap_sdw); +} + +static int cs35l56_shared_test_case_regmap_init_L56_B2_spi(struct kunit *test) +{ + return cs35l56_shared_test_case_base_init(test, 0x56, 0xb2, &cs35l56_regmap_spi); +} + +static int cs35l56_shared_test_case_regmap_init_L56_B2_i2c(struct kunit *test) +{ + return cs35l56_shared_test_case_base_init(test, 0x56, 0xb2, &cs35l56_regmap_i2c); +} + +static int cs35l56_shared_test_case_regmap_init_L63_A1_sdw(struct kunit *test) +{ + return cs35l56_shared_test_case_base_init(test, 0x63, 0xa1, &cs35l63_regmap_sdw); +} + +static void cs35l56_shared_test_gpio_param_desc(const struct 
cs35l56_shared_test_param *param, + char *desc) +{ + DECLARE_SEQ_BUF(gpios, 1 + (2 * ARRAY_SIZE(param->spkid_gpios))); + DECLARE_SEQ_BUF(pulls, 1 + (2 * ARRAY_SIZE(param->spkid_pulls))); + int i; + + for (i = 0; i < ARRAY_SIZE(param->spkid_gpios); i++) { + if (param->spkid_gpios[i] < 0) + break; + + seq_buf_printf(&gpios, "%s%d", (i == 0) ? "" : ",", param->spkid_gpios[i]); + } + + for (i = 0; i < ARRAY_SIZE(param->spkid_pulls); i++) { + if (param->spkid_pulls[i] < 0) + break; + + seq_buf_printf(&pulls, "%s%d", (i == 0) ? "" : ",", param->spkid_pulls[i]); + } + + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "gpios:{%s} pulls:{%s} status:%#lx spkid:%d", + seq_buf_str(&gpios), seq_buf_str(&pulls), param->gpio_status, param->spkid); +} + +static const struct cs35l56_shared_test_param cs35l56_shared_test_gpios_selftest_cases[] = { + { .spkid_gpios = { -1 }, .gpio_status = GENMASK(12, 0) }, +}; +KUNIT_ARRAY_PARAM(cs35l56_shared_test_gpios_selftest, + cs35l56_shared_test_gpios_selftest_cases, + cs35l56_shared_test_gpio_param_desc); + +static const struct cs35l56_shared_test_param cs35l56_shared_test_onchip_spkid_cases[] = { + { .spkid_gpios = { 1, -1 }, .gpio_status = 0, .spkid = 0 }, + { .spkid_gpios = { 1, -1 }, .gpio_status = ~BIT(0), .spkid = 0 }, + { .spkid_gpios = { 1, -1 }, .gpio_status = BIT(0), .spkid = 1 }, + + { .spkid_gpios = { 7, -1 }, .gpio_status = 0, .spkid = 0 }, + { .spkid_gpios = { 7, -1 }, .gpio_status = ~BIT(6), .spkid = 0 }, + { .spkid_gpios = { 7, -1 }, .gpio_status = BIT(6), .spkid = 1 }, + + { .spkid_gpios = { 1, 7, -1 }, .gpio_status = 0, .spkid = 0 }, + { .spkid_gpios = { 1, 7, -1 }, .gpio_status = ~(BIT(0) | BIT(6)), .spkid = 0 }, + { .spkid_gpios = { 1, 7, -1 }, .gpio_status = BIT(6), .spkid = 1 }, + { .spkid_gpios = { 1, 7, -1 }, .gpio_status = BIT(0), .spkid = 2 }, + { .spkid_gpios = { 1, 7, -1 }, .gpio_status = BIT(6) | BIT(0), .spkid = 3 }, + + { .spkid_gpios = { 7, 1, -1 }, .gpio_status = 0, .spkid = 0 }, + { .spkid_gpios = { 7, 1, -1 }, .gpio_status = ~(BIT(6) | BIT(0)), .spkid = 0 }, + { .spkid_gpios = { 7, 1, -1 }, .gpio_status = BIT(0), .spkid = 1 }, + { .spkid_gpios = { 7, 1, -1 }, .gpio_status = BIT(6), .spkid = 2 }, + { .spkid_gpios = { 7, 1, -1 }, .gpio_status = BIT(6) | BIT(0), .spkid = 3 }, + + { .spkid_gpios = { 3, 7, 1, -1 }, .gpio_status = 0, .spkid = 0 }, + { .spkid_gpios = { 3, 7, 1, -1 }, .gpio_status = BIT(0), .spkid = 1 }, + { .spkid_gpios = { 3, 7, 1, -1 }, .gpio_status = BIT(6), .spkid = 2 }, + { .spkid_gpios = { 3, 7, 1, -1 }, .gpio_status = BIT(6) | BIT(0), .spkid = 3 }, + { .spkid_gpios = { 3, 7, 1, -1 }, .gpio_status = BIT(2), .spkid = 4 }, + { .spkid_gpios = { 3, 7, 1, -1 }, .gpio_status = BIT(2) | BIT(0), .spkid = 5 }, + { .spkid_gpios = { 3, 7, 1, -1 }, .gpio_status = BIT(2) | BIT(6), .spkid = 6 }, + { .spkid_gpios = { 3, 7, 1, -1 }, .gpio_status = BIT(2) | BIT(6) | BIT(0), .spkid = 7 }, +}; +KUNIT_ARRAY_PARAM(cs35l56_shared_test_onchip_spkid, cs35l56_shared_test_onchip_spkid_cases, + cs35l56_shared_test_gpio_param_desc); + +static const struct cs35l56_shared_test_param cs35l56_shared_test_onchip_spkid_pull_cases[] = { + { .spkid_gpios = { 1, -1 }, .spkid_pulls = { 1, -1 }, }, + { .spkid_gpios = { 1, -1 }, .spkid_pulls = { 2, -1 }, }, + + { .spkid_gpios = { 7, -1 }, .spkid_pulls = { 1, -1 }, }, + { .spkid_gpios = { 7, -1 }, .spkid_pulls = { 2, -1 }, }, + + { .spkid_gpios = { 1, 7, -1 }, .spkid_pulls = { 1, 1, -1 }, }, + { .spkid_gpios = { 1, 7, -1 }, .spkid_pulls = { 2, 2, -1 }, }, + + { .spkid_gpios = { 7, 1, -1 }, .spkid_pulls = { 1, 1, -1 
}, }, + { .spkid_gpios = { 7, 1, -1 }, .spkid_pulls = { 2, 2, -1 }, }, + + { .spkid_gpios = { 3, 7, 1, -1 }, .spkid_pulls = { 1, 1, 1, -1 }, }, + { .spkid_gpios = { 3, 7, 1, -1 }, .spkid_pulls = { 2, 2, 2, -1 }, }, +}; +KUNIT_ARRAY_PARAM(cs35l56_shared_test_onchip_spkid_pull, + cs35l56_shared_test_onchip_spkid_pull_cases, + cs35l56_shared_test_gpio_param_desc); + +static struct kunit_case cs35l56_shared_test_cases[] = { + /* Tests for speaker id */ + KUNIT_CASE_PARAM(cs35l56_shared_test_mock_gpio_status_selftest, + cs35l56_shared_test_gpios_selftest_gen_params), + KUNIT_CASE_PARAM(cs35l56_shared_test_get_onchip_speaker_id, + cs35l56_shared_test_onchip_spkid_gen_params), + KUNIT_CASE_PARAM(cs35l56_shared_test_onchip_speaker_id_pad_config, + cs35l56_shared_test_onchip_spkid_gen_params), + KUNIT_CASE_PARAM(cs35l56_shared_test_onchip_speaker_id_pad_config, + cs35l56_shared_test_onchip_spkid_pull_gen_params), + KUNIT_CASE_PARAM(cs35l56_shared_test_stash_onchip_spkid_pins, + cs35l56_shared_test_onchip_spkid_pull_gen_params), + KUNIT_CASE(cs35l56_shared_test_stash_onchip_spkid_pins_reject_invalid), + KUNIT_CASE(cs35l56_shared_test_onchip_speaker_id_not_defined), + { } +}; + +static struct kunit_suite cs35l56_shared_test_suite_L56_B0_sdw = { + .name = "snd-soc-cs35l56-shared-test_L56_B0_sdw", + .init = cs35l56_shared_test_case_regmap_init_L56_B0_sdw, + .test_cases = cs35l56_shared_test_cases, +}; + +static struct kunit_suite cs35l56_shared_test_suite_L56_B2_sdw = { + .name = "snd-soc-cs35l56-shared-test_L56_B2_sdw", + .init = cs35l56_shared_test_case_regmap_init_L56_B2_sdw, + .test_cases = cs35l56_shared_test_cases, +}; + +static struct kunit_suite cs35l56_shared_test_suite_L63_A1_sdw = { + .name = "snd-soc-cs35l56-shared-test_L63_A1_sdw", + .init = cs35l56_shared_test_case_regmap_init_L63_A1_sdw, + .test_cases = cs35l56_shared_test_cases, +}; + +static struct kunit_suite cs35l56_shared_test_suite_L56_B0_spi = { + .name = "snd-soc-cs35l56-shared-test_L56_B0_spi", + .init = cs35l56_shared_test_case_regmap_init_L56_B0_spi, + .test_cases = cs35l56_shared_test_cases, +}; + +static struct kunit_suite cs35l56_shared_test_suite_L56_B2_spi = { + .name = "snd-soc-cs35l56-shared-test_L56_B2_spi", + .init = cs35l56_shared_test_case_regmap_init_L56_B2_spi, + .test_cases = cs35l56_shared_test_cases, +}; + +static struct kunit_suite cs35l56_shared_test_suite_L56_B0_i2c = { + .name = "snd-soc-cs35l56-shared-test_L56_B0_i2c", + .init = cs35l56_shared_test_case_regmap_init_L56_B0_i2c, + .test_cases = cs35l56_shared_test_cases, +}; + +static struct kunit_suite cs35l56_shared_test_suite_L56_B2_i2c = { + .name = "snd-soc-cs35l56-shared-test_L56_B2_i2c", + .init = cs35l56_shared_test_case_regmap_init_L56_B2_i2c, + .test_cases = cs35l56_shared_test_cases, +}; + +kunit_test_suites( + &cs35l56_shared_test_suite_L56_B0_sdw, + &cs35l56_shared_test_suite_L56_B2_sdw, + &cs35l56_shared_test_suite_L63_A1_sdw, + + &cs35l56_shared_test_suite_L56_B0_spi, + &cs35l56_shared_test_suite_L56_B2_spi, + + &cs35l56_shared_test_suite_L56_B0_i2c, + &cs35l56_shared_test_suite_L56_B2_i2c, +); + +MODULE_IMPORT_NS("SND_SOC_CS35L56_SHARED"); +MODULE_IMPORT_NS("SND_SOC_CS_AMP_LIB"); +MODULE_DESCRIPTION("KUnit test for cs35l56-shared module"); +MODULE_AUTHOR("Richard Fitzgerald "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c index 60100c8f8c952b..4707f28bfca245 100644 --- a/sound/soc/codecs/cs35l56-shared.c +++ b/sound/soc/codecs/cs35l56-shared.c @@ -5,13 +5,16 @@ // Copyright (C) 
2023 Cirrus Logic, Inc. and // Cirrus Logic International Semiconductor Ltd. +#include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -182,6 +185,8 @@ static bool cs35l56_readable_reg(struct device *dev, unsigned int reg) case CS35L56_OTP_MEM_53: case CS35L56_OTP_MEM_54: case CS35L56_OTP_MEM_55: + case CS35L56_SYNC_GPIO1_CFG ... CS35L56_ASP2_DIO_GPIO13_CFG: + case CS35L56_UPDATE_REGS: case CS35L56_ASP1_ENABLES1: case CS35L56_ASP1_CONTROL1: case CS35L56_ASP1_CONTROL2: @@ -213,6 +218,7 @@ static bool cs35l56_readable_reg(struct device *dev, unsigned int reg) case CS35L56_IRQ1_MASK_8: case CS35L56_IRQ1_MASK_18: case CS35L56_IRQ1_MASK_20: + case CS35L56_GPIO_STATUS1 ... CS35L56_GPIO13_CTRL1: case CS35L56_MIXER_NGATE_CH1_CFG: case CS35L56_MIXER_NGATE_CH2_CFG: case CS35L56_DSP_VIRTUAL1_MBOX_1: @@ -262,6 +268,8 @@ static bool cs35l56_common_volatile_reg(unsigned int reg) case CS35L56_GLOBAL_ENABLES: /* owned by firmware */ case CS35L56_BLOCK_ENABLES: /* owned by firmware */ case CS35L56_BLOCK_ENABLES2: /* owned by firmware */ + case CS35L56_SYNC_GPIO1_CFG ... CS35L56_ASP2_DIO_GPIO13_CFG: + case CS35L56_UPDATE_REGS: case CS35L56_REFCLK_INPUT: /* owned by firmware */ case CS35L56_GLOBAL_SAMPLE_RATE: /* owned by firmware */ case CS35L56_DACPCM1_INPUT: /* owned by firmware */ @@ -272,6 +280,7 @@ static bool cs35l56_common_volatile_reg(unsigned int reg) case CS35L56_IRQ1_EINT_1 ... CS35L56_IRQ1_EINT_8: case CS35L56_IRQ1_EINT_18: case CS35L56_IRQ1_EINT_20: + case CS35L56_GPIO_STATUS1 ... CS35L56_GPIO13_CTRL1: case CS35L56_MIXER_NGATE_CH1_CFG: case CS35L56_MIXER_NGATE_CH2_CFG: case CS35L56_DSP_VIRTUAL1_MBOX_1: @@ -1552,6 +1561,169 @@ int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base) } EXPORT_SYMBOL_NS_GPL(cs35l56_get_speaker_id, "SND_SOC_CS35L56_SHARED"); +int cs35l56_check_and_save_onchip_spkid_gpios(struct cs35l56_base *cs35l56_base, + const u32 *gpios, int num_gpios, + const u32 *pulls, int num_pulls) +{ + int max_gpio; + int ret = 0; + int i; + + if ((num_gpios > ARRAY_SIZE(cs35l56_base->onchip_spkid_gpios)) || + (num_pulls > ARRAY_SIZE(cs35l56_base->onchip_spkid_pulls))) + return -EOVERFLOW; + + switch (cs35l56_base->type) { + case 0x54: + case 0x56: + case 0x57: + max_gpio = CS35L56_MAX_GPIO; + break; + default: + max_gpio = CS35L63_MAX_GPIO; + break; + } + + for (i = 0; i < num_gpios; i++) { + if (gpios[i] < 1 || gpios[i] > max_gpio) { + dev_err(cs35l56_base->dev, "Invalid spkid GPIO %d\n", gpios[i]); + /* Keep going so we log all bad values */ + ret = -EINVAL; + } + + /* Change to zero-based */ + cs35l56_base->onchip_spkid_gpios[i] = gpios[i] - 1; + } + + for (i = 0; i < num_pulls; i++) { + switch (pulls[i]) { + case 0: + cs35l56_base->onchip_spkid_pulls[i] = CS35L56_PAD_PULL_NONE; + break; + case 1: + cs35l56_base->onchip_spkid_pulls[i] = CS35L56_PAD_PULL_UP; + break; + case 2: + cs35l56_base->onchip_spkid_pulls[i] = CS35L56_PAD_PULL_DOWN; + break; + default: + dev_err(cs35l56_base->dev, "Invalid spkid pull %d\n", pulls[i]); + /* Keep going so we log all bad values */ + ret = -EINVAL; + break; + } + } + if (ret) + return ret; + + cs35l56_base->num_onchip_spkid_gpios = num_gpios; + cs35l56_base->num_onchip_spkid_pulls = num_pulls; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs35l56_check_and_save_onchip_spkid_gpios, "SND_SOC_CS35L56_SHARED"); + +/* Caller must pm_runtime resume before calling this function */ +int cs35l56_configure_onchip_spkid_pads(struct cs35l56_base *cs35l56_base) +{ + struct regmap *regmap = 
cs35l56_base->regmap; + unsigned int addr_offset, val; + int num_gpios, num_pulls; + int i, ret; + + KUNIT_STATIC_STUB_REDIRECT(cs35l56_configure_onchip_spkid_pads, cs35l56_base); + + if (cs35l56_base->num_onchip_spkid_gpios == 0) + return 0; + + num_gpios = min(cs35l56_base->num_onchip_spkid_gpios, + ARRAY_SIZE(cs35l56_base->onchip_spkid_gpios)); + num_pulls = min(cs35l56_base->num_onchip_spkid_pulls, + ARRAY_SIZE(cs35l56_base->onchip_spkid_pulls)); + + for (i = 0; i < num_gpios; i++) { + addr_offset = cs35l56_base->onchip_spkid_gpios[i] * sizeof(u32); + + /* Set unspecified pulls to NONE */ + if (i < num_pulls) { + val = FIELD_PREP(CS35L56_PAD_GPIO_PULL_MASK, + cs35l56_base->onchip_spkid_pulls[i]); + } else { + val = FIELD_PREP(CS35L56_PAD_GPIO_PULL_MASK, CS35L56_PAD_PULL_NONE); + } + + ret = regmap_update_bits(regmap, CS35L56_SYNC_GPIO1_CFG + addr_offset, + CS35L56_PAD_GPIO_PULL_MASK | CS35L56_PAD_GPIO_IE, + val | CS35L56_PAD_GPIO_IE); + if (ret) { + dev_err(cs35l56_base->dev, "GPIO%d set pad fail: %d\n", + cs35l56_base->onchip_spkid_gpios[i] + 1, ret); + return ret; + } + } + + ret = regmap_write(regmap, CS35L56_UPDATE_REGS, CS35L56_UPDT_GPIO_PRES); + if (ret) { + dev_err(cs35l56_base->dev, "UPDT_GPIO_PRES failed:%d\n", ret); + return ret; + } + + usleep_range(CS35L56_PAD_PULL_SETTLE_US, CS35L56_PAD_PULL_SETTLE_US * 2); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs35l56_configure_onchip_spkid_pads, "SND_SOC_CS35L56_SHARED"); + +/* Caller must pm_runtime resume before calling this function */ +int cs35l56_read_onchip_spkid(struct cs35l56_base *cs35l56_base) +{ + struct regmap *regmap = cs35l56_base->regmap; + unsigned int addr_offset, val; + int num_gpios; + int speaker_id = 0; + int i, ret; + + KUNIT_STATIC_STUB_REDIRECT(cs35l56_read_onchip_spkid, cs35l56_base); + + if (cs35l56_base->num_onchip_spkid_gpios == 0) + return -ENOENT; + + num_gpios = min(cs35l56_base->num_onchip_spkid_gpios, + ARRAY_SIZE(cs35l56_base->onchip_spkid_gpios)); + + for (i = 0; i < num_gpios; i++) { + addr_offset = cs35l56_base->onchip_spkid_gpios[i] * sizeof(u32); + + ret = regmap_update_bits(regmap, CS35L56_GPIO1_CTRL1 + addr_offset, + CS35L56_GPIO_DIR_MASK | CS35L56_GPIO_FN_MASK, + CS35L56_GPIO_DIR_MASK | CS35L56_GPIO_FN_GPIO); + if (ret) { + dev_err(cs35l56_base->dev, "GPIO%u set func fail: %d\n", + cs35l56_base->onchip_spkid_gpios[i] + 1, ret); + return ret; + } + } + + ret = regmap_read(regmap, CS35L56_GPIO_STATUS1, &val); + if (ret) { + dev_err(cs35l56_base->dev, "GPIO%d status read failed: %d\n", + cs35l56_base->onchip_spkid_gpios[i] + 1, ret); + return ret; + } + + for (i = 0; i < num_gpios; i++) { + speaker_id <<= 1; + + if (val & BIT(cs35l56_base->onchip_spkid_gpios[i])) + speaker_id |= 1; + } + + dev_dbg(cs35l56_base->dev, "Onchip GPIO Speaker ID = %d\n", speaker_id); + + return speaker_id; +} +EXPORT_SYMBOL_NS_GPL(cs35l56_read_onchip_spkid, "SND_SOC_CS35L56_SHARED"); + static const u32 cs35l56_bclk_valid_for_pll_freq_table[] = { [0x0C] = 128000, [0x0F] = 256000, diff --git a/sound/soc/codecs/cs35l56-test.c b/sound/soc/codecs/cs35l56-test.c index a7b21660c40260..b6c8c08e3adefb 100644 --- a/sound/soc/codecs/cs35l56-test.c +++ b/sound/soc/codecs/cs35l56-test.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include #include #include @@ -23,16 +25,46 @@ KUNIT_DEFINE_ACTION_WRAPPER(faux_device_destroy_wrapper, faux_device_destroy, struct faux_device *) +KUNIT_DEFINE_ACTION_WRAPPER(software_node_unregister_node_group_wrapper, + software_node_unregister_node_group, + const struct software_node 
* const *) + +KUNIT_DEFINE_ACTION_WRAPPER(software_node_unregister_wrapper, + software_node_unregister, + const struct software_node *) + +KUNIT_DEFINE_ACTION_WRAPPER(device_remove_software_node_wrapper, + device_remove_software_node, + struct device *) + struct cs35l56_test_priv { struct faux_device *amp_dev; struct cs35l56_private *cs35l56_priv; const char *ssidexv2; + + bool read_onchip_spkid_called; + bool configure_onchip_spkid_pads_called; }; struct cs35l56_test_param { u8 type; u8 rev; + + s32 spkid_gpios[4]; + s32 spkid_pulls[4]; +}; + +static const struct software_node cs35l56_test_dev_sw_node = + SOFTWARE_NODE("SWD1", NULL, NULL); + +static const struct software_node cs35l56_test_af01_sw_node = + SOFTWARE_NODE("AF01", NULL, &cs35l56_test_dev_sw_node); + +static const struct software_node *cs35l56_test_dev_and_af01_node_group[] = { + &cs35l56_test_dev_sw_node, + &cs35l56_test_af01_sw_node, + NULL }; static const char *cs35l56_test_devm_get_vendor_specific_variant_id_none(struct device *dev, @@ -232,6 +264,190 @@ static void cs35l56_test_l56_b0_ssidexv2_ignored_suffix_sdw(struct kunit *test) KUNIT_EXPECT_STREQ(test, cs35l56->fallback_fw_suffix, "l1u5"); } +/* + * Test that cs35l56_process_xu_properties() correctly parses the GPIO and + * pull values from properties into the arrays in struct cs35l56_base. + * + * This test creates the node tree: + * + * Node("SWD1") { // top-level device node + * Node("AF01") { + * Node("mipi-sdca-function-expansion-subproperties") { + * property: "01fa-spk-id-gpios-onchip" + * property: 01fa-spk-id-gpios-onchip-pull + * } + * } + * } + * + * Note that in ACPI "mipi-sdca-function-expansion-subproperties" is + * a special _DSD property that points to a Device(EXT0) node but behaves + * as an alias of the EXT0 node. The equivalent in software nodes is to + * create a Node named "mipi-sdca-function-expansion-subproperties" with + * the properties. 
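+ * The test below registers the SWD1 and AF01 nodes as a group, builds the
+ * "mipi-sdca-function-expansion-subproperties" node at runtime so that each
+ * parameterised case can supply different property values, and finally
+ * attaches SWD1 to the device under test.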
+ * + */ +static void cs35l56_test_parse_xu_onchip_spkid(struct kunit *test) +{ + const struct cs35l56_test_param *param = test->param_value; + struct cs35l56_test_priv *priv = test->priv; + struct cs35l56_private *cs35l56 = priv->cs35l56_priv; + struct software_node *ext0_node; + int num_gpios = 0; + int num_pulls = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(param->spkid_gpios); i++, num_gpios++) { + if (param->spkid_gpios[i] < 0) + break; + } + KUNIT_ASSERT_LE(test, num_gpios, ARRAY_SIZE(cs35l56->base.onchip_spkid_gpios)); + + for (i = 0; i < ARRAY_SIZE(param->spkid_pulls); i++, num_pulls++) { + if (param->spkid_pulls[i] < 0) + break; + } + KUNIT_ASSERT_LE(test, num_pulls, ARRAY_SIZE(cs35l56->base.onchip_spkid_pulls)); + + const struct property_entry ext0_props[] = { + PROPERTY_ENTRY_U32_ARRAY_LEN("01fa-spk-id-gpios-onchip", + param->spkid_gpios, num_gpios), + PROPERTY_ENTRY_U32_ARRAY_LEN("01fa-spk-id-gpios-onchip-pull", + param->spkid_pulls, num_pulls), + { } + }; + + KUNIT_ASSERT_EQ(test, + software_node_register_node_group(cs35l56_test_dev_and_af01_node_group), + 0); + KUNIT_ASSERT_EQ(test, + kunit_add_action_or_reset(test, + software_node_unregister_node_group_wrapper, + cs35l56_test_dev_and_af01_node_group), + 0); + + ext0_node = kunit_kzalloc(test, sizeof(*ext0_node), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, ext0_node); + *ext0_node = SOFTWARE_NODE("mipi-sdca-function-expansion-subproperties", + ext0_props, &cs35l56_test_af01_sw_node); + + KUNIT_ASSERT_EQ(test, software_node_register(ext0_node), 0); + KUNIT_ASSERT_EQ(test, + kunit_add_action_or_reset(test, + software_node_unregister_wrapper, + ext0_node), + 0); + + KUNIT_ASSERT_EQ(test, + device_add_software_node(cs35l56->base.dev, &cs35l56_test_dev_sw_node), 0); + KUNIT_ASSERT_EQ(test, 0, + kunit_add_action_or_reset(test, + device_remove_software_node_wrapper, + cs35l56->base.dev)); + + KUNIT_EXPECT_EQ(test, cs35l56_process_xu_properties(cs35l56), 0); + + KUNIT_EXPECT_EQ(test, cs35l56->base.num_onchip_spkid_gpios, num_gpios); + KUNIT_EXPECT_EQ(test, cs35l56->base.num_onchip_spkid_pulls, num_pulls); + + for (i = 0; i < ARRAY_SIZE(param->spkid_gpios); i++) { + if (param->spkid_gpios[i] < 0) + break; + + /* + * cs35l56_process_xu_properties() stores the GPIO numbers + * zero-based, which is one less than the value in the property. 
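+		 * For example, a property value of 3 (GPIO3) is stored here as 2.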
+ */ + KUNIT_EXPECT_EQ_MSG(test, cs35l56->base.onchip_spkid_gpios[i], + param->spkid_gpios[i] - 1, + "i=%d", i); + } + + for (i = 0; i < ARRAY_SIZE(param->spkid_pulls); i++) { + if (param->spkid_pulls[i] < 0) + break; + + KUNIT_EXPECT_EQ_MSG(test, cs35l56->base.onchip_spkid_pulls[i], + param->spkid_pulls[i], "i=%d", i); + } +} + +static int cs35l56_test_dummy_read_onchip_spkid(struct cs35l56_base *cs35l56_base) +{ + struct kunit *test = kunit_get_current_test(); + struct cs35l56_test_priv *priv = test->priv; + + priv->read_onchip_spkid_called = true; + + return 4; +} + +static int cs35l56_test_dummy_configure_onchip_spkid_pads(struct cs35l56_base *cs35l56_base) +{ + struct kunit *test = kunit_get_current_test(); + struct cs35l56_test_priv *priv = test->priv; + + priv->configure_onchip_spkid_pads_called = true; + + return 0; +} + +static void cs35l56_test_set_fw_name_reads_onchip_spkid(struct kunit *test) +{ + struct cs35l56_test_priv *priv = test->priv; + struct cs35l56_private *cs35l56 = priv->cs35l56_priv; + + /* Provide some on-chip GPIOs for spkid */ + cs35l56->base.onchip_spkid_gpios[0] = 1; + cs35l56->base.num_onchip_spkid_gpios = 1; + + cs35l56->speaker_id = -ENOENT; + + kunit_activate_static_stub(test, + cs35l56_configure_onchip_spkid_pads, + cs35l56_test_dummy_configure_onchip_spkid_pads); + kunit_activate_static_stub(test, + cs35l56_read_onchip_spkid, + cs35l56_test_dummy_read_onchip_spkid); + + priv->configure_onchip_spkid_pads_called = false; + priv->read_onchip_spkid_called = false; + KUNIT_EXPECT_EQ(test, cs35l56_set_fw_name(cs35l56->component), 0); + KUNIT_EXPECT_TRUE(test, priv->configure_onchip_spkid_pads_called); + KUNIT_EXPECT_TRUE(test, priv->read_onchip_spkid_called); + KUNIT_EXPECT_EQ(test, cs35l56->speaker_id, + cs35l56_test_dummy_read_onchip_spkid(&cs35l56->base)); +} + +static void cs35l56_test_set_fw_name_preserves_spkid_with_onchip_gpios(struct kunit *test) +{ + struct cs35l56_test_priv *priv = test->priv; + struct cs35l56_private *cs35l56 = priv->cs35l56_priv; + + /* Provide some on-chip GPIOs for spkid */ + cs35l56->base.onchip_spkid_gpios[0] = 1; + cs35l56->base.num_onchip_spkid_gpios = 1; + + /* Simulate that the driver already got a spkid from somewhere */ + cs35l56->speaker_id = 15; + + KUNIT_EXPECT_EQ(test, cs35l56_set_fw_name(cs35l56->component), 0); + KUNIT_EXPECT_EQ(test, cs35l56->speaker_id, 15); +} + +static void cs35l56_test_set_fw_name_preserves_spkid_without_onchip_gpios(struct kunit *test) +{ + struct cs35l56_test_priv *priv = test->priv; + struct cs35l56_private *cs35l56 = priv->cs35l56_priv; + + cs35l56->base.num_onchip_spkid_gpios = 0; + + /* Simulate that the driver already got a spkid from somewhere */ + cs35l56->speaker_id = 15; + + KUNIT_EXPECT_EQ(test, cs35l56_set_fw_name(cs35l56->component), 0); + KUNIT_EXPECT_EQ(test, cs35l56->speaker_id, 15); +} + static int cs35l56_test_case_init_common(struct kunit *test) { struct cs35l56_test_priv *priv; @@ -263,6 +479,7 @@ static int cs35l56_test_case_init_common(struct kunit *test) cs35l56->component = kunit_kzalloc(test, sizeof(*cs35l56->component), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, cs35l56->component); cs35l56->component->dev = cs35l56->base.dev; + snd_soc_component_set_drvdata(cs35l56->component, cs35l56); cs35l56->component->card = kunit_kzalloc(test, sizeof(*cs35l56->component->card), GFP_KERNEL); @@ -299,6 +516,50 @@ static int cs35l56_test_case_init_soundwire(struct kunit *test) return 0; } +static void cs35l56_test_gpio_param_desc(const struct cs35l56_test_param *param, char *desc) 
+{ + DECLARE_SEQ_BUF(gpios, 1 + (2 * ARRAY_SIZE(param->spkid_gpios))); + DECLARE_SEQ_BUF(pulls, 1 + (2 * ARRAY_SIZE(param->spkid_pulls))); + int i; + + for (i = 0; i < ARRAY_SIZE(param->spkid_gpios); i++) { + if (param->spkid_gpios[i] < 0) + break; + + seq_buf_printf(&gpios, "%s%d", (i == 0) ? "" : ",", param->spkid_gpios[i]); + } + + for (i = 0; i < ARRAY_SIZE(param->spkid_pulls); i++) { + if (param->spkid_pulls[i] < 0) + break; + + seq_buf_printf(&pulls, "%s%d", (i == 0) ? "" : ",", param->spkid_pulls[i]); + } + + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "gpios:{%s} pulls:{%s}", + seq_buf_str(&gpios), seq_buf_str(&pulls)); +} + +static const struct cs35l56_test_param cs35l56_test_onchip_spkid_cases[] = { + { .spkid_gpios = { 1, -1 }, .spkid_pulls = { 1, -1 }, }, + { .spkid_gpios = { 1, -1 }, .spkid_pulls = { 2, -1 }, }, + + { .spkid_gpios = { 7, -1 }, .spkid_pulls = { 1, -1 }, }, + { .spkid_gpios = { 7, -1 }, .spkid_pulls = { 2, -1 }, }, + + { .spkid_gpios = { 1, 7, -1 }, .spkid_pulls = { 1, 1, -1 }, }, + { .spkid_gpios = { 1, 7, -1 }, .spkid_pulls = { 2, 2, -1 }, }, + + { .spkid_gpios = { 7, 1, -1 }, .spkid_pulls = { 1, 1, -1 }, }, + { .spkid_gpios = { 7, 1, -1 }, .spkid_pulls = { 2, 2, -1 }, }, + + { .spkid_gpios = { 3, 7, 1, -1 }, .spkid_pulls = { 1, 1, 1, -1 }, }, + { .spkid_gpios = { 3, 7, 1, -1 }, .spkid_pulls = { 2, 2, 2, -1 }, }, +}; +KUNIT_ARRAY_PARAM(cs35l56_test_onchip_spkid, + cs35l56_test_onchip_spkid_cases, + cs35l56_test_gpio_param_desc); + static void cs35l56_test_type_rev_param_desc(const struct cs35l56_test_param *param, char *desc) { @@ -331,6 +592,13 @@ static struct kunit_case cs35l56_test_cases_soundwire[] = { cs35l56_test_type_rev_ex_b0_gen_params), KUNIT_CASE(cs35l56_test_l56_b0_ssidexv2_ignored_suffix_sdw), + KUNIT_CASE_PARAM(cs35l56_test_parse_xu_onchip_spkid, + cs35l56_test_onchip_spkid_gen_params), + + KUNIT_CASE(cs35l56_test_set_fw_name_reads_onchip_spkid), + KUNIT_CASE(cs35l56_test_set_fw_name_preserves_spkid_with_onchip_gpios), + KUNIT_CASE(cs35l56_test_set_fw_name_preserves_spkid_without_onchip_gpios), + { } /* terminator */ }; @@ -339,6 +607,10 @@ static struct kunit_case cs35l56_test_cases_not_soundwire[] = { KUNIT_CASE_PARAM(cs35l56_test_ssidexv2_suffix_i2cspi, cs35l56_test_type_rev_all_gen_params), + KUNIT_CASE(cs35l56_test_set_fw_name_reads_onchip_spkid), + KUNIT_CASE(cs35l56_test_set_fw_name_preserves_spkid_with_onchip_gpios), + KUNIT_CASE(cs35l56_test_set_fw_name_preserves_spkid_without_onchip_gpios), + { } /* terminator */ }; @@ -360,6 +632,7 @@ kunit_test_suites( ); MODULE_IMPORT_NS("SND_SOC_CS_AMP_LIB"); +MODULE_IMPORT_NS("SND_SOC_CS35L56_SHARED"); MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); MODULE_DESCRIPTION("KUnit test for Cirrus Logic cs35l56 codec driver"); MODULE_AUTHOR("Richard Fitzgerald "); diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c index 31dd2f7b285872..2ff8b172b76ec6 100644 --- a/sound/soc/codecs/cs35l56.c +++ b/sound/soc/codecs/cs35l56.c @@ -1179,15 +1179,28 @@ VISIBLE_IF_KUNIT int cs35l56_set_fw_suffix(struct cs35l56_private *cs35l56) } EXPORT_SYMBOL_IF_KUNIT(cs35l56_set_fw_suffix); -static int cs35l56_component_probe(struct snd_soc_component *component) +VISIBLE_IF_KUNIT int cs35l56_set_fw_name(struct snd_soc_component *component) { - struct snd_soc_dapm_context *dapm = snd_soc_component_to_dapm(component); struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component); - struct dentry *debugfs_root = component->debugfs_root; unsigned short vendor, device; int ret; - 
BUILD_BUG_ON(ARRAY_SIZE(cs35l56_tx_input_texts) != ARRAY_SIZE(cs35l56_tx_input_values)); + if ((cs35l56->speaker_id < 0) && cs35l56->base.num_onchip_spkid_gpios) { + PM_RUNTIME_ACQUIRE(cs35l56->base.dev, pm); + ret = PM_RUNTIME_ACQUIRE_ERR(&pm); + if (ret) + return ret; + + ret = cs35l56_configure_onchip_spkid_pads(&cs35l56->base); + if (ret) + return ret; + + ret = cs35l56_read_onchip_spkid(&cs35l56->base); + if (ret < 0) + return ret; + + cs35l56->speaker_id = ret; + } if (!cs35l56->dsp.system_name && (snd_soc_card_get_pci_ssid(component->card, &vendor, &device) == 0)) { @@ -1208,6 +1221,19 @@ static int cs35l56_component_probe(struct snd_soc_component *component) return -ENOMEM; } + return 0; +} +EXPORT_SYMBOL_IF_KUNIT(cs35l56_set_fw_name); + +static int cs35l56_component_probe(struct snd_soc_component *component) +{ + struct snd_soc_dapm_context *dapm = snd_soc_component_to_dapm(component); + struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component); + struct dentry *debugfs_root = component->debugfs_root; + int ret; + + BUILD_BUG_ON(ARRAY_SIZE(cs35l56_tx_input_texts) != ARRAY_SIZE(cs35l56_tx_input_values)); + if (!wait_for_completion_timeout(&cs35l56->init_completion, msecs_to_jiffies(5000))) { dev_err(cs35l56->base.dev, "%s: init_completion timed out\n", __func__); @@ -1219,6 +1245,10 @@ static int cs35l56_component_probe(struct snd_soc_component *component) return -ENOMEM; cs35l56->component = component; + ret = cs35l56_set_fw_name(component); + if (ret) + return ret; + ret = cs35l56_set_fw_suffix(cs35l56); if (ret) return ret; @@ -1532,6 +1562,105 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56) return 0; } +static int cs35l56_read_fwnode_u32_array(struct device *dev, + struct fwnode_handle *parent_node, + const char *prop_name, + int max_count, + u32 *dest) +{ + int count, ret; + + count = fwnode_property_count_u32(parent_node, prop_name); + if ((count == 0) || (count == -EINVAL) || (count == -ENODATA)) { + dev_dbg(dev, "%s not found in %s\n", prop_name, fwnode_get_name(parent_node)); + return 0; + } + + if (count < 0) { + dev_err(dev, "Get %s error:%d\n", prop_name, count); + return count; + } + + if (count > max_count) { + dev_err(dev, "%s too many entries (%d)\n", prop_name, count); + return -EOVERFLOW; + } + + ret = fwnode_property_read_u32_array(parent_node, prop_name, dest, count); + if (ret) { + dev_err(dev, "Error reading %s: %d\n", prop_name, ret); + return ret; + } + + return count; +} + +static int cs35l56_process_xu_onchip_speaker_id(struct cs35l56_private *cs35l56, + struct fwnode_handle *ext_node) +{ + static const char * const gpio_name = "01fa-spk-id-gpios-onchip"; + static const char * const pull_name = "01fa-spk-id-gpios-onchip-pull"; + u32 gpios[5], pulls[5]; + int num_gpios, num_pulls; + int ret; + + static_assert(ARRAY_SIZE(gpios) == ARRAY_SIZE(cs35l56->base.onchip_spkid_gpios)); + static_assert(ARRAY_SIZE(pulls) == ARRAY_SIZE(cs35l56->base.onchip_spkid_pulls)); + + num_gpios = cs35l56_read_fwnode_u32_array(cs35l56->base.dev, ext_node, gpio_name, + ARRAY_SIZE(gpios), gpios); + if (num_gpios < 1) + return num_gpios; + + num_pulls = cs35l56_read_fwnode_u32_array(cs35l56->base.dev, ext_node, pull_name, + ARRAY_SIZE(pulls), pulls); + if (num_pulls < 0) + return num_pulls; + + if (num_pulls != num_gpios) { + dev_warn(cs35l56->base.dev, "%s count(%d) != %s count(%d)\n", + pull_name, num_pulls, gpio_name, num_gpios); + } + + ret = cs35l56_check_and_save_onchip_spkid_gpios(&cs35l56->base, + gpios, num_gpios, + pulls, num_pulls); + if 
(ret) { + return dev_err_probe(cs35l56->base.dev, ret, "Error in %s/%s\n", + gpio_name, pull_name); + } + + return 0; +} + +VISIBLE_IF_KUNIT int cs35l56_process_xu_properties(struct cs35l56_private *cs35l56) +{ + struct fwnode_handle *ext_node = NULL; + struct fwnode_handle *link; + int ret; + + if (!cs35l56->sdw_peripheral) + return 0; + + fwnode_for_each_child_node(dev_fwnode(cs35l56->base.dev), link) { + ext_node = fwnode_get_named_child_node(link, + "mipi-sdca-function-expansion-subproperties"); + if (ext_node) { + fwnode_handle_put(link); + break; + } + } + + if (!ext_node) + return 0; + + ret = cs35l56_process_xu_onchip_speaker_id(cs35l56, ext_node); + fwnode_handle_put(ext_node); + + return ret; +} +EXPORT_SYMBOL_IF_KUNIT(cs35l56_process_xu_properties); + static int cs35l56_get_firmware_uid(struct cs35l56_private *cs35l56) { struct device *dev = cs35l56->base.dev; @@ -1712,6 +1841,10 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56) if (ret != 0) goto err; + ret = cs35l56_process_xu_properties(cs35l56); + if (ret) + goto err; + ret = cs35l56_dsp_init(cs35l56); if (ret < 0) { dev_err_probe(cs35l56->base.dev, ret, "DSP init failed\n"); diff --git a/sound/soc/codecs/cs35l56.h b/sound/soc/codecs/cs35l56.h index 7187885a13c1b4..691f857d0bd8b2 100644 --- a/sound/soc/codecs/cs35l56.h +++ b/sound/soc/codecs/cs35l56.h @@ -76,6 +76,8 @@ void cs35l56_remove(struct cs35l56_private *cs35l56); #if IS_ENABLED(CONFIG_KUNIT) int cs35l56_set_fw_suffix(struct cs35l56_private *cs35l56); +int cs35l56_set_fw_name(struct snd_soc_component *component); +int cs35l56_process_xu_properties(struct cs35l56_private *cs35l56); #endif #endif /* ifndef CS35L56_H */ diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index 77dfc83a3c0131..d8cdd37e9112b2 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c @@ -528,7 +528,7 @@ static int cs4271_soc_resume(struct snd_soc_component *component) ret = clk_prepare_enable(cs4271->clk); if (ret) { dev_err(component->dev, "Failed to enable clk: %d\n", ret); - return ret; + goto err_disable_regulators; } /* Do a proper reset after power up */ @@ -537,15 +537,21 @@ static int cs4271_soc_resume(struct snd_soc_component *component) /* Restore codec state */ ret = regcache_sync(cs4271->regmap); if (ret < 0) - return ret; + goto err_disable_clk; /* then disable the power-down bit */ ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2, CS4271_MODE2_PDN, 0); if (ret < 0) - return ret; + goto err_disable_clk; return 0; + +err_disable_clk: + clk_disable_unprepare(cs4271->clk); +err_disable_regulators: + regulator_bulk_disable(ARRAY_SIZE(cs4271->supplies), cs4271->supplies); + return ret; } #else #define cs4271_soc_suspend NULL diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c index b719d62635a0eb..b83bc4de1301d1 100644 --- a/sound/soc/codecs/cs42l43-jack.c +++ b/sound/soc/codecs/cs42l43-jack.c @@ -496,7 +496,23 @@ void cs42l43_bias_sense_timeout(struct work_struct *work) pm_runtime_put_autosuspend(priv->dev); } -static void cs42l43_start_load_detect(struct cs42l43_codec *priv) +static const struct reg_sequence cs42l43_3pole_patch[] = { + { 0x4000, 0x00000055 }, + { 0x4000, 0x000000AA }, + { 0x17420, 0x8500F300 }, + { 0x17424, 0x36003E00 }, + { 0x4000, 0x00000000 }, +}; + +static const struct reg_sequence cs42l43_4pole_patch[] = { + { 0x4000, 0x00000055 }, + { 0x4000, 0x000000AA }, + { 0x17420, 0x7800E600 }, + { 0x17424, 0x36003800 }, + { 0x4000, 0x00000000 }, +}; + +static void cs42l43_start_load_detect(struct 
cs42l43_codec *priv, bool mic) { struct cs42l43 *cs42l43 = priv->core; @@ -520,6 +536,15 @@ static void cs42l43_start_load_detect(struct cs42l43_codec *priv) dev_err(priv->dev, "Load detect HP power down timed out\n"); } + if (mic) + regmap_multi_reg_write_bypassed(cs42l43->regmap, + cs42l43_4pole_patch, + ARRAY_SIZE(cs42l43_4pole_patch)); + else + regmap_multi_reg_write_bypassed(cs42l43->regmap, + cs42l43_3pole_patch, + ARRAY_SIZE(cs42l43_3pole_patch)); + regmap_update_bits(cs42l43->regmap, CS42L43_BLOCK_EN3, CS42L43_ADC1_EN_MASK | CS42L43_ADC2_EN_MASK, 0); regmap_update_bits(cs42l43->regmap, CS42L43_DACCNFG2, CS42L43_HP_HPF_EN_MASK, 0); @@ -598,7 +623,7 @@ static int cs42l43_run_load_detect(struct cs42l43_codec *priv, bool mic) reinit_completion(&priv->load_detect); - cs42l43_start_load_detect(priv); + cs42l43_start_load_detect(priv, mic); time_left = wait_for_completion_timeout(&priv->load_detect, msecs_to_jiffies(CS42L43_LOAD_TIMEOUT_MS)); cs42l43_stop_load_detect(priv); @@ -622,11 +647,11 @@ static int cs42l43_run_load_detect(struct cs42l43_codec *priv, bool mic) } switch (val & CS42L43_AMP3_RES_DET_MASK) { - case 0x0: // low impedance - case 0x1: // high impedance + case 0x0: // < 22 Ohm impedance + case 0x1: // < 150 Ohm impedance + case 0x2: // < 1000 Ohm impedance return CS42L43_JACK_HEADPHONE; - case 0x2: // lineout - case 0x3: // Open circuit + case 0x3: // > 1000 Ohm impedance return CS42L43_JACK_LINEOUT; default: return -EINVAL; diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index 38340f2922826b..9838fe42cb6fca 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -163,12 +163,11 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol, if (es8328->deemph == deemph) return 0; + es8328->deemph = deemph; ret = es8328_set_deemph(component); if (ret < 0) return ret; - es8328->deemph = deemph; - return 1; } @@ -530,7 +529,9 @@ static int es8328_hw_params(struct snd_pcm_substream *substream, return ret; es8328->playback_fs = params_rate(params); - es8328_set_deemph(component); + ret = es8328_set_deemph(component); + if (ret < 0) + return ret; } else { ret = snd_soc_component_update_bits(component, ES8328_ADCCONTROL4, ES8328_ADCCONTROL4_ADCWL_MASK, @@ -591,21 +592,26 @@ static int es8328_set_dai_fmt(struct snd_soc_dai *codec_dai, { struct snd_soc_component *component = codec_dai->component; struct es8328_priv *es8328 = snd_soc_component_get_drvdata(component); + int ret; u8 dac_mode = 0; u8 adc_mode = 0; switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { case SND_SOC_DAIFMT_CBP_CFP: /* Master serial port mode, with BCLK generated automatically */ - snd_soc_component_update_bits(component, ES8328_MASTERMODE, - ES8328_MASTERMODE_MSC, - ES8328_MASTERMODE_MSC); + ret = snd_soc_component_update_bits(component, ES8328_MASTERMODE, + ES8328_MASTERMODE_MSC, + ES8328_MASTERMODE_MSC); + if (ret < 0) + return ret; es8328->provider = true; break; case SND_SOC_DAIFMT_CBC_CFC: /* Slave serial port mode */ - snd_soc_component_update_bits(component, ES8328_MASTERMODE, - ES8328_MASTERMODE_MSC, 0); + ret = snd_soc_component_update_bits(component, ES8328_MASTERMODE, + ES8328_MASTERMODE_MSC, 0); + if (ret < 0) + return ret; es8328->provider = false; break; default: @@ -634,10 +640,17 @@ static int es8328_set_dai_fmt(struct snd_soc_dai *codec_dai, if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) return -EINVAL; - snd_soc_component_update_bits(component, ES8328_DACCONTROL1, - ES8328_DACCONTROL1_DACFORMAT_MASK, dac_mode); - 
snd_soc_component_update_bits(component, ES8328_ADCCONTROL4, - ES8328_ADCCONTROL4_ADCFORMAT_MASK, adc_mode); + ret = snd_soc_component_update_bits(component, ES8328_DACCONTROL1, + ES8328_DACCONTROL1_DACFORMAT_MASK, + dac_mode); + if (ret < 0) + return ret; + + ret = snd_soc_component_update_bits(component, ES8328_ADCCONTROL4, + ES8328_ADCCONTROL4_ADCFORMAT_MASK, + adc_mode); + if (ret < 0) + return ret; return 0; } @@ -646,6 +659,7 @@ static int es8328_set_bias_level(struct snd_soc_component *component, enum snd_soc_bias_level level) { struct snd_soc_dapm_context *dapm = snd_soc_component_to_dapm(component); + int ret; switch (level) { case SND_SOC_BIAS_ON: @@ -653,43 +667,56 @@ static int es8328_set_bias_level(struct snd_soc_component *component, case SND_SOC_BIAS_PREPARE: /* VREF, VMID=2x50k, digital enabled */ - snd_soc_component_write(component, ES8328_CHIPPOWER, 0); - snd_soc_component_update_bits(component, ES8328_CONTROL1, - ES8328_CONTROL1_VMIDSEL_MASK | - ES8328_CONTROL1_ENREF, - ES8328_CONTROL1_VMIDSEL_50k | - ES8328_CONTROL1_ENREF); + ret = snd_soc_component_write(component, ES8328_CHIPPOWER, 0); + if (ret < 0) + return ret; + + ret = snd_soc_component_update_bits(component, ES8328_CONTROL1, + ES8328_CONTROL1_VMIDSEL_MASK | + ES8328_CONTROL1_ENREF, + ES8328_CONTROL1_VMIDSEL_50k | + ES8328_CONTROL1_ENREF); + if (ret < 0) + return ret; break; case SND_SOC_BIAS_STANDBY: if (snd_soc_dapm_get_bias_level(dapm) == SND_SOC_BIAS_OFF) { - snd_soc_component_update_bits(component, ES8328_CONTROL1, - ES8328_CONTROL1_VMIDSEL_MASK | - ES8328_CONTROL1_ENREF, - ES8328_CONTROL1_VMIDSEL_5k | - ES8328_CONTROL1_ENREF); + ret = snd_soc_component_update_bits(component, ES8328_CONTROL1, + ES8328_CONTROL1_VMIDSEL_MASK | + ES8328_CONTROL1_ENREF, + ES8328_CONTROL1_VMIDSEL_5k | + ES8328_CONTROL1_ENREF); + if (ret < 0) + return ret; /* Charge caps */ msleep(100); } - snd_soc_component_write(component, ES8328_CONTROL2, - ES8328_CONTROL2_OVERCURRENT_ON | - ES8328_CONTROL2_THERMAL_SHUTDOWN_ON); + ret = snd_soc_component_write(component, ES8328_CONTROL2, + ES8328_CONTROL2_OVERCURRENT_ON | + ES8328_CONTROL2_THERMAL_SHUTDOWN_ON); + if (ret < 0) + return ret; /* VREF, VMID=2*500k, digital stopped */ - snd_soc_component_update_bits(component, ES8328_CONTROL1, - ES8328_CONTROL1_VMIDSEL_MASK | - ES8328_CONTROL1_ENREF, - ES8328_CONTROL1_VMIDSEL_500k | - ES8328_CONTROL1_ENREF); + ret = snd_soc_component_update_bits(component, ES8328_CONTROL1, + ES8328_CONTROL1_VMIDSEL_MASK | + ES8328_CONTROL1_ENREF, + ES8328_CONTROL1_VMIDSEL_500k | + ES8328_CONTROL1_ENREF); + if (ret < 0) + return ret; break; case SND_SOC_BIAS_OFF: - snd_soc_component_update_bits(component, ES8328_CONTROL1, - ES8328_CONTROL1_VMIDSEL_MASK | - ES8328_CONTROL1_ENREF, - 0); + ret = snd_soc_component_update_bits(component, ES8328_CONTROL1, + ES8328_CONTROL1_VMIDSEL_MASK | + ES8328_CONTROL1_ENREF, + 0); + if (ret < 0) + return ret; break; } return 0; @@ -744,12 +771,9 @@ static int es8328_suspend(struct snd_soc_component *component) static int es8328_resume(struct snd_soc_component *component) { - struct regmap *regmap = dev_get_regmap(component->dev, NULL); - struct es8328_priv *es8328; + struct es8328_priv *es8328 = snd_soc_component_get_drvdata(component); int ret; - es8328 = snd_soc_component_get_drvdata(component); - ret = clk_prepare_enable(es8328->clk); if (ret) { dev_err(component->dev, "unable to enable clock\n"); @@ -760,17 +784,23 @@ static int es8328_resume(struct snd_soc_component *component) es8328->supplies); if (ret) { dev_err(component->dev, 
"unable to enable regulators\n"); - return ret; + goto err_clk; } - regcache_mark_dirty(regmap); - ret = regcache_sync(regmap); + regcache_mark_dirty(es8328->regmap); + ret = regcache_sync(es8328->regmap); if (ret) { dev_err(component->dev, "unable to sync regcache\n"); - return ret; + goto err_regulators; } return 0; + +err_regulators: + regulator_bulk_disable(ARRAY_SIZE(es8328->supplies), es8328->supplies); +err_clk: + clk_disable_unprepare(es8328->clk); + return ret; } static int es8328_component_probe(struct snd_soc_component *component) diff --git a/sound/soc/codecs/rt1320-sdw.c b/sound/soc/codecs/rt1320-sdw.c index e849c977266525..50f65662e1433e 100644 --- a/sound/soc/codecs/rt1320-sdw.c +++ b/sound/soc/codecs/rt1320-sdw.c @@ -203,6 +203,7 @@ static const struct reg_sequence rt1320_vc_blind_write[] = { { 0x3fc2bfc2, 0x00 }, { 0x3fc2bfc1, 0x00 }, { 0x3fc2bfc0, 0x07 }, + { 0x1000cc46, 0x00 }, { 0x0000d486, 0x43 }, { SDW_SDCA_CTL(FUNC_NUM_AMP, RT1320_SDCA_ENT_PDE23, RT1320_SDCA_CTL_REQ_POWER_STATE, 0), 0x00 }, { 0x1000db00, 0x07 }, @@ -354,6 +355,7 @@ static const struct reg_sequence rt1321_blind_write[] = { { 0x0000d73d, 0xd7 }, { 0x0000d73e, 0x00 }, { 0x0000d73f, 0x10 }, + { 0x1000cd56, 0x00 }, { 0x3fc2dfc3, 0x00 }, { 0x3fc2dfc2, 0x00 }, { 0x3fc2dfc1, 0x00 }, diff --git a/sound/soc/codecs/rt721-sdca.c b/sound/soc/codecs/rt721-sdca.c index 8233532a1752a3..35960c22522491 100644 --- a/sound/soc/codecs/rt721-sdca.c +++ b/sound/soc/codecs/rt721-sdca.c @@ -245,12 +245,12 @@ static void rt721_sdca_jack_preset(struct rt721_sdca_priv *rt721) regmap_write(rt721->mbq_regmap, 0x5b10007, 0x2000); regmap_write(rt721->mbq_regmap, 0x5B10017, 0x1b0f); rt_sdca_index_write(rt721->mbq_regmap, RT721_CBJ_CTRL, - RT721_CBJ_A0_GAT_CTRL1, 0x2a02); + RT721_CBJ_A0_GAT_CTRL1, 0x2205); rt_sdca_index_write(rt721->mbq_regmap, RT721_CAP_PORT_CTRL, RT721_HP_AMP_2CH_CAL4, 0xa105); rt_sdca_index_write(rt721->mbq_regmap, RT721_VENDOR_ANA_CTL, RT721_UAJ_TOP_TCON14, 0x3b33); - regmap_write(rt721->mbq_regmap, 0x310400, 0x3023); + regmap_write(rt721->mbq_regmap, 0x310400, 0x3043); rt_sdca_index_write(rt721->mbq_regmap, RT721_VENDOR_ANA_CTL, RT721_UAJ_TOP_TCON14, 0x3f33); rt_sdca_index_write(rt721->mbq_regmap, RT721_VENDOR_ANA_CTL, diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c index 78fd0a5dc6f22e..0e084c3a162d9c 100644 --- a/sound/soc/codecs/tas2781-fmwlib.c +++ b/sound/soc/codecs/tas2781-fmwlib.c @@ -2,7 +2,7 @@ // // tas2781-fmwlib.c -- TASDEVICE firmware support // -// Copyright 2023 - 2025 Texas Instruments, Inc. +// Copyright 2023 - 2026 Texas Instruments, Inc. 
// // Author: Shenghao Ding // Author: Baojun Xu @@ -80,6 +80,14 @@ #define POST_SOFTWARE_RESET_DEVICE_C 0x47 #define POST_SOFTWARE_RESET_DEVICE_D 0x48 +#define COPY_CAL_DATA(i) \ + do { \ + calbin_data[i + 1] = data[7]; \ + calbin_data[i + 2] = data[8]; \ + calbin_data[i + 3] = data[9]; \ + calbin_data[i + 4] = data[10]; \ + } while (0) + struct tas_crc { unsigned char offset; unsigned char len; @@ -1952,23 +1960,6 @@ static int dspfw_default_callback(struct tasdevice_priv *tas_priv, return rc; } -static int load_calib_data(struct tasdevice_priv *tas_priv, - struct tasdevice_data *dev_data) -{ - struct tasdev_blk *block; - unsigned int i; - int ret = 0; - - for (i = 0; i < dev_data->nr_blk; i++) { - block = &(dev_data->dev_blks[i]); - ret = tasdevice_load_block(tas_priv, block); - if (ret < 0) - break; - } - - return ret; -} - static int fw_parse_header(struct tasdevice_priv *tas_priv, struct tasdevice_fw *tas_fmw, const struct firmware *fmw, int offset) { @@ -2029,6 +2020,103 @@ static int fw_parse_variable_hdr_cal(struct tasdevice_priv *tas_priv, return offset; } +static inline int check_cal_bin_data(struct device *dev, + const unsigned char *data, const char *name) +{ + if (data[2] != 0x85 || data[1] != 4) { + dev_err(dev, "Invalid cal bin file in %s\n", name); + return -1; + } + return 0; +} + +static void calbin_conversion(struct tasdevice_priv *priv, + struct tasdevice_fw *tas_fmw) +{ + struct calidata *cali_data = &priv->cali_data; + unsigned char *calbin_data = cali_data->data; + struct cali_reg *p = &cali_data->cali_reg_array; + struct tasdevice_calibration *calibration; + struct tasdevice_data *img_data; + struct tasdev_blk *blk; + unsigned char *data; + int chn, k; + + if (cali_data->total_sz != priv->ndev * + (cali_data->cali_dat_sz_per_dev + 1)) { + dev_err(priv->dev, "%s: cali_data size err\n", + __func__); + return; + } + calibration = &(tas_fmw->calibrations[0]); + img_data = &(calibration->dev_data); + + if (img_data->nr_blk != 1) { + dev_err(priv->dev, "%s: Invalid nr_blk, wrong cal bin\n", + __func__); + return; + } + + blk = &(img_data->dev_blks[0]); + if (blk->nr_cmds != 15) { + dev_err(priv->dev, "%s: Invalid nr_cmds, wrong cal bin\n", + __func__); + return; + } + + switch (blk->type) { + case COEFF_DEVICE_A: + chn = 0; + break; + case COEFF_DEVICE_B: + chn = 1; + break; + case COEFF_DEVICE_C: + chn = 2; + break; + case COEFF_DEVICE_D: + chn = 3; + break; + default: + dev_err(priv->dev, "%s: Other Type = 0x%02x\n", + __func__, blk->type); + return; + } + k = chn * (cali_data->cali_dat_sz_per_dev + 1); + + data = blk->data; + if (check_cal_bin_data(priv->dev, data, "r0_reg") < 0) + return; + p->r0_reg = TASDEVICE_REG(data[4], data[5], data[6]); + COPY_CAL_DATA(k); + + data = blk->data + 12; + if (check_cal_bin_data(priv->dev, data, "r0_low_reg") < 0) + return; + p->r0_low_reg = TASDEVICE_REG(data[4], data[5], data[6]); + COPY_CAL_DATA(k + 4); + + data = blk->data + 24; + if (check_cal_bin_data(priv->dev, data, "invr0_reg") < 0) + return; + p->invr0_reg = TASDEVICE_REG(data[4], data[5], data[6]); + COPY_CAL_DATA(k + 8); + + data = blk->data + 36; + if (check_cal_bin_data(priv->dev, data, "pow_reg") < 0) + return; + p->pow_reg = TASDEVICE_REG(data[4], data[5], data[6]); + COPY_CAL_DATA(k + 12); + + data = blk->data + 48; + if (check_cal_bin_data(priv->dev, data, "tlimit_reg") < 0) + return; + p->tlimit_reg = TASDEVICE_REG(data[4], data[5], data[6]); + COPY_CAL_DATA(k + 16); + + calbin_data[k] = chn; +} + /* When calibrated data parsing error occurs, DSP can still work 
with default * calibrated data, memory resource related to calibrated data will be * released in the tasdevice_codec_remove. @@ -2086,6 +2174,7 @@ static int fw_parse_calibration_data(struct tasdevice_priv *tas_priv, goto out; } + calbin_conversion(tas_priv, tas_fmw); out: return offset; } @@ -2371,25 +2460,12 @@ static int tasdevice_load_data(struct tasdevice_priv *tas_priv, static void tasdev_load_calibrated_data(struct tasdevice_priv *priv, int i) { - struct tasdevice_fw *cal_fmw = priv->tasdevice[i].cali_data_fmw; struct calidata *cali_data = &priv->cali_data; struct cali_reg *p = &cali_data->cali_reg_array; unsigned char *data = cali_data->data; - struct tasdevice_calibration *cal; int k = i * (cali_data->cali_dat_sz_per_dev + 1); int rc; - /* Load the calibrated data from cal bin file */ - if (!priv->is_user_space_calidata && cal_fmw) { - cal = cal_fmw->calibrations; - - if (cal) - load_calib_data(priv, &cal->dev_data); - return; - } - if (!priv->is_user_space_calidata) - return; - /* load calibrated data from user space */ if (data[k] != i) { dev_err(priv->dev, "%s: no cal-data for dev %d from usr-spc\n", __func__, i); diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c index d1c76ab0144d19..41b89fcc69c370 100644 --- a/sound/soc/codecs/tas2781-i2c.c +++ b/sound/soc/codecs/tas2781-i2c.c @@ -2,7 +2,7 @@ // // ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier // -// Copyright (C) 2022 - 2025 Texas Instruments Incorporated +// Copyright (C) 2022 - 2026 Texas Instruments Incorporated // https://www.ti.com // // The TAS2563/TAS2781 driver implements a flexible and configurable @@ -255,8 +255,6 @@ static int tasdev_cali_data_get(struct snd_kcontrol *kcontrol, int rc; guard(mutex)(&priv->codec_lock); - if (!priv->is_user_space_calidata) - return -1; if (!p->r0_reg) return -1; @@ -654,7 +652,6 @@ static int tasdev_cali_data_put(struct snd_kcontrol *kcontrol, } } i += 2; - priv->is_user_space_calidata = true; if (priv->dspbin_typ == TASDEV_BASIC) { p->r0_reg = TASDEVICE_REG(src[i], src[i + 1], src[i + 2]); @@ -1444,7 +1441,11 @@ static int tasdevice_create_cali_ctrls(struct tasdevice_priv *priv) GFP_KERNEL); if (!cali_data->data) return -ENOMEM; - + /* + * Set to an invalid value before the calibrated data is stored into + * it, for the default value is 0, which means the first device. 
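+	 * tasdev_load_calibrated_data() only accepts a slot when data[k] matches
+	 * the device index, so 0xff can never be mistaken for valid data.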
+ */ + cali_data->data[0] = 0xff; if (priv->chip_id == TAS2781) { struct soc_bytes_ext *ext_cali_start; char *cali_start_name; diff --git a/sound/soc/codecs/wcd939x-sdw.c b/sound/soc/codecs/wcd939x-sdw.c index d0dacf3eef9e2d..95f4be287e79b6 100644 --- a/sound/soc/codecs/wcd939x-sdw.c +++ b/sound/soc/codecs/wcd939x-sdw.c @@ -1408,7 +1408,6 @@ static void wcd9390_remove(struct sdw_slave *pdev) struct device *dev = &pdev->dev; component_del(dev, &wcd_sdw_component_ops); - } static const struct sdw_device_id wcd9390_slave_id[] = { diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 92fb16f7be4563..5fda9b647c7049 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c @@ -106,6 +106,12 @@ static unsigned char clk_map_imx8qxp[2][ASRC_CLK_MAP_LEN] = { }, }; +static unsigned char clk_map_imx952[ASRC_CLK_MAP_LEN] = { + 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0x0, + 0x0, 0x1, 0x2, 0x3, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0x4, 0x5, 0x6, 0x8, 0xf, 0xf, + 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0x7, 0x9, 0xa, 0xb, 0xc, 0xd, 0xf, 0xf, 0xf, 0xf, +}; + /* * According to RM, the divider range is 1 ~ 8, * prescaler is power of 2 from 1 ~ 128. @@ -1078,6 +1084,26 @@ static unsigned int fsl_asrc_get_output_fifo_size(struct fsl_asrc_pair *pair) return val >> ASRFSTi_OUTPUT_FIFO_SHIFT; } +static bool fsl_asrc_m2m_output_ready(struct fsl_asrc_pair *pair) +{ + struct fsl_asrc *asrc = pair->asrc; + enum asrc_pair_index index = pair->index; + u32 val; + int ret; + + /* Check output fifo status if it exceeds the watermark. */ + ret = regmap_read_poll_timeout(asrc->regmap, REG_ASRFST(index), val, + (ASRFSTi_OUTPUT_FIFO_FILL(val) >= ASRC_M2M_OUTPUTFIFO_WML), + 1, 1000); + + if (ret) { + pair_warn("output is not ready\n"); + return false; + } + + return true; +} + static int fsl_asrc_m2m_prepare(struct fsl_asrc_pair *pair) { struct fsl_asrc_pair_priv *pair_priv = pair->private; @@ -1275,6 +1301,7 @@ static int fsl_asrc_probe(struct platform_device *pdev) asrc_priv->soc = of_device_get_match_data(&pdev->dev); asrc->use_edma = asrc_priv->soc->use_edma; + asrc->start_before_dma = asrc_priv->soc->start_before_dma; asrc->get_dma_channel = fsl_asrc_get_dma_channel; asrc->request_pair = fsl_asrc_request_pair; asrc->release_pair = fsl_asrc_release_pair; @@ -1289,6 +1316,7 @@ static int fsl_asrc_probe(struct platform_device *pdev) asrc->m2m_get_maxburst = fsl_asrc_m2m_get_maxburst; asrc->m2m_pair_resume = fsl_asrc_m2m_pair_resume; asrc->m2m_get_cap = fsl_asrc_m2m_get_cap; + asrc->m2m_output_ready = fsl_asrc_m2m_output_ready; if (of_device_is_compatible(np, "fsl,imx35-asrc")) { asrc_priv->clk_map[IN] = input_clk_map_imx35; @@ -1315,6 +1343,9 @@ static int fsl_asrc_probe(struct platform_device *pdev) asrc_priv->clk_map[IN] = clk_map_imx8qxp[map_idx]; asrc_priv->clk_map[OUT] = clk_map_imx8qxp[map_idx]; } + } else if (of_device_is_compatible(np, "fsl,imx952-asrc")) { + asrc_priv->clk_map[IN] = clk_map_imx952; + asrc_priv->clk_map[OUT] = clk_map_imx952; } asrc->channel_avail = 10; @@ -1553,11 +1584,18 @@ static const struct fsl_asrc_soc_data fsl_asrc_imx8qxp_data = { .channel_bits = 4, }; +static const struct fsl_asrc_soc_data fsl_asrc_imx952_data = { + .use_edma = true, + .channel_bits = 4, + .start_before_dma = true, +}; + static const struct of_device_id fsl_asrc_ids[] = { { .compatible = "fsl,imx35-asrc", .data = &fsl_asrc_imx35_data }, { .compatible = "fsl,imx53-asrc", .data = &fsl_asrc_imx53_data }, { .compatible = "fsl,imx8qm-asrc", .data = &fsl_asrc_imx8qm_data }, { 
.compatible = "fsl,imx8qxp-asrc", .data = &fsl_asrc_imx8qxp_data }, + { .compatible = "fsl,imx952-asrc", .data = &fsl_asrc_imx952_data }, {} }; MODULE_DEVICE_TABLE(of, fsl_asrc_ids); diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h index 1c492eb237f5f4..7a81366a0ee496 100644 --- a/sound/soc/fsl/fsl_asrc.h +++ b/sound/soc/fsl/fsl_asrc.h @@ -257,6 +257,8 @@ #define ASRFSTi_OUTPUT_FIFO_WIDTH 7 #define ASRFSTi_OUTPUT_FIFO_SHIFT 12 #define ASRFSTi_OUTPUT_FIFO_MASK (((1 << ASRFSTi_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTi_OUTPUT_FIFO_SHIFT) +#define ASRFSTi_OUTPUT_FIFO_FILL(v) \ + (((v) & ASRFSTi_OUTPUT_FIFO_MASK) >> ASRFSTi_OUTPUT_FIFO_SHIFT) #define ASRFSTi_IAEi_SHIFT 11 #define ASRFSTi_IAEi_MASK (1 << ASRFSTi_IAEi_SHIFT) #define ASRFSTi_IAEi (1 << ASRFSTi_IAEi_SHIFT) @@ -324,6 +326,13 @@ enum asrc_inclk { INCLK_SAI6_TX_BCLK = 0x22, INCLK_HDMI_RX_SAI0_RX_BCLK = 0x24, INCLK_HDMI_TX_SAI0_TX_BCLK = 0x25, + + INCLK_SAI2_TX_BCLK = 0x26, + INCLK_SAI3_TX_BCLK = 0x27, + INCLK_SAI4_RX_BCLK = 0x28, + INCLK_SAI4_TX_BCLK = 0x29, + INCLK_SAI5_RX_BCLK = 0x2a, + INCLK_SAI5_TX_BCLK = 0x2b, }; enum asrc_outclk { @@ -364,6 +373,13 @@ enum asrc_outclk { OUTCLK_SAI6_TX_BCLK = 0x22, OUTCLK_HDMI_RX_SAI0_RX_BCLK = 0x24, OUTCLK_HDMI_TX_SAI0_TX_BCLK = 0x25, + + OUTCLK_SAI2_TX_BCLK = 0x26, + OUTCLK_SAI3_TX_BCLK = 0x27, + OUTCLK_SAI4_RX_BCLK = 0x28, + OUTCLK_SAI4_TX_BCLK = 0x29, + OUTCLK_SAI5_RX_BCLK = 0x2a, + OUTCLK_SAI5_TX_BCLK = 0x2b, }; #define ASRC_CLK_MAX_NUM 16 @@ -432,10 +448,12 @@ struct dma_block { * * @use_edma: using edma as dma device or not * @channel_bits: width of ASRCNCR register for each pair + * @start_before_dma: start asrc before dma */ struct fsl_asrc_soc_data { bool use_edma; unsigned int channel_bits; + bool start_before_dma; }; /** diff --git a/sound/soc/fsl/fsl_asrc_common.h b/sound/soc/fsl/fsl_asrc_common.h index 0cd595b0f629db..c8a1a2b5915d37 100644 --- a/sound/soc/fsl/fsl_asrc_common.h +++ b/sound/soc/fsl/fsl_asrc_common.h @@ -107,6 +107,7 @@ struct fsl_asrc_pair { * @asrc_rate: default sample rate for ASoC Back-Ends * @asrc_format: default sample format for ASoC Back-Ends * @use_edma: edma is used + * @start_before_dma: start asrc before dma * @get_dma_channel: function pointer * @request_pair: function pointer * @release_pair: function pointer @@ -116,6 +117,7 @@ struct fsl_asrc_pair { * @m2m_start: function pointer * @m2m_unprepare: function pointer * @m2m_stop: function pointer + * @m2m_output_ready: function pointer, check output fifo ready or not * @m2m_calc_out_len: function pointer * @m2m_get_maxburst: function pointer * @m2m_pair_suspend: function pointer @@ -143,6 +145,7 @@ struct fsl_asrc { int asrc_rate; snd_pcm_format_t asrc_format; bool use_edma; + bool start_before_dma; struct dma_chan *(*get_dma_channel)(struct fsl_asrc_pair *pair, bool dir); int (*request_pair)(int channels, struct fsl_asrc_pair *pair); @@ -154,6 +157,7 @@ struct fsl_asrc { int (*m2m_start)(struct fsl_asrc_pair *pair); int (*m2m_unprepare)(struct fsl_asrc_pair *pair); int (*m2m_stop)(struct fsl_asrc_pair *pair); + bool (*m2m_output_ready)(struct fsl_asrc_pair *pair); int (*m2m_calc_out_len)(struct fsl_asrc_pair *pair, int input_buffer_length); int (*m2m_get_maxburst)(u8 dir, struct fsl_asrc_pair *pair); diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c index 7dacc06b2f02e2..348b0aabfa6830 100644 --- a/sound/soc/fsl/fsl_asrc_dma.c +++ b/sound/soc/fsl/fsl_asrc_dma.c @@ -449,18 +449,52 @@ fsl_asrc_dma_pcm_pointer(struct snd_soc_component *component, static int 
fsl_asrc_dma_pcm_new(struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd) { - struct snd_card *card = rtd->card->snd_card; + struct device *dev = component->dev; + struct fsl_asrc *asrc = dev_get_drvdata(dev); + struct fsl_asrc_pair *pair; struct snd_pcm *pcm = rtd->pcm; + struct dma_chan *chan; int ret; - ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32)); - if (ret) { - dev_err(card->dev, "failed to set DMA mask\n"); - return ret; + pair = kzalloc(size_add(sizeof(*pair), asrc->pair_priv_size), GFP_KERNEL); + if (!pair) + return -ENOMEM; + + pair->asrc = asrc; + pair->private = (void *)pair + sizeof(struct fsl_asrc_pair); + + /* Request a pair, which will be released later. + * Request pair function needs channel num as input, for this + * pair, we just request "1" channel temporarily. + */ + ret = asrc->request_pair(1, pair); + if (ret < 0) { + dev_err(dev, "failed to request asrc pair\n"); + goto req_pair_err; + } + + /* Request a dma channel, which will be released later. */ + chan = asrc->get_dma_channel(pair, IN); + if (!chan) { + dev_err(dev, "failed to get dma channel\n"); + ret = -EINVAL; + goto dma_chan_err; } - return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, - card->dev, FSL_ASRC_DMABUF_SIZE); + ret = snd_pcm_set_fixed_buffer_all(pcm, + SNDRV_DMA_TYPE_DEV, + chan->device->dev, + FSL_ASRC_DMABUF_SIZE); + + dma_release_channel(chan); + +dma_chan_err: + asrc->release_pair(pair); + +req_pair_err: + kfree(pair); + + return ret; } struct snd_soc_component_driver fsl_asrc_component = { diff --git a/sound/soc/fsl/fsl_asrc_m2m.c b/sound/soc/fsl/fsl_asrc_m2m.c index f46881f71e4307..77999526dd9eaa 100644 --- a/sound/soc/fsl/fsl_asrc_m2m.c +++ b/sound/soc/fsl/fsl_asrc_m2m.c @@ -253,15 +253,21 @@ static int asrc_m2m_device_run(struct fsl_asrc_pair *pair, struct snd_compr_task reinit_completion(&pair->complete[IN]); reinit_completion(&pair->complete[OUT]); + if (asrc->start_before_dma) + asrc->m2m_start(pair); + /* Submit DMA request */ dmaengine_submit(pair->desc[IN]); dma_async_issue_pending(pair->desc[IN]->chan); if (out_dma_len > 0) { + if (asrc->start_before_dma && asrc->m2m_output_ready) + asrc->m2m_output_ready(pair); dmaengine_submit(pair->desc[OUT]); dma_async_issue_pending(pair->desc[OUT]->chan); } - asrc->m2m_start(pair); + if (!asrc->start_before_dma) + asrc->m2m_start(pair); if (!wait_for_completion_interruptible_timeout(&pair->complete[IN], 10 * HZ)) { dev_err(dev, "out DMA task timeout\n"); diff --git a/sound/soc/fsl/imx-rpmsg.c b/sound/soc/fsl/imx-rpmsg.c index 53f04d1f32806d..76a8e68c1b620c 100644 --- a/sound/soc/fsl/imx-rpmsg.c +++ b/sound/soc/fsl/imx-rpmsg.c @@ -145,7 +145,7 @@ static int imx_rpmsg_probe(struct platform_device *pdev) data->dai.ignore_pmdown_time = 1; data->dai.cpus->dai_name = pdev->dev.platform_data; - cpu_dai = snd_soc_find_dai(data->dai.cpus); + cpu_dai = snd_soc_find_dai_with_mutex(data->dai.cpus); if (!cpu_dai) { ret = -EPROBE_DEFER; goto fail; diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c index fce50fd9f09389..e9ee752d5ec320 100644 --- a/sound/soc/intel/boards/sof_es8336.c +++ b/sound/soc/intel/boards/sof_es8336.c @@ -334,6 +334,15 @@ static int sof_es8336_quirk_cb(const struct dmi_system_id *id) * if the topology file is modified as well. 
*/ static const struct dmi_system_id sof_es8336_quirk_table[] = { + { + .callback = sof_es8336_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"), + DMI_MATCH(DMI_PRODUCT_NAME, "BOD-WXX9"), + }, + .driver_data = (void *)(SOF_ES8336_HEADPHONE_GPIO | + SOF_ES8336_ENABLE_DMIC) + }, { .callback = sof_es8336_quirk_cb, .matches = { diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig index bd210fafe9fea1..391ce2225fde04 100644 --- a/sound/soc/rockchip/Kconfig +++ b/sound/soc/rockchip/Kconfig @@ -41,6 +41,7 @@ config SND_SOC_ROCKCHIP_SAI config SND_SOC_ROCKCHIP_SPDIF tristate "Rockchip SPDIF Device Driver" + select SND_PCM_IEC958 select SND_SOC_GENERIC_DMAENGINE_PCM help Say Y or M if you want to add support for SPDIF driver for diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c index d365168934dc6e..581624f2682ef3 100644 --- a/sound/soc/rockchip/rockchip_spdif.c +++ b/sound/soc/rockchip/rockchip_spdif.c @@ -5,10 +5,11 @@ * * Copyright (c) 2014 Rockchip Electronics Co. Ltd. * Author: Jianqun - * Copyright (c) 2015 Collabora Ltd. + * Copyright (c) 2015-2026 Collabora Ltd. * Author: Sjoerd Simons */ +#include #include #include #include @@ -16,6 +17,7 @@ #include #include #include +#include #include #include "rockchip_spdif.h" @@ -27,7 +29,25 @@ enum rk_spdif_type { RK_SPDIF_RK3366, }; -#define RK3288_GRF_SOC_CON2 0x24c +/* + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * CS0: | Mode | d | c | b | a | + * CS1: | Category Code | + * CS2: | Channel Number | Source Number | + * CS3: | Clock Accuracy | Sample Freq | + * CS4: | Ori Sample Freq | Word Length | + * CS5: | | CGMS-A | + * CS6~CS23: Reserved + * + * a: use of channel status block + * b: linear PCM identification: 0 for lpcm, 1 for nlpcm + * c: copyright information + * d: additional format information + */ +#define CS_BYTE 6 +#define CS_FRAME(c) ((c) << 16 | (c)) + +#define RK3288_GRF_SOC_CON2 0x24c struct rk_spdif_dev { struct device *dev; @@ -40,29 +60,6 @@ struct rk_spdif_dev { struct regmap *regmap; }; -static const struct of_device_id rk_spdif_match[] __maybe_unused = { - { .compatible = "rockchip,rk3066-spdif", - .data = (void *)RK_SPDIF_RK3066 }, - { .compatible = "rockchip,rk3188-spdif", - .data = (void *)RK_SPDIF_RK3188 }, - { .compatible = "rockchip,rk3228-spdif", - .data = (void *)RK_SPDIF_RK3366 }, - { .compatible = "rockchip,rk3288-spdif", - .data = (void *)RK_SPDIF_RK3288 }, - { .compatible = "rockchip,rk3328-spdif", - .data = (void *)RK_SPDIF_RK3366 }, - { .compatible = "rockchip,rk3366-spdif", - .data = (void *)RK_SPDIF_RK3366 }, - { .compatible = "rockchip,rk3368-spdif", - .data = (void *)RK_SPDIF_RK3366 }, - { .compatible = "rockchip,rk3399-spdif", - .data = (void *)RK_SPDIF_RK3366 }, - { .compatible = "rockchip,rk3568-spdif", - .data = (void *)RK_SPDIF_RK3366 }, - {}, -}; -MODULE_DEVICE_TABLE(of, rk_spdif_match); - static int rk_spdif_runtime_suspend(struct device *dev) { struct rk_spdif_dev *spdif = dev_get_drvdata(dev); @@ -109,39 +106,63 @@ static int rk_spdif_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct rk_spdif_dev *spdif = snd_soc_dai_get_drvdata(dai); + unsigned int mclk_rate = clk_get_rate(spdif->mclk); unsigned int val = SPDIF_CFGR_HALFWORD_ENABLE; - int srate, mclk; - int ret; + int bmc, div, ret, i; + u16 *fc; + u8 cs[CS_BYTE]; + + ret = snd_pcm_create_iec958_consumer_hw_params(params, cs, sizeof(cs)); + if (ret < 0) + return ret; - srate = params_rate(params); - mclk = srate * 128; + fc = (u16 *)cs; + for (i = 0; i < 
CS_BYTE / 2; i++) + regmap_write(spdif->regmap, SPDIF_CHNSRn(i), CS_FRAME(fc[i])); + + regmap_update_bits(spdif->regmap, SPDIF_CFGR, SPDIF_CFGR_CSE_MASK, + SPDIF_CFGR_CSE_EN); + + /* bmc = 128fs */ + bmc = 128 * params_rate(params); + div = DIV_ROUND_CLOSEST(mclk_rate, bmc); + val |= SPDIF_CFGR_CLK_DIV(div); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: val |= SPDIF_CFGR_VDW_16; + val |= SPDIF_CFGR_ADJ_RIGHT_J; break; case SNDRV_PCM_FORMAT_S20_3LE: val |= SPDIF_CFGR_VDW_20; + val |= SPDIF_CFGR_ADJ_RIGHT_J; break; case SNDRV_PCM_FORMAT_S24_LE: val |= SPDIF_CFGR_VDW_24; + val |= SPDIF_CFGR_ADJ_RIGHT_J; + break; + case SNDRV_PCM_FORMAT_S32_LE: + val |= SPDIF_CFGR_VDW_24; + val |= SPDIF_CFGR_ADJ_LEFT_J; break; default: return -EINVAL; } - /* Set clock and calculate divider */ - ret = clk_set_rate(spdif->mclk, mclk); - if (ret != 0) { - dev_err(spdif->dev, "Failed to set module clock rate: %d\n", - ret); - return ret; - } + /* + * clear MCLK domain logic before setting Fmclk and Fsdo to ensure + * that switching between S16_LE and S32_LE audio does not result + * in accidential channels swap. + */ + regmap_update_bits(spdif->regmap, SPDIF_CFGR, SPDIF_CFGR_CLR_MASK, + SPDIF_CFGR_CLR_EN); + udelay(1); ret = regmap_update_bits(spdif->regmap, SPDIF_CFGR, SPDIF_CFGR_CLK_DIV_MASK | - SPDIF_CFGR_HALFWORD_ENABLE | - SDPIF_CFGR_VDW_MASK, val); + SPDIF_CFGR_HALFWORD_MASK | + SDPIF_CFGR_VDW_MASK | + SPDIF_CFGR_ADJ_MASK, val); return ret; } @@ -157,7 +178,7 @@ static int rk_spdif_trigger(struct snd_pcm_substream *substream, case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR, - SPDIF_DMACR_TDE_ENABLE | + SPDIF_DMACR_TDE_MASK | SPDIF_DMACR_TDL_MASK, SPDIF_DMACR_TDE_ENABLE | SPDIF_DMACR_TDL(16)); @@ -166,21 +187,21 @@ static int rk_spdif_trigger(struct snd_pcm_substream *substream, return ret; ret = regmap_update_bits(spdif->regmap, SPDIF_XFER, - SPDIF_XFER_TXS_START, + SPDIF_XFER_TXS_MASK, SPDIF_XFER_TXS_START); break; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR, - SPDIF_DMACR_TDE_ENABLE, + SPDIF_DMACR_TDE_MASK, SPDIF_DMACR_TDE_DISABLE); if (ret != 0) return ret; ret = regmap_update_bits(spdif->regmap, SPDIF_XFER, - SPDIF_XFER_TXS_START, + SPDIF_XFER_TXS_MASK, SPDIF_XFER_TXS_STOP); break; default: @@ -200,7 +221,24 @@ static int rk_spdif_dai_probe(struct snd_soc_dai *dai) return 0; } +static int rk_spdif_set_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int freq, int dir) +{ + struct rk_spdif_dev *spdif = snd_soc_dai_get_drvdata(dai); + int ret; + + if (!freq) + return 0; + + ret = clk_set_rate(spdif->mclk, freq); + if (ret) + dev_err(spdif->dev, "Failed to set mclk: %d\n", ret); + + return ret; +} + static const struct snd_soc_dai_ops rk_spdif_dai_ops = { + .set_sysclk = rk_spdif_set_sysclk, .probe = rk_spdif_dai_probe, .hw_params = rk_spdif_hw_params, .trigger = rk_spdif_trigger, @@ -211,14 +249,11 @@ static struct snd_soc_dai_driver rk_spdif_dai = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, - .rates = (SNDRV_PCM_RATE_32000 | - SNDRV_PCM_RATE_44100 | - SNDRV_PCM_RATE_48000 | - SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_192000), + .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | - SNDRV_PCM_FMTBIT_S24_LE), + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE), }, .ops = &rk_spdif_dai_ops, }; @@ -236,6 +271,9 @@ static bool 
rk_spdif_wr_reg(struct device *dev, unsigned int reg) case SPDIF_INTCR: case SPDIF_XFER: case SPDIF_SMPDR: + case SPDIF_VLDFRn(0) ... SPDIF_VLDFRn(11): + case SPDIF_USRDRn(0) ... SPDIF_USRDRn(11): + case SPDIF_CHNSRn(0) ... SPDIF_CHNSRn(11): return true; default: return false; @@ -251,6 +289,9 @@ static bool rk_spdif_rd_reg(struct device *dev, unsigned int reg) case SPDIF_INTSR: case SPDIF_XFER: case SPDIF_SMPDR: + case SPDIF_VLDFRn(0) ... SPDIF_VLDFRn(11): + case SPDIF_USRDRn(0) ... SPDIF_USRDRn(11): + case SPDIF_CHNSRn(0) ... SPDIF_CHNSRn(11): return true; default: return false; @@ -273,32 +314,38 @@ static const struct regmap_config rk_spdif_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = SPDIF_SMPDR, + .max_register = SPDIF_VERSION, .writeable_reg = rk_spdif_wr_reg, .readable_reg = rk_spdif_rd_reg, .volatile_reg = rk_spdif_volatile_reg, .cache_type = REGCACHE_FLAT, }; +static void rk_spdif_suspend(void *data) +{ + struct device *dev = data; + + if (!pm_runtime_status_suspended(dev)) + rk_spdif_runtime_suspend(dev); +} + static int rk_spdif_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; + enum rk_spdif_type spdif_type; struct rk_spdif_dev *spdif; - const struct of_device_id *match; struct resource *res; void __iomem *regs; int ret; - match = of_match_node(rk_spdif_match, np); - if (match->data == (void *)RK_SPDIF_RK3288) { + spdif_type = (uintptr_t) device_get_match_data(&pdev->dev); + if (spdif_type == RK_SPDIF_RK3288) { struct regmap *grf; grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); - if (IS_ERR(grf)) { - dev_err(&pdev->dev, + if (IS_ERR(grf)) + return dev_err_probe(&pdev->dev, PTR_ERR(grf), "rockchip_spdif missing 'rockchip,grf'\n"); - return PTR_ERR(grf); - } /* Select the 8 channel SPDIF solution on RK3288 as * the 2 channel one does not appear to work @@ -334,55 +381,63 @@ static int rk_spdif_probe(struct platform_device *pdev) spdif->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, spdif); - pm_runtime_enable(&pdev->dev); + ret = devm_add_action_or_reset(&pdev->dev, rk_spdif_suspend, &pdev->dev); + if (ret) + return ret; + + devm_pm_runtime_enable(&pdev->dev); + if (!pm_runtime_enabled(&pdev->dev)) { ret = rk_spdif_runtime_resume(&pdev->dev); if (ret) - goto err_pm_runtime; + return ret; } + ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Could not register PCM\n"); + ret = devm_snd_soc_register_component(&pdev->dev, &rk_spdif_component, &rk_spdif_dai, 1); - if (ret) { - dev_err(&pdev->dev, "Could not register DAI\n"); - goto err_pm_suspend; - } - - ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); - if (ret) { - dev_err(&pdev->dev, "Could not register PCM\n"); - goto err_pm_suspend; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "Could not register DAI\n"); return 0; - -err_pm_suspend: - if (!pm_runtime_status_suspended(&pdev->dev)) - rk_spdif_runtime_suspend(&pdev->dev); -err_pm_runtime: - pm_runtime_disable(&pdev->dev); - - return ret; -} - -static void rk_spdif_remove(struct platform_device *pdev) -{ - pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) - rk_spdif_runtime_suspend(&pdev->dev); } static const struct dev_pm_ops rk_spdif_pm_ops = { RUNTIME_PM_OPS(rk_spdif_runtime_suspend, rk_spdif_runtime_resume, NULL) }; +static const struct of_device_id rk_spdif_match[] = { + { .compatible = "rockchip,rk3066-spdif", + .data = (void *)RK_SPDIF_RK3066 }, + { .compatible = 
"rockchip,rk3188-spdif", + .data = (void *)RK_SPDIF_RK3188 }, + { .compatible = "rockchip,rk3228-spdif", + .data = (void *)RK_SPDIF_RK3366 }, + { .compatible = "rockchip,rk3288-spdif", + .data = (void *)RK_SPDIF_RK3288 }, + { .compatible = "rockchip,rk3328-spdif", + .data = (void *)RK_SPDIF_RK3366 }, + { .compatible = "rockchip,rk3366-spdif", + .data = (void *)RK_SPDIF_RK3366 }, + { .compatible = "rockchip,rk3368-spdif", + .data = (void *)RK_SPDIF_RK3366 }, + { .compatible = "rockchip,rk3399-spdif", + .data = (void *)RK_SPDIF_RK3366 }, + { .compatible = "rockchip,rk3568-spdif", + .data = (void *)RK_SPDIF_RK3366 }, + {}, +}; +MODULE_DEVICE_TABLE(of, rk_spdif_match); + static struct platform_driver rk_spdif_driver = { .probe = rk_spdif_probe, - .remove = rk_spdif_remove, .driver = { .name = "rockchip-spdif", - .of_match_table = of_match_ptr(rk_spdif_match), + .of_match_table = rk_spdif_match, .pm = pm_ptr(&rk_spdif_pm_ops), }, }; diff --git a/sound/soc/rockchip/rockchip_spdif.h b/sound/soc/rockchip/rockchip_spdif.h index d8be9aae5b195c..ec33295e251280 100644 --- a/sound/soc/rockchip/rockchip_spdif.h +++ b/sound/soc/rockchip/rockchip_spdif.h @@ -2,7 +2,7 @@ /* * ALSA SoC Audio Layer - Rockchip SPDIF transceiver driver * - * Copyright (c) 2015 Collabora Ltd. + * Copyright (c) 2015-2026 Collabora Ltd. * Author: Sjoerd Simons */ @@ -13,41 +13,50 @@ * CFGR * transfer configuration register */ -#define SPDIF_CFGR_CLK_DIV_SHIFT (16) -#define SPDIF_CFGR_CLK_DIV_MASK (0xff << SPDIF_CFGR_CLK_DIV_SHIFT) -#define SPDIF_CFGR_CLK_DIV(x) (x << SPDIF_CFGR_CLK_DIV_SHIFT) +#define SPDIF_CFGR_CLK_DIV_MASK GENMASK(23, 16) +#define SPDIF_CFGR_CLK_DIV(x) FIELD_PREP(SPDIF_CFGR_CLK_DIV_MASK, x-1) -#define SPDIF_CFGR_HALFWORD_SHIFT 2 -#define SPDIF_CFGR_HALFWORD_DISABLE (0 << SPDIF_CFGR_HALFWORD_SHIFT) -#define SPDIF_CFGR_HALFWORD_ENABLE (1 << SPDIF_CFGR_HALFWORD_SHIFT) +#define SPDIF_CFGR_CLR_MASK BIT(7) +#define SPDIF_CFGR_CLR_EN FIELD_PREP(SPDIF_CFGR_CLR_MASK, 1) +#define SPDIF_CFGR_CLR_DIS FIELD_PREP(SPDIF_CFGR_CLR_MASK, 0) -#define SPDIF_CFGR_VDW_SHIFT 0 -#define SPDIF_CFGR_VDW(x) (x << SPDIF_CFGR_VDW_SHIFT) -#define SDPIF_CFGR_VDW_MASK (0xf << SPDIF_CFGR_VDW_SHIFT) +#define SPDIF_CFGR_CSE_MASK BIT(6) +#define SPDIF_CFGR_CSE_EN FIELD_PREP(SPDIF_CFGR_CSE_MASK, 1) +#define SPDIF_CFGR_CSE_DIS FIELD_PREP(SPDIF_CFGR_CSE_MASK, 0) -#define SPDIF_CFGR_VDW_16 SPDIF_CFGR_VDW(0x0) -#define SPDIF_CFGR_VDW_20 SPDIF_CFGR_VDW(0x1) -#define SPDIF_CFGR_VDW_24 SPDIF_CFGR_VDW(0x2) +#define SPDIF_CFGR_ADJ_MASK BIT(3) +#define SPDIF_CFGR_ADJ_LEFT_J FIELD_PREP(SPDIF_CFGR_ADJ_MASK, 1) +#define SPDIF_CFGR_ADJ_RIGHT_J FIELD_PREP(SPDIF_CFGR_ADJ_MASK, 0) + +#define SPDIF_CFGR_HALFWORD_MASK BIT(2) +#define SPDIF_CFGR_HALFWORD_DISABLE FIELD_PREP(SPDIF_CFGR_HALFWORD_MASK, 0) +#define SPDIF_CFGR_HALFWORD_ENABLE FIELD_PREP(SPDIF_CFGR_HALFWORD_MASK, 1) + +#define SDPIF_CFGR_VDW_MASK GENMASK(1, 0) +#define SPDIF_CFGR_VDW(x) FIELD_PREP(SDPIF_CFGR_VDW_MASK, x) + +#define SPDIF_CFGR_VDW_16 SPDIF_CFGR_VDW(0x0) +#define SPDIF_CFGR_VDW_20 SPDIF_CFGR_VDW(0x1) +#define SPDIF_CFGR_VDW_24 SPDIF_CFGR_VDW(0x2) /* * DMACR * DMA control register */ -#define SPDIF_DMACR_TDE_SHIFT 5 -#define SPDIF_DMACR_TDE_DISABLE (0 << SPDIF_DMACR_TDE_SHIFT) -#define SPDIF_DMACR_TDE_ENABLE (1 << SPDIF_DMACR_TDE_SHIFT) +#define SPDIF_DMACR_TDE_MASK BIT(5) +#define SPDIF_DMACR_TDE_DISABLE FIELD_PREP(SPDIF_DMACR_TDE_MASK, 0) +#define SPDIF_DMACR_TDE_ENABLE FIELD_PREP(SPDIF_DMACR_TDE_MASK, 1) -#define SPDIF_DMACR_TDL_SHIFT 0 -#define SPDIF_DMACR_TDL(x) ((x) << 
SPDIF_DMACR_TDL_SHIFT) -#define SPDIF_DMACR_TDL_MASK (0x1f << SPDIF_DMACR_TDL_SHIFT) +#define SPDIF_DMACR_TDL_MASK GENMASK(4, 0) +#define SPDIF_DMACR_TDL(x) FIELD_PREP(SPDIF_DMACR_TDL_MASK, x) /* * XFER * Transfer control register */ -#define SPDIF_XFER_TXS_SHIFT 0 -#define SPDIF_XFER_TXS_STOP (0 << SPDIF_XFER_TXS_SHIFT) -#define SPDIF_XFER_TXS_START (1 << SPDIF_XFER_TXS_SHIFT) +#define SPDIF_XFER_TXS_MASK BIT(0) +#define SPDIF_XFER_TXS_STOP FIELD_PREP(SPDIF_XFER_TXS_MASK, 0) +#define SPDIF_XFER_TXS_START FIELD_PREP(SPDIF_XFER_TXS_MASK, 1) #define SPDIF_CFGR (0x0000) #define SPDIF_SDBLR (0x0004) @@ -56,5 +65,9 @@ #define SPDIF_INTSR (0x0010) #define SPDIF_XFER (0x0018) #define SPDIF_SMPDR (0x0020) +#define SPDIF_VLDFRn(x) (0x0060 + (x) * 4) +#define SPDIF_USRDRn(x) (0x0090 + (x) * 4) +#define SPDIF_CHNSRn(x) (0x00c0 + (x) * 4) +#define SPDIF_VERSION (0x01c0) #endif /* _ROCKCHIP_SPDIF_H */ diff --git a/sound/soc/sdca/sdca_asoc.c b/sound/soc/sdca/sdca_asoc.c index 498aba9df5d9b5..bb6e74e80a3e81 100644 --- a/sound/soc/sdca/sdca_asoc.c +++ b/sound/soc/sdca/sdca_asoc.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,41 @@ int sdca_asoc_count_component(struct device *dev, struct sdca_function_data *fun } EXPORT_SYMBOL_NS(sdca_asoc_count_component, "SND_SOC_SDCA"); +static int ge_put_enum_double(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_to_dapm(kcontrol); + struct snd_soc_component *component = snd_soc_dapm_to_component(dapm); + struct device *dev = component->dev; + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; + unsigned int *item = ucontrol->value.enumerated.item; + unsigned int reg = e->reg; + int ret; + + reg &= ~SDW_SDCA_CTL_CSEL(0x3F); + reg |= SDW_SDCA_CTL_CSEL(SDCA_CTL_GE_DETECTED_MODE); + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) { + dev_err(dev, "failed to resume writing %s: %d\n", + kcontrol->id.name, ret); + return ret; + } + + ret = snd_soc_component_read(component, reg); + pm_runtime_put(dev); + if (ret < 0) + return ret; + else if (ret <= SDCA_DETECTED_MODE_DETECTION_IN_PROGRESS) + return -EBUSY; + + ret = snd_soc_enum_item_to_val(e, item[0]); + if (ret <= SDCA_DETECTED_MODE_DETECTION_IN_PROGRESS) + return -EINVAL; + + return snd_soc_dapm_put_enum_double(kcontrol, ucontrol); +} + static int entity_early_parse_ge(struct device *dev, struct sdca_function_data *function, struct sdca_entity *entity) @@ -191,7 +227,7 @@ static int entity_early_parse_ge(struct device *dev, kctl->name = control_name; kctl->info = snd_soc_info_enum_double; kctl->get = snd_soc_dapm_get_enum_double; - kctl->put = snd_soc_dapm_put_enum_double; + kctl->put = ge_put_enum_double; kctl->private_value = (unsigned long)soc_enum; entity->ge.kctl = kctl; @@ -792,6 +828,48 @@ static int control_limit_kctl(struct device *dev, return 0; } +static int volatile_get_volsw(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); + struct device *dev = component->dev; + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) { + dev_err(dev, "failed to resume reading %s: %d\n", + kcontrol->id.name, ret); + return ret; + } + + ret = snd_soc_get_volsw(kcontrol, ucontrol); + + pm_runtime_put(dev); + + return ret; +} + +static int volatile_put_volsw(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = 
snd_kcontrol_chip(kcontrol); + struct device *dev = component->dev; + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) { + dev_err(dev, "failed to resume writing %s: %d\n", + kcontrol->id.name, ret); + return ret; + } + + ret = snd_soc_put_volsw(kcontrol, ucontrol); + + pm_runtime_put(dev); + + return ret; +} + static int populate_control(struct device *dev, struct sdca_function_data *function, struct sdca_entity *entity, @@ -849,8 +927,13 @@ static int populate_control(struct device *dev, (*kctl)->private_value = (unsigned long)mc; (*kctl)->iface = SNDRV_CTL_ELEM_IFACE_MIXER; (*kctl)->info = snd_soc_info_volsw; - (*kctl)->get = snd_soc_get_volsw; - (*kctl)->put = snd_soc_put_volsw; + if (control->is_volatile) { + (*kctl)->get = volatile_get_volsw; + (*kctl)->put = volatile_put_volsw; + } else { + (*kctl)->get = snd_soc_get_volsw; + (*kctl)->put = snd_soc_put_volsw; + } if (readonly_control(control)) (*kctl)->access = SNDRV_CTL_ELEM_ACCESS_READ; diff --git a/sound/soc/sdca/sdca_fdl.c b/sound/soc/sdca/sdca_fdl.c index 8bee9f23c4739f..07892bc3a44e6e 100644 --- a/sound/soc/sdca/sdca_fdl.c +++ b/sound/soc/sdca/sdca_fdl.c @@ -256,8 +256,7 @@ static int fdl_load_file(struct sdca_interrupt *interrupt, tmp->file_length != firmware->size) { dev_err(dev, "bad disk SWF size\n"); } else if (!swf || swf->file_version <= tmp->file_version) { - dev_dbg(dev, "using SWF from disk: %x-%x-%x\n", - tmp->vendor_id, tmp->file_id, tmp->file_version); + dev_dbg(dev, "using SWF from disk\n"); swf = tmp; } } @@ -267,6 +266,9 @@ static int fdl_load_file(struct sdca_interrupt *interrupt, return -ENOENT; } + dev_info(dev, "loading SWF: %x-%x-%x\n", + swf->vendor_id, swf->file_id, swf->file_version); + ret = sdca_ump_write_message(dev, interrupt->device_regmap, interrupt->function_regmap, interrupt->function, interrupt->entity, diff --git a/sound/soc/sdca/sdca_functions.c b/sound/soc/sdca/sdca_functions.c index 80c71116e6d4c6..95b67bb904c313 100644 --- a/sound/soc/sdca/sdca_functions.c +++ b/sound/soc/sdca/sdca_functions.c @@ -911,10 +911,38 @@ static int find_sdca_control_value(struct device *dev, struct sdca_entity *entit return 0; } -/* - * TODO: Add support for -cn- properties, allowing different channels to have - * different defaults etc. 
- */ +static int find_sdca_control_reset(const struct sdca_entity *entity, + struct sdca_control *control) +{ + switch (SDCA_CTL_TYPE(entity->type, control->sel)) { + case SDCA_CTL_TYPE_S(FU, AGC): + case SDCA_CTL_TYPE_S(FU, BASS_BOOST): + case SDCA_CTL_TYPE_S(FU, LOUDNESS): + case SDCA_CTL_TYPE_S(SMPU, TRIGGER_ENABLE): + case SDCA_CTL_TYPE_S(GE, SELECTED_MODE): + case SDCA_CTL_TYPE_S(TG, TONE_DIVIDER): + case SDCA_CTL_TYPE_S(ENTITY_0, COMMIT_GROUP_MASK): + control->has_reset = true; + control->reset = 0; + break; + case SDCA_CTL_TYPE_S(XU, BYPASS): + case SDCA_CTL_TYPE_S(MFPU, BYPASS): + case SDCA_CTL_TYPE_S(FU, MUTE): + case SDCA_CTL_TYPE_S(CX, CLOCK_SELECT): + control->has_reset = true; + control->reset = 1; + break; + case SDCA_CTL_TYPE_S(PDE, REQUESTED_PS): + control->has_reset = true; + control->reset = 3; + break; + default: + break; + } + + return 0; +} + static int find_sdca_entity_control(struct device *dev, struct sdca_entity *entity, struct fwnode_handle *control_node, struct sdca_control *control) @@ -990,6 +1018,10 @@ static int find_sdca_entity_control(struct device *dev, struct sdca_entity *enti control->is_volatile = find_sdca_control_volatile(entity, control); + ret = find_sdca_control_reset(entity, control); + if (ret) + return ret; + ret = find_sdca_control_range(dev, control_node, &control->range); if (ret) { dev_err(dev, "%s: control %#x: range missing: %d\n", @@ -2033,6 +2065,7 @@ static int find_sdca_filesets(struct device *dev, struct sdw_slave *sdw, num_sets = fwnode_property_count_u32(function_node, "mipi-sdca-file-set-id-list"); if (num_sets == 0 || num_sets == -EINVAL) { + dev_dbg(dev, "%pfwP: file set id list missing\n", function_node); return 0; } else if (num_sets < 0) { dev_err(dev, "%pfwP: failed to read file set list: %d\n", diff --git a/sound/soc/sdca/sdca_jack.c b/sound/soc/sdca/sdca_jack.c index 5b9cf69cbcd6b6..605514f0204540 100644 --- a/sound/soc/sdca/sdca_jack.c +++ b/sound/soc/sdca/sdca_jack.c @@ -41,10 +41,11 @@ int sdca_jack_process(struct sdca_interrupt *interrupt) struct jack_state *state = interrupt->priv; struct snd_kcontrol *kctl = state->kctl; struct snd_ctl_elem_value *ucontrol __free(kfree) = NULL; - struct soc_enum *soc_enum; unsigned int reg, val; int ret; + guard(rwsem_write)(rwsem); + if (!kctl) { const char *name __free(kfree) = kasprintf(GFP_KERNEL, "%s %s", interrupt->entity->label, @@ -54,16 +55,12 @@ int sdca_jack_process(struct sdca_interrupt *interrupt) return -ENOMEM; kctl = snd_soc_component_get_kcontrol(component, name); - if (!kctl) { + if (!kctl) dev_dbg(dev, "control not found: %s\n", name); - return -ENOENT; - } - - state->kctl = kctl; + else + state->kctl = kctl; } - soc_enum = (struct soc_enum *)kctl->private_value; - reg = SDW_SDCA_CTL(interrupt->function->desc->adr, interrupt->entity->id, interrupt->control->sel, 0); @@ -73,13 +70,12 @@ int sdca_jack_process(struct sdca_interrupt *interrupt) return ret; } + reg = SDW_SDCA_CTL(interrupt->function->desc->adr, interrupt->entity->id, + SDCA_CTL_GE_SELECTED_MODE, 0); + switch (val) { case SDCA_DETECTED_MODE_DETECTION_IN_PROGRESS: case SDCA_DETECTED_MODE_JACK_UNKNOWN: - reg = SDW_SDCA_CTL(interrupt->function->desc->adr, - interrupt->entity->id, - SDCA_CTL_GE_SELECTED_MODE, 0); - /* * Selected mode is not normally marked as volatile register * (RW), but here force a read from the hardware. 
If the @@ -100,21 +96,29 @@ int sdca_jack_process(struct sdca_interrupt *interrupt) dev_dbg(dev, "%s: %#x\n", interrupt->name, val); - ucontrol = kzalloc(sizeof(*ucontrol), GFP_KERNEL); - if (!ucontrol) - return -ENOMEM; + if (kctl) { + struct soc_enum *soc_enum = (struct soc_enum *)kctl->private_value; + + ucontrol = kzalloc(sizeof(*ucontrol), GFP_KERNEL); + if (!ucontrol) + return -ENOMEM; - ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(soc_enum, val); + ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(soc_enum, val); - down_write(rwsem); - ret = kctl->put(kctl, ucontrol); - up_write(rwsem); - if (ret < 0) { - dev_err(dev, "failed to update selected mode: %d\n", ret); - return ret; - } + ret = snd_soc_dapm_put_enum_double(kctl, ucontrol); + if (ret < 0) { + dev_err(dev, "failed to update selected mode: %d\n", ret); + return ret; + } - snd_ctl_notify(card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id); + snd_ctl_notify(card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id); + } else { + ret = regmap_write(interrupt->function_regmap, reg, val); + if (ret) { + dev_err(dev, "failed to write selected mode: %d\n", ret); + return ret; + } + } return sdca_jack_report(interrupt); } diff --git a/sound/soc/sdca/sdca_regmap.c b/sound/soc/sdca/sdca_regmap.c index 2cca9a9c71ea90..4f8a685dc43d7f 100644 --- a/sound/soc/sdca/sdca_regmap.c +++ b/sound/soc/sdca/sdca_regmap.c @@ -218,7 +218,8 @@ int sdca_regmap_count_constants(struct device *dev, struct sdca_entity *entity = &function->entities[i]; for (j = 0; j < entity->num_controls; j++) { - if (entity->controls[j].mode == SDCA_ACCESS_MODE_DC) + if (entity->controls[j].mode == SDCA_ACCESS_MODE_DC || + entity->controls[j].has_reset) nconsts += hweight64(entity->controls[j].cn_list); } } @@ -255,7 +256,8 @@ int sdca_regmap_populate_constants(struct device *dev, struct sdca_control *control = &entity->controls[j]; int cn; - if (control->mode != SDCA_ACCESS_MODE_DC) + if (control->mode != SDCA_ACCESS_MODE_DC && + !control->has_reset) continue; l = 0; @@ -264,7 +266,10 @@ int sdca_regmap_populate_constants(struct device *dev, consts[k].reg = SDW_SDCA_CTL(function->desc->adr, entity->id, control->sel, cn); - consts[k].def = control->values[l]; + if (control->mode == SDCA_ACCESS_MODE_DC) + consts[k].def = control->values[l]; + else + consts[k].def = control->reset; k++; l++; } @@ -306,6 +311,9 @@ static int populate_control_defaults(struct device *dev, struct regmap *regmap, i++; } else if (!control->is_volatile) { + if (control->has_reset) + regcache_drop_region(regmap, reg, reg); + ret = regmap_read(regmap, reg, &val); if (ret) { dev_err(dev, "Failed to read initial %#x: %d\n", diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig index 54cd3807f8c639..e31f4c4061d80e 100644 --- a/sound/soc/sof/intel/Kconfig +++ b/sound/soc/sof/intel/Kconfig @@ -319,6 +319,7 @@ config SND_SOC_SOF_NOVALAKE config SND_SOC_SOF_HDA_COMMON tristate + select SND_HDA_EXT_CORE config SND_SOC_SOF_HDA_GENERIC tristate diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c index 0500b690f9a334..596c3d77a34e18 100644 --- a/sound/soc/sof/ipc4-control.c +++ b/sound/soc/sof/ipc4-control.c @@ -362,6 +362,7 @@ sof_ipc4_refresh_bytes_control(struct snd_sof_control *scontrol, bool lock) "%s: no space for data in %s (%zu, %zu)\n", __func__, scontrol->name, msg->data_size, scontrol->max_size - sizeof(*data)); + ret = -EINVAL; goto out; } diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c index 
db6913c0537897..2d260fbc9b8351 100644 --- a/sound/soc/ti/davinci-mcasp.c +++ b/sound/soc/ti/davinci-mcasp.c @@ -71,6 +71,7 @@ struct davinci_mcasp_context { struct davinci_mcasp_ruledata { struct davinci_mcasp *mcasp; int serializers; + int stream; }; struct davinci_mcasp { @@ -88,21 +89,27 @@ struct davinci_mcasp { bool missing_audio_param; /* McASP specific data */ - int tdm_slots; + int tdm_slots_tx; + int tdm_slots_rx; u32 tdm_mask[2]; - int slot_width; + int slot_width_tx; + int slot_width_rx; u8 op_mode; u8 dismod; u8 num_serializer; u8 *serial_dir; u8 version; - u8 bclk_div; + u8 bclk_div_tx; + u8 bclk_div_rx; int streams; u32 irq_request[2]; - int sysclk_freq; + unsigned int sysclk_freq_tx; + unsigned int sysclk_freq_rx; bool bclk_master; - u32 auxclk_fs_ratio; + bool async_mode; + u32 auxclk_fs_ratio_tx; + u32 auxclk_fs_ratio_rx; unsigned long pdir; /* Pin direction bitfield */ @@ -180,10 +187,16 @@ static void mcasp_set_ctl_reg(struct davinci_mcasp *mcasp, u32 ctl_reg, u32 val) static bool mcasp_is_synchronous(struct davinci_mcasp *mcasp) { - u32 rxfmctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXFMCTL_REG); u32 aclkxctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_ACLKXCTL_REG); - return !(aclkxctl & TX_ASYNC) && rxfmctl & AFSRE; + return !(aclkxctl & TX_ASYNC); +} + +static bool mcasp_is_frame_producer(struct davinci_mcasp *mcasp) +{ + u32 rxfmctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXFMCTL_REG); + + return rxfmctl & AFSRE; } static inline void mcasp_set_clk_pdir(struct davinci_mcasp *mcasp, bool enable) @@ -198,6 +211,27 @@ static inline void mcasp_set_clk_pdir(struct davinci_mcasp *mcasp, bool enable) } } +static inline void mcasp_set_clk_pdir_stream(struct davinci_mcasp *mcasp, + int stream, bool enable) +{ + u32 bit, bit_end; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + bit = PIN_BIT_ACLKX; + bit_end = PIN_BIT_AFSX + 1; + } else { + bit = PIN_BIT_ACLKR; + bit_end = PIN_BIT_AFSR + 1; + } + + for_each_set_bit_from(bit, &mcasp->pdir, bit_end) { + if (enable) + mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit)); + else + mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit)); + } +} + static inline void mcasp_set_axr_pdir(struct davinci_mcasp *mcasp, bool enable) { u32 bit; @@ -210,6 +244,36 @@ static inline void mcasp_set_axr_pdir(struct davinci_mcasp *mcasp, bool enable) } } +static inline int mcasp_get_tdm_slots(struct davinci_mcasp *mcasp, int stream) +{ + return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? + mcasp->tdm_slots_tx : mcasp->tdm_slots_rx; +} + +static inline int mcasp_get_slot_width(struct davinci_mcasp *mcasp, int stream) +{ + return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? + mcasp->slot_width_tx : mcasp->slot_width_rx; +} + +static inline unsigned int mcasp_get_sysclk_freq(struct davinci_mcasp *mcasp, int stream) +{ + return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? + mcasp->sysclk_freq_tx : mcasp->sysclk_freq_rx; +} + +static inline unsigned int mcasp_get_bclk_div(struct davinci_mcasp *mcasp, int stream) +{ + return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? + mcasp->bclk_div_tx : mcasp->bclk_div_rx; +} + +static inline unsigned int mcasp_get_auxclk_fs_ratio(struct davinci_mcasp *mcasp, int stream) +{ + return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
+ mcasp->auxclk_fs_ratio_tx : mcasp->auxclk_fs_ratio_rx; +} + static void mcasp_start_rx(struct davinci_mcasp *mcasp) { if (mcasp->rxnumevt) { /* enable FIFO */ @@ -225,13 +289,17 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp) /* * When ASYNC == 0 the transmit and receive sections operate * synchronously from the transmit clock and frame sync. We need to make - * sure that the TX signlas are enabled when starting reception. + * sure that the TX signals are enabled when starting reception, + * when the McASP is the producer. */ - if (mcasp_is_synchronous(mcasp)) { + if (mcasp_is_frame_producer(mcasp) && mcasp_is_synchronous(mcasp)) { mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST); mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST); - mcasp_set_clk_pdir(mcasp, true); } + if (mcasp_is_synchronous(mcasp)) + mcasp_set_clk_pdir(mcasp, true); + else + mcasp_set_clk_pdir_stream(mcasp, SNDRV_PCM_STREAM_CAPTURE, true); /* Activate serializer(s) */ mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF); @@ -240,7 +308,7 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp) mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSMRST); /* Release Frame Sync generator */ mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXFSRST); - if (mcasp_is_synchronous(mcasp)) + if (mcasp_is_frame_producer(mcasp) && mcasp_is_synchronous(mcasp)) mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST); /* enable receive IRQs */ @@ -262,7 +330,10 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp) /* Start clocks */ mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST); mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST); - mcasp_set_clk_pdir(mcasp, true); + if (mcasp_is_synchronous(mcasp)) + mcasp_set_clk_pdir(mcasp, true); + else + mcasp_set_clk_pdir_stream(mcasp, SNDRV_PCM_STREAM_PLAYBACK, true); /* Activate serializer(s) */ mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF); @@ -305,11 +376,19 @@ static void mcasp_stop_rx(struct davinci_mcasp *mcasp) /* * In synchronous mode stop the TX clocks if no other stream is * running + * Otherwise in async mode only stop RX clocks */ - if (mcasp_is_synchronous(mcasp) && !mcasp->streams) { + if (mcasp_is_synchronous(mcasp) && !mcasp->streams) mcasp_set_clk_pdir(mcasp, false); + else if (!mcasp_is_synchronous(mcasp)) + mcasp_set_clk_pdir_stream(mcasp, SNDRV_PCM_STREAM_CAPTURE, false); + /* + * When McASP is the producer and operating in synchronous mode, + * stop the transmit clocks if no other stream is running. As + * tx & rx operate synchronously from the transmit clock. + */ + if (mcasp_is_frame_producer(mcasp) && mcasp_is_synchronous(mcasp) && !mcasp->streams) mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, 0); - } mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, 0); mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF); @@ -332,11 +411,14 @@ static void mcasp_stop_tx(struct davinci_mcasp *mcasp) /* * In synchronous mode keep TX clocks running if the capture stream is * still running. 
+ * Otherwise in async mode only stop TX clocks */ - if (mcasp_is_synchronous(mcasp) && mcasp->streams) + if (mcasp_is_frame_producer(mcasp) && mcasp_is_synchronous(mcasp) && mcasp->streams) val = TXHCLKRST | TXCLKRST | TXFSRST; - else + if (mcasp_is_synchronous(mcasp) && !mcasp->streams) mcasp_set_clk_pdir(mcasp, false); + else if (!mcasp_is_synchronous(mcasp)) + mcasp_set_clk_pdir_stream(mcasp, SNDRV_PCM_STREAM_PLAYBACK, false); mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, val); @@ -620,13 +702,39 @@ static int __davinci_mcasp_set_clkdiv(struct davinci_mcasp *mcasp, int div_id, AHCLKRDIV(div - 1), AHCLKRDIV_MASK); break; + case MCASP_CLKDIV_AUXCLK_TXONLY: /* MCLK divider for TX only */ + mcasp_mod_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, + AHCLKXDIV(div - 1), AHCLKXDIV_MASK); + break; + + case MCASP_CLKDIV_AUXCLK_RXONLY: /* MCLK divider for RX only */ + mcasp_mod_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, + AHCLKRDIV(div - 1), AHCLKRDIV_MASK); + break; + case MCASP_CLKDIV_BCLK: /* BCLK divider */ mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXDIV(div - 1), ACLKXDIV_MASK); mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRDIV(div - 1), ACLKRDIV_MASK); + if (explicit) { + mcasp->bclk_div_tx = div; + mcasp->bclk_div_rx = div; + } + break; + + case MCASP_CLKDIV_BCLK_TXONLY: /* BCLK divider for TX only */ + mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, + ACLKXDIV(div - 1), ACLKXDIV_MASK); if (explicit) - mcasp->bclk_div = div; + mcasp->bclk_div_tx = div; + break; + + case MCASP_CLKDIV_BCLK_RXONLY: /* BCLK divider for RX only */ + mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, + ACLKRDIV(div - 1), ACLKRDIV_MASK); + if (explicit) + mcasp->bclk_div_rx = div; break; case MCASP_CLKDIV_BCLK_FS_RATIO: @@ -640,11 +748,33 @@ static int __davinci_mcasp_set_clkdiv(struct davinci_mcasp *mcasp, int div_id, * tdm_slot width by dividing the ratio by the * number of configured tdm slots. 
*/ - mcasp->slot_width = div / mcasp->tdm_slots; - if (div % mcasp->tdm_slots) + mcasp->slot_width_tx = div / mcasp->tdm_slots_tx; + if (div % mcasp->tdm_slots_tx) + dev_warn(mcasp->dev, + "%s(): BCLK/LRCLK %d is not divisible by %d tx tdm slots", + __func__, div, mcasp->tdm_slots_tx); + + mcasp->slot_width_rx = div / mcasp->tdm_slots_rx; + if (div % mcasp->tdm_slots_rx) dev_warn(mcasp->dev, - "%s(): BCLK/LRCLK %d is not divisible by %d tdm slots", - __func__, div, mcasp->tdm_slots); + "%s(): BCLK/LRCLK %d is not divisible by %d rx tdm slots", + __func__, div, mcasp->tdm_slots_rx); + break; + + case MCASP_CLKDIV_BCLK_FS_RATIO_TXONLY: + mcasp->slot_width_tx = div / mcasp->tdm_slots_tx; + if (div % mcasp->tdm_slots_tx) + dev_warn(mcasp->dev, + "%s(): BCLK/LRCLK %d is not divisible by %d tx tdm slots", + __func__, div, mcasp->tdm_slots_tx); + break; + + case MCASP_CLKDIV_BCLK_FS_RATIO_RXONLY: + mcasp->slot_width_rx = div / mcasp->tdm_slots_rx; + if (div % mcasp->tdm_slots_rx) + dev_warn(mcasp->dev, + "%s(): BCLK/LRCLK %d is not divisible by %d rx tdm slots", + __func__, div, mcasp->tdm_slots_rx); break; default: @@ -678,6 +808,20 @@ static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id, mcasp_clr_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE); clear_bit(PIN_BIT_AHCLKX, &mcasp->pdir); + mcasp->sysclk_freq_tx = freq; + mcasp->sysclk_freq_rx = freq; + break; + case MCASP_CLK_HCLK_AHCLK_TXONLY: + mcasp_clr_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, + AHCLKXE); + clear_bit(PIN_BIT_AHCLKX, &mcasp->pdir); + mcasp->sysclk_freq_tx = freq; + break; + case MCASP_CLK_HCLK_AHCLK_RXONLY: + mcasp_clr_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, + AHCLKRE); + clear_bit(PIN_BIT_AHCLKR, &mcasp->pdir); + mcasp->sysclk_freq_rx = freq; break; case MCASP_CLK_HCLK_AUXCLK: mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, @@ -685,22 +829,56 @@ static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id, mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE); set_bit(PIN_BIT_AHCLKX, &mcasp->pdir); + mcasp->sysclk_freq_tx = freq; + mcasp->sysclk_freq_rx = freq; + break; + case MCASP_CLK_HCLK_AUXCLK_TXONLY: + mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, + AHCLKXE); + set_bit(PIN_BIT_AHCLKX, &mcasp->pdir); + mcasp->sysclk_freq_tx = freq; + break; + case MCASP_CLK_HCLK_AUXCLK_RXONLY: + mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, + AHCLKRE); + set_bit(PIN_BIT_AHCLKR, &mcasp->pdir); + mcasp->sysclk_freq_rx = freq; break; default: dev_err(mcasp->dev, "Invalid clk id: %d\n", clk_id); goto out; } } else { - /* Select AUXCLK as HCLK */ - mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXE); - mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE); - set_bit(PIN_BIT_AHCLKX, &mcasp->pdir); + /* McASP is clock master, select AUXCLK as HCLK */ + switch (clk_id) { + case MCASP_CLK_HCLK_AUXCLK_TXONLY: + mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, + AHCLKXE); + set_bit(PIN_BIT_AHCLKX, &mcasp->pdir); + mcasp->sysclk_freq_tx = freq; + break; + case MCASP_CLK_HCLK_AUXCLK_RXONLY: + mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, + AHCLKRE); + set_bit(PIN_BIT_AHCLKR, &mcasp->pdir); + mcasp->sysclk_freq_rx = freq; + break; + default: + mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, + AHCLKXE); + mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, + AHCLKRE); + set_bit(PIN_BIT_AHCLKX, &mcasp->pdir); + set_bit(PIN_BIT_AHCLKR, &mcasp->pdir); + mcasp->sysclk_freq_tx = freq; + mcasp->sysclk_freq_rx = freq; + break; + } } /* * When AHCLK X/R is selected to be output it means 
that the HCLK is * the same clock - coming via AUXCLK. */ - mcasp->sysclk_freq = freq; out: pm_runtime_put(mcasp->dev); return 0; @@ -712,9 +890,11 @@ static int davinci_mcasp_ch_constraint(struct davinci_mcasp *mcasp, int stream, { struct snd_pcm_hw_constraint_list *cl = &mcasp->chconstr[stream]; unsigned int *list = (unsigned int *) cl->list; - int slots = mcasp->tdm_slots; + int slots; int i, count = 0; + slots = mcasp_get_tdm_slots(mcasp, stream); + if (mcasp->tdm_mask[stream]) slots = hweight32(mcasp->tdm_mask[stream]); @@ -779,27 +959,42 @@ static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai, return -EINVAL; } - mcasp->tdm_slots = slots; + if (mcasp->async_mode) { + if (tx_mask) { + mcasp->tdm_slots_tx = slots; + mcasp->slot_width_tx = slot_width; + } + if (rx_mask) { + mcasp->tdm_slots_rx = slots; + mcasp->slot_width_rx = slot_width; + } + } else { + mcasp->tdm_slots_tx = slots; + mcasp->tdm_slots_rx = slots; + mcasp->slot_width_tx = slot_width; + mcasp->slot_width_rx = slot_width; + } + mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask; mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = rx_mask; - mcasp->slot_width = slot_width; return davinci_mcasp_set_ch_constraints(mcasp); } static int davinci_config_channel_size(struct davinci_mcasp *mcasp, - int sample_width) + int sample_width, int stream) { u32 fmt; u32 tx_rotate, rx_rotate, slot_width; u32 mask = (1ULL << sample_width) - 1; - if (mcasp->slot_width) - slot_width = mcasp->slot_width; - else if (mcasp->max_format_width) - slot_width = mcasp->max_format_width; - else - slot_width = sample_width; + slot_width = mcasp_get_slot_width(mcasp, stream); + if (!slot_width) { + if (mcasp->max_format_width) + slot_width = mcasp->max_format_width; + else + slot_width = sample_width; + } /* * TX rotation: * right aligned formats: rotate w/ slot_width @@ -822,17 +1017,23 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp, fmt = (slot_width >> 1) - 1; if (mcasp->op_mode != DAVINCI_MCASP_DIT_MODE) { - mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXSSZ(fmt), - RXSSZ(0x0F)); - mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSSZ(fmt), - TXSSZ(0x0F)); - mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXROT(tx_rotate), - TXROT(7)); - mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXROT(rx_rotate), - RXROT(7)); - mcasp_set_reg(mcasp, DAVINCI_MCASP_RXMASK_REG, mask); + if (!mcasp->async_mode || stream == SNDRV_PCM_STREAM_PLAYBACK) { + mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSSZ(fmt), + TXSSZ(0x0F)); + mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXROT(tx_rotate), + TXROT(7)); + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, mask); + } + if (!mcasp->async_mode || stream == SNDRV_PCM_STREAM_CAPTURE) { + mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXSSZ(fmt), + RXSSZ(0x0F)); + mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXROT(rx_rotate), + RXROT(7)); + mcasp_set_reg(mcasp, DAVINCI_MCASP_RXMASK_REG, mask); + } } else { /* + * DIT mode only use TX serializers * according to the TRM it should be TXROT=0, this one works: * 16 bit to 23-8 (TXROT=6, rotate 24 bits) * 24 bit to 23-0 (TXROT=0, rotate 0 bits) @@ -845,10 +1046,9 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp, TXROT(7)); mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSSZ(15), TXSSZ(0x0F)); + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, mask); } - mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, mask); - return 0; } @@ -859,11 +1059,13 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream, int i; u8 tx_ser = 
0; u8 rx_ser = 0; - u8 slots = mcasp->tdm_slots; + int slots; u8 max_active_serializers, max_rx_serializers, max_tx_serializers; int active_serializers, numevt; u32 reg; + slots = mcasp_get_tdm_slots(mcasp, stream); + /* In DIT mode we only allow maximum of one serializers for now */ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE) max_active_serializers = 1; @@ -991,7 +1193,7 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream, u32 mask = 0; u32 busel = 0; - total_slots = mcasp->tdm_slots; + total_slots = mcasp_get_tdm_slots(mcasp, stream); /* * If more than one serializer is needed, then use them with @@ -1022,7 +1224,10 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream, mask |= (1 << i); } - mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC); + if (mcasp->async_mode) + mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC); + else + mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC); if (!mcasp->dat_port) busel = TXSEL; @@ -1042,7 +1247,8 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream, * not running already we need to configure the TX slots in * order to have correct FSX on the bus */ - if (mcasp_is_synchronous(mcasp) && !mcasp->channels) + if (mcasp_is_frame_producer(mcasp) && mcasp_is_synchronous(mcasp) && + !mcasp->channels) mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXMOD(total_slots), FSXMOD(0x1FF)); } @@ -1120,16 +1326,33 @@ static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp, static int davinci_mcasp_calc_clk_div(struct davinci_mcasp *mcasp, unsigned int sysclk_freq, - unsigned int bclk_freq, bool set) + unsigned int bclk_freq, + int stream, + bool set) { - u32 reg = mcasp_get_reg(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG); int div = sysclk_freq / bclk_freq; int rem = sysclk_freq % bclk_freq; int error_ppm; int aux_div = 1; + int bclk_div_id, auxclk_div_id; + bool auxclk_enabled; + + if (mcasp->async_mode && stream == SNDRV_PCM_STREAM_CAPTURE) { + auxclk_enabled = mcasp_get_reg(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG) & AHCLKRE; + bclk_div_id = MCASP_CLKDIV_BCLK_RXONLY; + auxclk_div_id = MCASP_CLKDIV_AUXCLK_RXONLY; + } else if (mcasp->async_mode && stream == SNDRV_PCM_STREAM_PLAYBACK) { + auxclk_enabled = mcasp_get_reg(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG) & AHCLKXE; + bclk_div_id = MCASP_CLKDIV_BCLK_TXONLY; + auxclk_div_id = MCASP_CLKDIV_AUXCLK_TXONLY; + } else { + auxclk_enabled = mcasp_get_reg(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG) & AHCLKXE; + bclk_div_id = MCASP_CLKDIV_BCLK; + auxclk_div_id = MCASP_CLKDIV_AUXCLK; + } if (div > (ACLKXDIV_MASK + 1)) { - if (reg & AHCLKXE) { + if (auxclk_enabled) { aux_div = div / (ACLKXDIV_MASK + 1); if (div % (ACLKXDIV_MASK + 1)) aux_div++; @@ -1159,10 +1382,10 @@ static int davinci_mcasp_calc_clk_div(struct davinci_mcasp *mcasp, dev_info(mcasp->dev, "Sample-rate is off by %d PPM\n", error_ppm); - __davinci_mcasp_set_clkdiv(mcasp, MCASP_CLKDIV_BCLK, div, 0); - if (reg & AHCLKXE) - __davinci_mcasp_set_clkdiv(mcasp, MCASP_CLKDIV_AUXCLK, - aux_div, 0); + __davinci_mcasp_set_clkdiv(mcasp, bclk_div_id, div, false); + if (auxclk_enabled) + __davinci_mcasp_set_clkdiv(mcasp, auxclk_div_id, + aux_div, false); } return error_ppm; @@ -1213,6 +1436,7 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream, int channels = params_channels(params); int period_size = params_period_size(params); int ret; + unsigned int sysclk_freq = mcasp_get_sysclk_freq(mcasp, substream->stream); switch (params_format(params)) { case SNDRV_PCM_FORMAT_U8: @@ -1253,22 
+1477,26 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream, * If mcasp is BCLK master, and a BCLK divider was not provided by * the machine driver, we need to calculate the ratio. */ - if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) { - int slots = mcasp->tdm_slots; + if (mcasp->bclk_master && mcasp_get_bclk_div(mcasp, substream->stream) == 0 && + sysclk_freq) { + int slots, slot_width; int rate = params_rate(params); int sbits = params_width(params); unsigned int bclk_target; - if (mcasp->slot_width) - sbits = mcasp->slot_width; + slots = mcasp_get_tdm_slots(mcasp, substream->stream); + + slot_width = mcasp_get_slot_width(mcasp, substream->stream); + if (slot_width) + sbits = slot_width; if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) bclk_target = rate * sbits * slots; else bclk_target = rate * 128; - davinci_mcasp_calc_clk_div(mcasp, mcasp->sysclk_freq, - bclk_target, true); + davinci_mcasp_calc_clk_div(mcasp, sysclk_freq, + bclk_target, substream->stream, true); } ret = mcasp_common_hw_param(mcasp, substream->stream, @@ -1285,9 +1513,10 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream, if (ret) return ret; - davinci_config_channel_size(mcasp, word_length); + davinci_config_channel_size(mcasp, word_length, substream->stream); - if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) { + /* Channel constraints are disabled for async mode */ + if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE && !mcasp->async_mode) { mcasp->channels = channels; if (!mcasp->max_format_width) mcasp->max_format_width = word_length; @@ -1331,7 +1560,7 @@ static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params, snd_pcm_format_t i; snd_mask_none(&nfmt); - slot_width = rd->mcasp->slot_width; + slot_width = mcasp_get_slot_width(rd->mcasp, rd->stream); pcm_for_each_format(i) { if (snd_mask_test_format(fmt, i)) { @@ -1381,12 +1610,15 @@ static int davinci_mcasp_hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_interval *ri = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); int sbits = params_width(params); - int slots = rd->mcasp->tdm_slots; + int slots, slot_width; struct snd_interval range; int i; - if (rd->mcasp->slot_width) - sbits = rd->mcasp->slot_width; + slots = mcasp_get_tdm_slots(rd->mcasp, rd->stream); + + slot_width = mcasp_get_slot_width(rd->mcasp, rd->stream); + if (slot_width) + sbits = slot_width; snd_interval_any(&range); range.empty = 1; @@ -1396,16 +1628,17 @@ static int davinci_mcasp_hw_rule_rate(struct snd_pcm_hw_params *params, uint bclk_freq = sbits * slots * davinci_mcasp_dai_rates[i]; unsigned int sysclk_freq; + unsigned int ratio; int ppm; - if (rd->mcasp->auxclk_fs_ratio) - sysclk_freq = davinci_mcasp_dai_rates[i] * - rd->mcasp->auxclk_fs_ratio; + ratio = mcasp_get_auxclk_fs_ratio(rd->mcasp, rd->stream); + if (ratio) + sysclk_freq = davinci_mcasp_dai_rates[i] * ratio; else - sysclk_freq = rd->mcasp->sysclk_freq; + sysclk_freq = mcasp_get_sysclk_freq(rd->mcasp, rd->stream); ppm = davinci_mcasp_calc_clk_div(rd->mcasp, sysclk_freq, - bclk_freq, false); + bclk_freq, rd->stream, false); if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) { if (range.empty) { range.min = davinci_mcasp_dai_rates[i]; @@ -1431,30 +1664,34 @@ static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params, struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); struct snd_mask nfmt; int rate = params_rate(params); - int slots = rd->mcasp->tdm_slots; + int slots; int count = 0; snd_pcm_format_t i; + slots = 
mcasp_get_tdm_slots(rd->mcasp, rd->stream); + snd_mask_none(&nfmt); pcm_for_each_format(i) { if (snd_mask_test_format(fmt, i)) { uint sbits = snd_pcm_format_width(i); unsigned int sysclk_freq; - int ppm; + unsigned int ratio; + int ppm, slot_width; - if (rd->mcasp->auxclk_fs_ratio) - sysclk_freq = rate * - rd->mcasp->auxclk_fs_ratio; + ratio = mcasp_get_auxclk_fs_ratio(rd->mcasp, rd->stream); + if (ratio) + sysclk_freq = rate * ratio; else - sysclk_freq = rd->mcasp->sysclk_freq; + sysclk_freq = mcasp_get_sysclk_freq(rd->mcasp, rd->stream); - if (rd->mcasp->slot_width) - sbits = rd->mcasp->slot_width; + slot_width = mcasp_get_slot_width(rd->mcasp, rd->stream); + if (slot_width) + sbits = slot_width; ppm = davinci_mcasp_calc_clk_div(rd->mcasp, sysclk_freq, sbits * slots * rate, - false); + rd->stream, false); if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) { snd_mask_set_format(&nfmt, i); count++; @@ -1491,7 +1728,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, &mcasp->ruledata[substream->stream]; u32 max_channels = 0; int i, dir, ret; - int tdm_slots = mcasp->tdm_slots; + int tdm_slots; u8 *numevt; /* Do not allow more then one stream per direction */ @@ -1500,6 +1737,8 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, mcasp->substreams[substream->stream] = substream; + tdm_slots = mcasp_get_tdm_slots(mcasp, substream->stream); + if (mcasp->tdm_mask[substream->stream]) tdm_slots = hweight32(mcasp->tdm_mask[substream->stream]); @@ -1521,6 +1760,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, } ruledata->serializers = max_channels; ruledata->mcasp = mcasp; + ruledata->stream = substream->stream; max_channels *= tdm_slots; /* * If the already active stream has less channels than the calculated @@ -1528,9 +1768,13 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, * is in use we need to use that as a constraint for the second stream. * Otherwise (first stream or less allowed channels or more than one * serializer in use) we use the calculated constraint. + * + * However, in async mode, TX and RX have independent clocks and can + * use different configurations, so don't apply the constraint. */ if (mcasp->channels && mcasp->channels < max_channels && - ruledata->serializers == 1) + ruledata->serializers == 1 && + !mcasp->async_mode) max_channels = mcasp->channels; /* * But we can always allow channels upto the amount of @@ -1547,10 +1791,10 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, 0, SNDRV_PCM_HW_PARAM_CHANNELS, &mcasp->chconstr[substream->stream]); - if (mcasp->max_format_width) { + if (mcasp->max_format_width && !mcasp->async_mode) { /* * Only allow formats which require same amount of bits on the - * bus as the currently running stream + * bus as the currently running stream to ensure sync mode */ ret = snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, @@ -1559,8 +1803,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, SNDRV_PCM_HW_PARAM_FORMAT, -1); if (ret) return ret; - } - else if (mcasp->slot_width) { + } else if (mcasp_get_slot_width(mcasp, substream->stream)) { /* Only allow formats require <= slot_width bits on the bus */ ret = snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, @@ -1575,7 +1818,8 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, * If we rely on implicit BCLK divider setting we should * set constraints based on what we can provide. 
*/ - if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) { + if (mcasp->bclk_master && mcasp_get_bclk_div(mcasp, substream->stream) == 0 && + mcasp_get_sysclk_freq(mcasp, substream->stream)) { ret = snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, davinci_mcasp_hw_rule_rate, @@ -1752,8 +1996,6 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = { .formats = DAVINCI_MCASP_PCM_FMTS, }, .ops = &davinci_mcasp_dai_ops, - - .symmetric_rate = 1, }, { .name = "davinci-mcasp.1", @@ -1911,18 +2153,33 @@ static int davinci_mcasp_get_config(struct davinci_mcasp *mcasp, goto out; } + /* Parse TX-specific TDM slot and use it as default for RX */ if (of_property_read_u32(np, "tdm-slots", &val) == 0) { if (val < 2 || val > 32) { - dev_err(&pdev->dev, "tdm-slots must be in rage [2-32]\n"); + dev_err(&pdev->dev, "tdm-slots must be in range [2-32]\n"); return -EINVAL; } - pdata->tdm_slots = val; + pdata->tdm_slots_tx = val; + pdata->tdm_slots_rx = val; } else if (pdata->op_mode == DAVINCI_MCASP_IIS_MODE) { mcasp->missing_audio_param = true; goto out; } + /* Parse RX-specific TDM slot count if provided */ + if (of_property_read_u32(np, "tdm-slots-rx", &val) == 0) { + if (val < 2 || val > 32) { + dev_err(&pdev->dev, "tdm-slots-rx must be in range [2-32]\n"); + return -EINVAL; + } + + pdata->tdm_slots_rx = val; + } + + if (pdata->op_mode != DAVINCI_MCASP_DIT_MODE) + mcasp->async_mode = of_property_read_bool(np, "ti,async-mode"); + of_serial_dir32 = of_get_property(np, "serial-dir", &val); val /= sizeof(u32); if (of_serial_dir32) { @@ -1948,8 +2205,15 @@ static int davinci_mcasp_get_config(struct davinci_mcasp *mcasp, if (of_property_read_u32(np, "rx-num-evt", &val) == 0) pdata->rxnumevt = val; - if (of_property_read_u32(np, "auxclk-fs-ratio", &val) == 0) - mcasp->auxclk_fs_ratio = val; + /* Parse TX-specific auxclk/fs ratio and use it as default for RX */ + if (of_property_read_u32(np, "auxclk-fs-ratio", &val) == 0) { + mcasp->auxclk_fs_ratio_tx = val; + mcasp->auxclk_fs_ratio_rx = val; + } + + /* Parse RX-specific auxclk/fs ratio if provided */ + if (of_property_read_u32(np, "auxclk-fs-ratio-rx", &val) == 0) + mcasp->auxclk_fs_ratio_rx = val; if (of_property_read_u32(np, "dismod", &val) == 0) { if (val == 0 || val == 2 || val == 3) { @@ -1978,19 +2242,51 @@ static int davinci_mcasp_get_config(struct davinci_mcasp *mcasp, mcasp->op_mode = pdata->op_mode; /* sanity check for tdm slots parameter */ if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) { - if (pdata->tdm_slots < 2) { - dev_warn(&pdev->dev, "invalid tdm slots: %d\n", - pdata->tdm_slots); - mcasp->tdm_slots = 2; - } else if (pdata->tdm_slots > 32) { - dev_warn(&pdev->dev, "invalid tdm slots: %d\n", - pdata->tdm_slots); - mcasp->tdm_slots = 32; + if (pdata->tdm_slots_tx < 2) { + dev_warn(&pdev->dev, "invalid tdm tx slots: %d\n", + pdata->tdm_slots_tx); + mcasp->tdm_slots_tx = 2; + } else if (pdata->tdm_slots_tx > 32) { + dev_warn(&pdev->dev, "invalid tdm tx slots: %d\n", + pdata->tdm_slots_tx); + mcasp->tdm_slots_tx = 32; } else { - mcasp->tdm_slots = pdata->tdm_slots; + mcasp->tdm_slots_tx = pdata->tdm_slots_tx; + } + + if (pdata->tdm_slots_rx < 2) { + dev_warn(&pdev->dev, "invalid tdm rx slots: %d\n", + pdata->tdm_slots_rx); + mcasp->tdm_slots_rx = 2; + } else if (pdata->tdm_slots_rx > 32) { + dev_warn(&pdev->dev, "invalid tdm rx slots: %d\n", + pdata->tdm_slots_rx); + mcasp->tdm_slots_rx = 32; + } else { + mcasp->tdm_slots_rx = pdata->tdm_slots_rx; } } else { - mcasp->tdm_slots = 32; + mcasp->tdm_slots_tx = 32; + 
mcasp->tdm_slots_rx = 32; + } + + /* Different TX/RX slot counts require async mode */ + if (pdata->op_mode != DAVINCI_MCASP_DIT_MODE && + mcasp->tdm_slots_tx != mcasp->tdm_slots_rx && !mcasp->async_mode) { + dev_err(&pdev->dev, + "Different TX (%d) and RX (%d) TDM slots require ti,async-mode\n", + mcasp->tdm_slots_tx, mcasp->tdm_slots_rx); + return -EINVAL; + } + + /* Different TX/RX auxclk-fs-ratio require async mode */ + if (pdata->op_mode != DAVINCI_MCASP_DIT_MODE && + mcasp->auxclk_fs_ratio_tx && mcasp->auxclk_fs_ratio_rx && + mcasp->auxclk_fs_ratio_tx != mcasp->auxclk_fs_ratio_rx && !mcasp->async_mode) { + dev_err(&pdev->dev, + "Different TX (%d) and RX (%d) auxclk-fs-ratio require ti,async-mode\n", + mcasp->auxclk_fs_ratio_tx, mcasp->auxclk_fs_ratio_rx); + return -EINVAL; } mcasp->num_serializer = pdata->num_serializer; diff --git a/sound/soc/ti/davinci-mcasp.h b/sound/soc/ti/davinci-mcasp.h index 5de2b8a31061c9..83b3c67f4a2b66 100644 --- a/sound/soc/ti/davinci-mcasp.h +++ b/sound/soc/ti/davinci-mcasp.h @@ -298,10 +298,20 @@ /* Source of High-frequency transmit/receive clock */ #define MCASP_CLK_HCLK_AHCLK 0 /* AHCLKX/R */ #define MCASP_CLK_HCLK_AUXCLK 1 /* Internal functional clock */ +#define MCASP_CLK_HCLK_AHCLK_TXONLY 2 /* AHCLKX for TX only */ +#define MCASP_CLK_HCLK_AHCLK_RXONLY 3 /* AHCLKR for RX only */ +#define MCASP_CLK_HCLK_AUXCLK_TXONLY 4 /* AUXCLK for TX only */ +#define MCASP_CLK_HCLK_AUXCLK_RXONLY 5 /* AUXCLK for RX only */ /* clock divider IDs */ #define MCASP_CLKDIV_AUXCLK 0 /* HCLK divider from AUXCLK */ #define MCASP_CLKDIV_BCLK 1 /* BCLK divider from HCLK */ #define MCASP_CLKDIV_BCLK_FS_RATIO 2 /* to set BCLK FS ration */ +#define MCASP_CLKDIV_AUXCLK_TXONLY 3 /* AUXCLK divider for TX only */ +#define MCASP_CLKDIV_AUXCLK_RXONLY 4 /* AUXCLK divider for RX only */ +#define MCASP_CLKDIV_BCLK_TXONLY 5 /* BCLK divider for TX only */ +#define MCASP_CLKDIV_BCLK_RXONLY 6 /* BCLK divider for RX only */ +#define MCASP_CLKDIV_BCLK_FS_RATIO_TXONLY 7 /* BCLK/FS ratio for TX only */ +#define MCASP_CLKDIV_BCLK_FS_RATIO_RXONLY 8 /* BCLK/FS ratio for RX only*/ #endif /* DAVINCI_MCASP_H */ diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 07d5cf0077d8aa..ec65e8a7191937 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -311,13 +311,8 @@ static int snd_audigy2nx_led_update(struct usb_mixer_interface *mixer, if (pm.err < 0) return pm.err; - if (chip->usb_id == USB_ID(0x041e, 0x3042)) - err = snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), 0x24, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, - !value, 0, NULL, 0); - /* USB X-Fi S51 Pro */ - if (chip->usb_id == USB_ID(0x041e, 0x30df)) + if (chip->usb_id == USB_ID(0x041e, 0x3042) || /* USB X-Fi S51 */ + chip->usb_id == USB_ID(0x041e, 0x30df)) /* USB X-Fi S51 Pro */ err = snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), 0x24, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 4f9d19bf1ccacd..86c329632e3968 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -2146,6 +2146,8 @@ struct usb_audio_quirk_flags_table { static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { /* Device matches */ + DEVICE_FLG(0x001f, 0x0b21, /* AB13X USB Audio */ + QUIRK_FLAG_FORCE_IFACE_RESET | QUIRK_FLAG_IFACE_DELAY), DEVICE_FLG(0x03f0, 0x654a, /* HP 320 FHD Webcam */ QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16), DEVICE_FLG(0x041e, 0x3000, /* Creative SB Extigy */ @@ -2235,6 +2237,8 @@ static const struct 
usb_audio_quirk_flags_table quirk_flags_table[] = { DEVICE_FLG(0x0644, 0x806c, /* Esoteric XD */ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY | QUIRK_FLAG_IFACE_DELAY | QUIRK_FLAG_FORCE_IFACE_RESET), + DEVICE_FLG(0x0661, 0x0883, /* iBasso DC04 Ultra */ + QUIRK_FLAG_DSD_RAW), DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */ QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */ diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 3f7999317f4dfa..3fd98c5b6e1a88 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -197,7 +197,8 @@ static bool is_rust_noreturn(const struct symbol *func) * as well as changes to the source code itself between versions (since * these come from the Rust standard library). */ - return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") || + return str_ends_with(func->name, "_4core3num22from_ascii_radix_panic") || + str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") || str_ends_with(func->name, "_4core6option13expect_failed") || str_ends_with(func->name, "_4core6option13unwrap_failed") || str_ends_with(func->name, "_4core6result13unwrap_failed") || @@ -682,7 +683,7 @@ static int create_static_call_sections(struct objtool_file *file) key_sym = find_symbol_by_name(file->elf, tmp); if (!key_sym) { - if (!opts.module || file->klp) { + if (!opts.module) { ERROR("static_call: can't find static_call_key symbol: %s", tmp); return -1; } @@ -4761,7 +4762,7 @@ static int validate_ibt(struct objtool_file *file) !strcmp(sec->name, "__bug_table") || !strcmp(sec->name, "__ex_table") || !strcmp(sec->name, "__jump_table") || - !strcmp(sec->name, "__klp_funcs") || + !strcmp(sec->name, ".init.klp_funcs") || !strcmp(sec->name, "__mcount_loc") || !strcmp(sec->name, ".llvm.call-graph-profile") || !strcmp(sec->name, ".llvm_bb_addr_map") || diff --git a/tools/objtool/disas.c b/tools/objtool/disas.c index 2b5059f55e4008..26f08d41f2b133 100644 --- a/tools/objtool/disas.c +++ b/tools/objtool/disas.c @@ -108,6 +108,8 @@ static int sprint_name(char *str, const char *name, unsigned long offset) #define DINFO_FPRINTF(dinfo, ...) \ ((*(dinfo)->fprintf_func)((dinfo)->stream, __VA_ARGS__)) +#define bfd_vma_fmt \ + __builtin_choose_expr(sizeof(bfd_vma) == sizeof(unsigned long), "%#lx <%s>", "%#llx <%s>") static int disas_result_fprintf(struct disas_context *dctx, const char *fmt, va_list ap) @@ -170,10 +172,10 @@ static void disas_print_addr_sym(struct section *sec, struct symbol *sym, if (sym) { sprint_name(symstr, sym->name, addr - sym->offset); - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, symstr); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, symstr); } else { str = offstr(sec, addr); - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, str); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, str); free(str); } } @@ -252,7 +254,7 @@ static void disas_print_addr_reloc(bfd_vma addr, struct disassemble_info *dinfo) * example: "lea 0x0(%rip),%rdi". The kernel can reference * the next IP with _THIS_IP_ macro. 
*/ - DINFO_FPRINTF(dinfo, "0x%lx <_THIS_IP_>", addr); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, "_THIS_IP_"); return; } @@ -264,11 +266,11 @@ static void disas_print_addr_reloc(bfd_vma addr, struct disassemble_info *dinfo) */ if (reloc->sym->type == STT_SECTION) { str = offstr(reloc->sym->sec, reloc->sym->offset + offset); - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, str); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, str); free(str); } else { sprint_name(symstr, reloc->sym->name, offset); - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, symstr); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, symstr); } } @@ -311,7 +313,7 @@ static void disas_print_address(bfd_vma addr, struct disassemble_info *dinfo) */ sym = insn_call_dest(insn); if (sym && (sym->offset == addr || (sym->offset == 0 && is_reloc))) { - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, sym->name); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, sym->name); return; } diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 6a8ed9c62323ea..2c02c7b492658c 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -18,15 +18,14 @@ #include #include #include +#include +#include #include +#include #include #include #include -#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1)) -#define ALIGN_UP_POW2(x) (1U << ((8 * sizeof(x)) - __builtin_clz((x) - 1U))) -#define MAX(a, b) ((a) > (b) ? (a) : (b)) - static inline u32 str_hash(const char *str) { return jhash(str, strlen(str), 0); @@ -1336,7 +1335,7 @@ unsigned int elf_add_string(struct elf *elf, struct section *strtab, const char return -1; } - offset = ALIGN_UP(strtab->sh.sh_size, strtab->sh.sh_addralign); + offset = ALIGN(strtab->sh.sh_size, strtab->sh.sh_addralign); if (!elf_add_data(elf, strtab, str, strlen(str) + 1)) return -1; @@ -1378,7 +1377,7 @@ void *elf_add_data(struct elf *elf, struct section *sec, const void *data, size_ sec->data->d_size = size; sec->data->d_align = 1; - offset = ALIGN_UP(sec->sh.sh_size, sec->sh.sh_addralign); + offset = ALIGN(sec->sh.sh_size, sec->sh.sh_addralign); sec->sh.sh_size = offset + size; mark_sec_changed(elf, sec, true); @@ -1502,7 +1501,7 @@ static int elf_alloc_reloc(struct elf *elf, struct section *rsec) rsec->data->d_size = nr_relocs_new * elf_rela_size(elf); rsec->sh.sh_size = rsec->data->d_size; - nr_alloc = MAX(64, ALIGN_UP_POW2(nr_relocs_new)); + nr_alloc = max(64UL, roundup_pow_of_two(nr_relocs_new)); if (nr_alloc <= rsec->nr_alloc_relocs) return 0; diff --git a/tools/objtool/include/objtool/klp.h b/tools/objtool/include/objtool/klp.h index ad830a7ce55b6e..e32e5e8bc63129 100644 --- a/tools/objtool/include/objtool/klp.h +++ b/tools/objtool/include/objtool/klp.h @@ -6,12 +6,12 @@ #define SHN_LIVEPATCH 0xff20 /* - * __klp_objects and __klp_funcs are created by klp diff and used by the patch - * module init code to build the klp_patch, klp_object and klp_func structs - * needed by the livepatch API. + * .init.klp_objects and .init.klp_funcs are created by klp diff and used by the + * patch module init code to build the klp_patch, klp_object and klp_func + * structs needed by the livepatch API. 
*/ -#define KLP_OBJECTS_SEC "__klp_objects" -#define KLP_FUNCS_SEC "__klp_funcs" +#define KLP_OBJECTS_SEC ".init.klp_objects" +#define KLP_FUNCS_SEC ".init.klp_funcs" /* * __klp_relocs is an intermediate section which are created by klp diff and diff --git a/tools/objtool/klp-diff.c b/tools/objtool/klp-diff.c index 4d1f9e9977eb94..9f1f4011eb9cda 100644 --- a/tools/objtool/klp-diff.c +++ b/tools/objtool/klp-diff.c @@ -364,11 +364,40 @@ static int correlate_symbols(struct elfs *e) struct symbol *file1_sym, *file2_sym; struct symbol *sym1, *sym2; - /* Correlate locals */ - for (file1_sym = first_file_symbol(e->orig), - file2_sym = first_file_symbol(e->patched); ; - file1_sym = next_file_symbol(e->orig, file1_sym), - file2_sym = next_file_symbol(e->patched, file2_sym)) { + file1_sym = first_file_symbol(e->orig); + file2_sym = first_file_symbol(e->patched); + + /* + * Correlate any locals before the first FILE symbol. This has been + * seen when LTO inexplicably strips the initramfs_data.o FILE symbol + * due to the file only containing data and no code. + */ + for_each_sym(e->orig, sym1) { + if (sym1 == file1_sym || !is_local_sym(sym1)) + break; + + if (dont_correlate(sym1)) + continue; + + for_each_sym(e->patched, sym2) { + if (sym2 == file2_sym || !is_local_sym(sym2)) + break; + + if (sym2->twin || dont_correlate(sym2)) + continue; + + if (strcmp(sym1->demangled_name, sym2->demangled_name)) + continue; + + sym1->twin = sym2; + sym2->twin = sym1; + break; + } + } + + /* Correlate locals after the first FILE symbol */ + for (; ; file1_sym = next_file_symbol(e->orig, file1_sym), + file2_sym = next_file_symbol(e->patched, file2_sym)) { if (!file1_sym && file2_sym) { ERROR("FILE symbol mismatch: NULL != %s", file2_sym->name); @@ -1425,9 +1454,6 @@ static int clone_special_sections(struct elfs *e) { struct section *patched_sec; - if (create_fake_symbols(e->patched)) - return -1; - for_each_sec(e->patched, patched_sec) { if (is_special_section(patched_sec)) { if (clone_special_section(e, patched_sec)) @@ -1439,7 +1465,7 @@ static int clone_special_sections(struct elfs *e) } /* - * Create __klp_objects and __klp_funcs sections which are intermediate + * Create .init.klp_objects and .init.klp_funcs sections which are intermediate * sections provided as input to the patch module's init code for building the * klp_patch, klp_object and klp_func structs for the livepatch API. */ @@ -1704,6 +1730,17 @@ int cmd_klp_diff(int argc, const char **argv) if (!e.out) return -1; + /* + * Special section fake symbols are needed so that individual special + * section entries can be extracted by clone_special_sections(). + * + * Note the fake symbols are also needed by clone_included_functions() + * because __WARN_printf() call sites add references to bug table + * entries in the calling functions. 
+	 */
+	if (create_fake_symbols(e.patched))
+		return -1;
+
 	if (clone_included_functions(&e))
 		return -1;
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index ba5c2b643efaa2..d45bf4ccb3bf07 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -251,6 +251,7 @@ LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
 LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
 	-Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
+	-U_FORTIFY_SOURCE \
 	-fno-builtin-memcmp -fno-builtin-memcpy \
 	-fno-builtin-memset -fno-builtin-strnlen \
 	-fno-stack-protector -fno-PIE -fno-strict-aliasing \
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 844a580ae74ef8..890c3f8e51bb8b 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -2327,6 +2327,13 @@ ipv6_ping_novrf()
 		log_test_addr ${a} $? 2 "ping local, device bind"
 	done
 
+	for a in ${NSA_LO_IP6} ${NSA_LINKIP6}%${NSA_DEV} ${NSA_IP6}
+	do
+		log_start
+		run_cmd ${ping6} -c1 -w1 -I ::1 ${a}
+		log_test_addr ${a} $? 0 "ping local, from localhost"
+	done
+
 	#
 	# ip rule blocks address
 	#
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index b2e6e548f796d3..e70d3420954fc1 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -2329,17 +2329,16 @@ signal_address_tests()
 		ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
 		speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
+		chk_join_nr 3 3 3
 
 		# It is not directly linked to the commit introducing this
 		# symbol but for the parent one which is linked anyway.
-		if ! mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
-			chk_join_nr 3 3 2
-			chk_add_nr 4 4
-		else
-			chk_join_nr 3 3 3
+		if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
 			# the server will not signal the address terminating
 			# the MPC subflow
 			chk_add_nr 3 3
+		else
+			chk_add_nr 4 4
 		fi
 	fi
 }
@@ -3847,21 +3846,28 @@ userspace_pm_chk_get_addr()
 	fi
 }
 
-# $1: ns ; $2: event type ; $3: count
+# $1: ns ; $2: event type ; $3: count ; [ $4: attr ; $5: attr count ]
 chk_evt_nr()
 {
 	local ns=${1}
 	local evt_name="${2}"
 	local exp="${3}"
+	local attr="${4}"
+	local attr_exp="${5}"
 
 	local evts="${evts_ns1}"
 	local evt="${!evt_name}"
+	local attr_name
 	local count
 
+	if [ -n "${attr}" ]; then
+		attr_name=", ${attr}: ${attr_exp}"
+	fi
+
 	evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_
 	[ "${ns}" == "ns2" ] && evts="${evts_ns2}"
 
-	print_check "event ${ns} ${evt_name} (${exp})"
+	print_check "event ${ns} ${evt_name} (${exp}${attr_name})"
 	if [[ "${evt_name}" = "LISTENER_"* ]] &&
 	   ! mptcp_lib_kallsyms_has "mptcp_event_pm_listener$"; then
@@ -3872,11 +3878,42 @@ chk_evt_nr()
 	count=$(grep -cw "type:${evt}" "${evts}")
 	if [ "${count}" != "${exp}" ]; then
 		fail_test "got ${count} events, expected ${exp}"
+		cat "${evts}"
+		return
+	elif [ -z "${attr}" ]; then
+		print_ok
+		return
+	fi
+
+	count=$(grep -w "type:${evt}" "${evts}" | grep -c ",${attr}:")
+	if [ "${count}" != "${attr_exp}" ]; then
+		fail_test "got ${count} event attributes, expected ${attr_exp}"
+		grep -w "type:${evt}" "${evts}"
 	else
 		print_ok
 	fi
 }
 
+# $1: ns ; $2: event type ; $3: expected count
+wait_event()
+{
+	local ns="${1}"
+	local evt_name="${2}"
+	local exp="${3}"
+
+	local evt="${!evt_name}"
+	local evts="${evts_ns1}"
+	local count
+
+	[ "${ns}" == "ns2" ] && evts="${evts_ns2}"
+
+	for _ in $(seq 100); do
+		count=$(grep -cw "type:${evt}" "${evts}")
+		[ "${count}" -ge "${exp}" ] && break
+		sleep 0.1
+	done
+}
+
 userspace_tests()
 {
 	# userspace pm type prevents add_addr
@@ -4085,6 +4122,36 @@ userspace_tests()
 		kill_events_pids
 		mptcp_lib_kill_group_wait $tests_pid
 	fi
+
+	# userspace pm no duplicated spurious close events after an error
+	if reset_with_events "userspace pm no dup close events after error" &&
+	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+		set_userspace_pm $ns2
+		pm_nl_set_limits $ns1 0 2
+		{ timeout_test=120 test_linkfail=128 speed=slow \
+			run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+		local tests_pid=$!
+		wait_event ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
+		userspace_pm_add_sf $ns2 10.0.3.2 20
+		chk_mptcp_info subflows 1 subflows 1
+		chk_subflows_total 2 2
+
+		# force quick loss
+		ip netns exec $ns2 sysctl -q net.ipv4.tcp_syn_retries=1
+		if ip netns exec "${ns1}" ${iptables} -A INPUT -s "10.0.1.2" \
+			-p tcp --tcp-option 30 -j REJECT --reject-with tcp-reset &&
+		   ip netns exec "${ns2}" ${iptables} -A INPUT -d "10.0.1.2" \
+			-p tcp --tcp-option 30 -j REJECT --reject-with tcp-reset; then
+			wait_event ns2 MPTCP_LIB_EVENT_SUB_CLOSED 1
+			wait_event ns1 MPTCP_LIB_EVENT_SUB_CLOSED 1
+			chk_subflows_total 1 1
+			userspace_pm_add_sf $ns2 10.0.1.2 0
+			wait_event ns2 MPTCP_LIB_EVENT_SUB_CLOSED 2
+			chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 2 error 2
+		fi
+		kill_events_pids
+		mptcp_lib_kill_group_wait $tests_pid
+	fi
 }
 
 endpoint_tests()
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index a39fdc4aa2ffb3..9b722c1e4b0fe9 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -162,6 +162,39 @@ run_test() {
 	echo " ok"
 }
 
+run_test_csum() {
+	local -r msg="$1"
+	local -r dst="$2"
+	local csum_error_filter=UdpInCsumErrors
+	local csum_errors
+
+	printf "%-40s" "$msg"
+
+	is_ipv6 "$dst" && csum_error_filter=Udp6InCsumErrors
+
+	ip netns exec "$NS_DST" iperf3 -s -1 >/dev/null &
+	wait_local_port_listen "$NS_DST" 5201 tcp
+	local spid="$!"
+	ip netns exec "$NS_SRC" iperf3 -c "$dst" -t 2 >/dev/null
+	local retc="$?"
+	wait "$spid"
+	local rets="$?"
+	if [ "$rets" -ne 0 ] || [ "$retc" -ne 0 ]; then
+		echo " fail client exit code $retc, server $rets"
+		ret=1
+		return
+	fi
+
+	csum_errors=$(ip netns exec "$NS_DST" nstat -as "$csum_error_filter" |
+		      grep "$csum_error_filter" | awk '{print $2}')
+	if [ -n "$csum_errors" ] && [ "$csum_errors" -gt 0 ]; then
+		echo " fail - csum error on receive $csum_errors, expected 0"
+		ret=1
+		return
+	fi
+	echo " ok"
+}
+
 run_bench() {
 	local -r msg=$1
 	local -r dst=$2
@@ -260,6 +293,37 @@ for family in 4 6; do
 	ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
 	run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST
 	cleanup
+
+	# force segmentation and re-aggregation
+	create_vxlan_pair
+	ip netns exec "$NS_DST" ethtool -K veth"$DST" generic-receive-offload on
+	ip netns exec "$NS_SRC" ethtool -K veth"$SRC" tso off
+	ip -n "$NS_SRC" link set dev veth"$SRC" mtu 1430
+
+	# forward to a 2nd veth pair
+	ip -n "$NS_DST" link add br0 type bridge
+	ip -n "$NS_DST" link set dev veth"$DST" master br0
+
+	# segment the aggregated TSO packet, without csum offload
+	ip -n "$NS_DST" link add veth_segment type veth peer veth_rx
+	for FEATURE in tso tx-udp-segmentation tx-checksumming; do
+		ip netns exec "$NS_DST" ethtool -K veth_segment "$FEATURE" off
+	done
+	ip -n "$NS_DST" link set dev veth_segment master br0 up
+	ip -n "$NS_DST" link set dev br0 up
+	ip -n "$NS_DST" link set dev veth_rx up
+
+	# move the lower layer IP to the last added veth
+	for ADDR in "$BM_NET_V4$DST/24" "$BM_NET_V6$DST/64"; do
+		# the nodad argument will let iproute emit a harmless warning
+		# with ipv4 addresses
+		ip -n "$NS_DST" addr del dev veth"$DST" "$ADDR"
+		ip -n "$NS_DST" addr add dev veth_rx "$ADDR" \
+			nodad 2>/dev/null
+	done
+
+	run_test_csum "GSO after GRO" "$OL_NET$DST"
+	cleanup
 done
 
 exit $ret
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 0e8b5277be3bf8..a369b20d47f0fb 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -157,21 +157,28 @@ irqfd_shutdown(struct work_struct *work)
 }
 
-/* assumes kvm->irqfds.lock is held */
-static bool
-irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
+static bool irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
 {
+	/*
+	 * Assert that either irqfds.lock or SRCU is held, as irqfds.lock must
+	 * be held to prevent false positives (on the irqfd being active), and
+	 * while false negatives are impossible as irqfds are never added back
+	 * to the list once they're deactivated, the caller must at least hold
+	 * SRCU to guard against routing changes if the irqfd is deactivated.
+	 */
+	lockdep_assert_once(lockdep_is_held(&irqfd->kvm->irqfds.lock) ||
+			    srcu_read_lock_held(&irqfd->kvm->irq_srcu));
+
 	return list_empty(&irqfd->list) ? false : true;
 }
 
 /*
  * Mark the irqfd as inactive and schedule it for removal
- *
- * assumes kvm->irqfds.lock is held
  */
-static void
-irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
+static void irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
 {
+	lockdep_assert_held(&irqfd->kvm->irqfds.lock);
+
 	BUG_ON(!irqfd_is_active(irqfd));
 
 	list_del_init(&irqfd->list);
@@ -217,8 +224,15 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
 			irq = irqfd->irq_entry;
 		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
-		/* An event has been signaled, inject an interrupt */
-		if (kvm_arch_set_irq_inatomic(&irq, kvm,
+
+		/*
+		 * An event has been signaled, inject an interrupt unless the
+		 * irqfd is being deassigned (isn't active), in which case the
+		 * routing information may be stale (once the irqfd is removed
+		 * from the list, it will stop receiving routing updates).
+		 */
+		if (unlikely(!irqfd_is_active(irqfd)) ||
+		    kvm_arch_set_irq_inatomic(&irq, kvm,
 					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
 					      false) == -EWOULDBLOCK)
 			schedule_work(&irqfd->inject);
@@ -585,18 +599,8 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 	spin_lock_irq(&kvm->irqfds.lock);
 
 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
-			/*
-			 * This clearing of irq_entry.type is needed for when
-			 * another thread calls kvm_irq_routing_update before
-			 * we flush workqueue below (we synchronize with
-			 * kvm_irq_routing_update using irqfds.lock).
-			 */
-			write_seqcount_begin(&irqfd->irq_entry_sc);
-			irqfd->irq_entry.type = 0;
-			write_seqcount_end(&irqfd->irq_entry_sc);
+		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi)
 			irqfd_deactivate(irqfd);
-		}
 	}
 
 	spin_unlock_irq(&kvm->irqfds.lock);