Merge tag 'asoc-fix-v6.9-merge-window' of https://git.kernel.org/pub/scm/linux/kernel...
author     Takashi Iwai <tiwai@suse.de>
           Thu, 21 Mar 2024 13:07:27 +0000 (14:07 +0100)
committer  Takashi Iwai <tiwai@suse.de>
           Thu, 21 Mar 2024 13:07:27 +0000 (14:07 +0100)
ASoC: Fixes for v6.9

A bunch of fixes that came in during the merge window; probably the most
substantial thing is the DPCM locking fix for compressed audio, which has
been lurking for a while.

765 files changed:
.mailmap
Documentation/arch/x86/mds.rst
Documentation/conf.py
Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
Documentation/devicetree/bindings/net/renesas,ethertsn.yaml
Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
Documentation/driver-api/dpll.rst
Documentation/networking/net_cachelines/inet_sock.rst
Documentation/process/maintainer-netdev.rst
Documentation/sphinx/translations.py
Documentation/virt/hyperv/index.rst
Documentation/virt/hyperv/vpci.rst [new file with mode: 0644]
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/amazon/alpine.dtsi
arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
arch/arm/boot/dts/marvell/kirkwood-l-50.dts
arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
arch/arm/boot/dts/nxp/imx/imx7s.dtsi
arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
arch/arm/boot/dts/renesas/r8a7790-lager.dts
arch/arm/boot/dts/renesas/r8a7790-stout.dts
arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
arch/arm/boot/dts/renesas/r8a7791-porter.dts
arch/arm/boot/dts/renesas/r8a7792-blanche.dts
arch/arm/boot/dts/renesas/r8a7793-gose.dts
arch/arm/boot/dts/renesas/r8a7794-alt.dts
arch/arm/boot/dts/renesas/r8a7794-silk.dts
arch/arm/boot/dts/rockchip/rv1108.dtsi
arch/arm/boot/dts/st/stm32429i-eval.dts
arch/arm/boot/dts/st/stm32mp157c-dk2.dts
arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/mach-ep93xx/core.c
arch/arm64/boot/dts/allwinner/Makefile
arch/arm64/boot/dts/amazon/alpine-v2.dtsi
arch/arm64/boot/dts/amazon/alpine-v3.dtsi
arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
arch/arm64/boot/dts/freescale/Makefile
arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts
arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/lg/lg1312.dtsi
arch/arm64/boot/dts/lg/lg1313.dtsi
arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
arch/arm64/boot/dts/mediatek/mt8195-demo.dts
arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
arch/arm64/boot/dts/nvidia/tegra234.dtsi
arch/arm64/boot/dts/qcom/ipq6018.dtsi
arch/arm64/boot/dts/qcom/ipq8074.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
arch/arm64/boot/dts/qcom/sm6115.dtsi
arch/arm64/boot/dts/qcom/sm8650-mtp.dts
arch/arm64/boot/dts/qcom/sm8650-qrd.dts
arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
arch/arm64/boot/dts/rockchip/px30.dtsi
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
arch/arm64/crypto/aes-neonbs-glue.c
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/jump_label.h
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/suspend.c
arch/arm64/kvm/vgic/vgic-its.c
arch/loongarch/boot/dts/loongson-2k0500-ref.dts
arch/loongarch/boot/dts/loongson-2k1000-ref.dts
arch/loongarch/kernel/setup.c
arch/loongarch/kernel/smp.c
arch/loongarch/kvm/vcpu.c
arch/parisc/include/asm/kprobes.h
arch/parisc/kernel/ftrace.c
arch/parisc/kernel/processor.c
arch/parisc/kernel/unwind.c
arch/powerpc/include/asm/ppc-pci.h
arch/powerpc/include/asm/rtas.h
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_nestedv2.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/pci_dlpar.c
arch/riscv/Kconfig
arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
arch/riscv/boot/dts/starfive/jh7100.dtsi
arch/riscv/boot/dts/starfive/jh7110.dtsi
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/ftrace.h
arch/riscv/include/asm/hugetlb.h
arch/riscv/include/asm/hwcap.h
arch/riscv/include/asm/pgalloc.h
arch/riscv/include/asm/pgtable-64.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/suspend.h
arch/riscv/include/asm/vmalloc.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/return_address.c [new file with mode: 0644]
arch/riscv/kernel/suspend.c
arch/riscv/mm/hugetlbpage.c
arch/s390/configs/compat.config [new file with mode: 0644]
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/pci/pci.c
arch/sparc/Makefile
arch/sparc/video/Makefile
arch/x86/entry/entry.S
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/hyperv/hv_vtl.c
arch/x86/hyperv/ivm.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/entry-common.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/set_memory.h
arch/x86/include/asm/vsyscall.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/e820.c
arch/x86/kernel/nmi.c
arch/x86/kvm/Kconfig
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/vmx/run_flags.h
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/fault.c
arch/x86/mm/maccess.c
arch/x86/mm/numa.c
arch/x86/mm/pat/set_memory.c
block/opal_proto.h
block/sed-opal.c
crypto/lskcipher.c
drivers/accel/ivpu/ivpu_hw_40xx.c
drivers/acpi/apei/ghes.c
drivers/acpi/ec.c
drivers/ata/ahci.c
drivers/ata/ahci_ceva.c
drivers/ata/libata-core.c
drivers/bluetooth/btqca.c
drivers/bluetooth/hci_bcm4377.c
drivers/bluetooth/hci_qca.c
drivers/bus/imx-weim.c
drivers/cache/ax45mp_cache.c
drivers/clk/samsung/clk-gs101.c
drivers/comedi/drivers/comedi_8255.c
drivers/comedi/drivers/comedi_test.c
drivers/counter/counter-core.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
drivers/crypto/rockchip/rk3288_crypto_ahash.c
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
drivers/cxl/acpi.c
drivers/cxl/core/cdat.c
drivers/cxl/core/mbox.c
drivers/cxl/core/memdev.c
drivers/cxl/core/pci.c
drivers/cxl/core/region.c
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
drivers/cxl/mem.c
drivers/cxl/pci.c
drivers/dma/dw-edma/dw-edma-v0-core.c
drivers/dma/dw-edma/dw-hdma-v0-core.c
drivers/dma/dw-edma/dw-hdma-v0-regs.h
drivers/dma/fsl-edma-common.c
drivers/dma/fsl-edma-common.h
drivers/dma/fsl-edma-main.c
drivers/dma/fsl-qdma.c
drivers/dma/idxd/cdev.c
drivers/dma/idxd/debugfs.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/irq.c
drivers/dma/ptdma/ptdma-dmaengine.c
drivers/dpll/dpll_core.c
drivers/dpll/dpll_core.h
drivers/dpll/dpll_netlink.c
drivers/firewire/core-card.c
drivers/firewire/ohci.c
drivers/firmware/efi/capsule-loader.c
drivers/firmware/microchip/mpfs-auto-update.c
drivers/gpio/gpio-74x164.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
drivers/gpu/drm/amd/display/dc/link/link_factory.c
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
drivers/gpu/drm/bridge/aux-hpd-bridge.c
drivers/gpu/drm/drm_buddy.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/display/intel_display_power_well.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_hdcp.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_modeset_setup.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/display/intel_tv.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
drivers/gpu/drm/meson/meson_encoder_cvbs.c
drivers/gpu/drm/meson/meson_encoder_dsi.c
drivers/gpu/drm/meson/meson_encoder_hdmi.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/nouveau/Kconfig
drivers/gpu/drm/nouveau/include/nvkm/core/client.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nvkm/core/client.c
drivers/gpu/drm/nouveau/nvkm/core/object.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tests/drm_buddy_test.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/xe/tests/xe_mocs_test.c
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device.h
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_drm_client.c
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_execlist.c
drivers/gpu/drm/xe/xe_gt_idle.c
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_lrc.c
drivers/gpu/drm/xe/xe_mmio.c
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_sync.c
drivers/gpu/drm/xe/xe_sync.h
drivers/gpu/drm/xe/xe_sync_types.h
drivers/gpu/drm/xe/xe_tile.c
drivers/gpu/drm/xe/xe_trace.h
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/dev.h
drivers/hv/channel.c
drivers/hv/hv_util.c
drivers/hv/vmbus_drv.c
drivers/hwmon/nct6775-core.c
drivers/i2c/busses/i2c-aspeed.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-wmt.c
drivers/iio/accel/adxl367.c
drivers/iio/accel/adxl367_i2c.c
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
drivers/iio/pressure/bmp280-spi.c
drivers/iio/pressure/dlhl60d.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/irdma/defs.h
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/mlx5/cong.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/wr.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/gpio_keys_polled.c
drivers/input/mouse/bcm5974.c
drivers/input/rmi4/rmi_driver.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/iommu.h
drivers/iommu/intel/nested.c
drivers/iommu/intel/pasid.c
drivers/iommu/intel/pasid.h
drivers/iommu/iommu-sva.c
drivers/iommu/iommufd/hw_pagetable.c
drivers/iommu/iommufd/io_pagetable.c
drivers/iommu/iommufd/iommufd_test.h
drivers/iommu/iommufd/iova_bitmap.c
drivers/iommu/iommufd/selftest.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-sifive-plic.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-verity-target.c
drivers/md/dm-verity.h
drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/misc/fastrpc.c
drivers/misc/lis3lv02d/lis3lv02d_i2c.c
drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/mei/vsc-tp.c
drivers/mmc/core/mmc.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/mmc/host/sdhci-xenon-phy.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/marvell_nand.c
drivers/mtd/nand/spi/gigadevice.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/ethernet/adi/Kconfig
drivers/net/ethernet/amd/pds_core/auxbus.c
drivers/net/ethernet/broadcom/asp2/bcmasp.c
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
drivers/net/ethernet/cisco/enic/vnic_vic.c
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_dpll.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_lib.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
drivers/net/ethernet/microchip/sparx5/sparx5_main.h
drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
drivers/net/ethernet/pensando/ionic/ionic_dev.c
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
drivers/net/ethernet/pensando/ionic/ionic_fw.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/stmicro/stmmac/hwif.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/toshiba/ps3_gelic_net.c
drivers/net/geneve.c
drivers/net/gtp.c
drivers/net/ipa/ipa_interrupt.c
drivers/net/phy/realtek.c
drivers/net/tun.c
drivers/net/usb/dm9601.c
drivers/net/usb/lan78xx.c
drivers/net/usb/smsc95xx.c
drivers/net/veth.c
drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/of/property.c
drivers/pci/msi/irqdomain.c
drivers/perf/cxl_pmu.c
drivers/perf/riscv_pmu.c
drivers/perf/riscv_pmu_legacy.c
drivers/perf/riscv_pmu_sbi.c
drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
drivers/phy/qualcomm/phy-qcom-m31.c
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
drivers/phy/qualcomm/phy-qcom-qmp-usb.c
drivers/pinctrl/core.c
drivers/pinctrl/stm32/pinctrl-stm32mp257.c
drivers/platform/x86/amd/pmf/core.c
drivers/platform/x86/amd/pmf/pmf.h
drivers/platform/x86/amd/pmf/tee-if.c
drivers/platform/x86/intel/int0002_vgpio.c
drivers/platform/x86/intel/vbtn.c
drivers/platform/x86/p2sb.c
drivers/platform/x86/serdev_helpers.h [new file with mode: 0644]
drivers/platform/x86/think-lmi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/platform/x86/x86-android-tablets/core.c
drivers/platform/x86/x86-android-tablets/lenovo.c
drivers/platform/x86/x86-android-tablets/other.c
drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
drivers/pmdomain/arm/scmi_perf_domain.c
drivers/pmdomain/qcom/rpmhpd.c
drivers/power/supply/Kconfig
drivers/power/supply/bq27xxx_battery_i2c.c
drivers/regulator/max5970-regulator.c
drivers/regulator/rk808-regulator.c
drivers/s390/cio/device_ops.c
drivers/scsi/Kconfig
drivers/scsi/mpi3mr/mpi3mr_transport.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/scsi.c
drivers/scsi/sd.c
drivers/scsi/smartpqi/smartpqi_init.c
drivers/soc/microchip/Kconfig
drivers/soc/qcom/pmic_glink.c
drivers/soc/qcom/pmic_glink_altmode.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-cs42l43.c
drivers/spi/spi-ppc4xx.c
drivers/target/target_core_pscsi.c
drivers/tee/optee/device.c
drivers/thunderbolt/switch.c
drivers/tty/hvc/Kconfig
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/serial_port.c
drivers/tty/serial/stm32-usart.c
drivers/tty/vt/vt.c
drivers/ufs/core/ufshcd.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/cdns3/core.c
drivers/usb/cdns3/drd.c
drivers/usb/cdns3/drd.h
drivers/usb/cdns3/host.c
drivers/usb/core/port.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/udc/omap_udc.c
drivers/usb/host/uhci-grlib.c
drivers/usb/host/xhci-ring.c
drivers/usb/roles/class.c
drivers/usb/storage/isd200.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/uas.c
drivers/usb/typec/altmodes/displayport.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/ucsi/ucsi_glink.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/hyperv_fb.c
fs/affs/affs.h
fs/affs/super.c
fs/afs/dir.c
fs/afs/file.c
fs/afs/internal.h
fs/afs/main.c
fs/afs/server.c
fs/afs/volume.c
fs/aio.c
fs/bcachefs/backpointers.c
fs/bcachefs/btree_iter.c
fs/bcachefs/fs-io-buffered.c
fs/bcachefs/fs-io-direct.c
fs/bcachefs/journal_reclaim.c
fs/bcachefs/snapshot.c
fs/bcachefs/util.c
fs/btrfs/block-rsv.c
fs/btrfs/block-rsv.h
fs/btrfs/defrag.c
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/send.c
fs/btrfs/space-info.c
fs/btrfs/transaction.c
fs/btrfs/zoned.c
fs/cachefiles/cache.c
fs/cachefiles/daemon.c
fs/ceph/mdsmap.c
fs/ceph/mdsmap.h
fs/coredump.c
fs/dcache.c
fs/efivarfs/internal.h
fs/efivarfs/super.c
fs/efivarfs/vars.c
fs/erofs/data.c
fs/erofs/decompressor.c
fs/erofs/fscache.c
fs/erofs/namei.c
fs/exfat/exfat_fs.h
fs/exfat/file.c
fs/exfat/nls.c
fs/exfat/super.c
fs/ext4/symlink.c
fs/fuse/cuse.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/hfsplus/hfsplus_fs.h
fs/hfsplus/super.c
fs/namei.c
fs/netfs/buffered_write.c
fs/netfs/direct_write.c
fs/netfs/io.c
fs/nfs/client.c
fs/nfs/dir.c
fs/ntfs3/frecord.c
fs/proc/base.c
fs/proc/inode.c
fs/proc/root.c
fs/smb/client/cifsfs.c
fs/super.c
fs/xfs/xfs_super.c
include/drm/bridge/aux-bridge.h
include/linux/bvec.h
include/linux/cxl-event.h
include/linux/dcache.h
include/linux/dpll.h
include/linux/fs.h
include/linux/gfp.h
include/linux/hyperv.h
include/linux/iommu.h
include/linux/kvm_host.h
include/linux/memblock.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/nfs_fs_sb.h
include/linux/poison.h
include/linux/proc_fs.h
include/linux/swap.h
include/linux/trace_seq.h
include/linux/uio.h
include/net/mctp.h
include/net/netfilter/nf_flow_table.h
include/net/sch_generic.h
include/net/switchdev.h
include/net/tcp.h
include/scsi/scsi_device.h
include/trace/events/qdisc.h
include/uapi/drm/nouveau_drm.h
include/uapi/drm/xe_drm.h
include/uapi/linux/in6.h
init/Kconfig
kernel/bpf/cpumap.c
kernel/bpf/helpers.c
kernel/bpf/task_iter.c
kernel/bpf/verifier.c
kernel/cgroup/cpuset.c
kernel/sched/membarrier.c
kernel/trace/fprobe.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_output.c
lib/Kconfig.debug
lib/checksum_kunit.c
lib/iov_iter.c
lib/nlattr.c
lib/stackdepot.c
mm/compaction.c
mm/damon/core.c
mm/damon/lru_sort.c
mm/damon/reclaim.c
mm/damon/sysfs-schemes.c
mm/debug_vm_pgtable.c
mm/filemap.c
mm/kasan/common.c
mm/kasan/generic.c
mm/kasan/kasan.h
mm/kasan/quarantine.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/mmap.c
mm/page_alloc.c
mm/swap.h
mm/swap_state.c
mm/swapfile.c
mm/userfaultfd.c
mm/vmscan.c
mm/zswap.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sync.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_switchdev.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/ceph/messenger_v2.c
net/core/dev.c
net/core/page_pool_user.c
net/core/rtnetlink.c
net/core/skmsg.c
net/core/sock.c
net/devlink/core.c
net/devlink/port.c
net/hsr/hsr_forward.c
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_tunnel.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/exthdrs.c
net/ipv6/route.c
net/ipv6/seg6.c
net/iucv/iucv.c
net/l2tp/l2tp_ip6.c
net/mac80211/rate.c
net/mctp/route.c
net/mptcp/diag.c
net/mptcp/options.c
net/mptcp/pm_netlink.c
net/mptcp/pm_userspace.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_h323_asn1.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_compat.c
net/netfilter/nft_ct.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/netrom/nr_dev.c
net/netrom/nr_in.c
net/netrom/nr_out.c
net/netrom/nr_route.c
net/netrom/nr_subr.c
net/phonet/datagram.c
net/phonet/pep.c
net/rds/rdma.c
net/rds/send.c
net/sched/act_mirred.c
net/sched/cls_flower.c
net/switchdev/switchdev.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/unix/garbage.c
net/wireless/nl80211.c
net/xdp/xsk.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/Kconfig.include
scripts/Makefile.compiler
scripts/bpf_doc.py
scripts/gdb/linux/symbols.py
security/apparmor/lsm.c
security/integrity/digsig.c
security/landlock/fs.c
security/selinux/hooks.c
security/tomoyo/common.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/amd/yc/pci-acp6x.c
sound/soc/codecs/tlv320adc3xxx.c
sound/soc/rockchip/rockchip_i2s_tdm.c
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/soc/sof/amd/acp-loader.c
sound/soc/sof/amd/acp.c
sound/soc/sof/amd/acp.h
sound/soc/sof/amd/vangogh.c
tools/net/ynl/lib/ynl.c
tools/testing/cxl/Kbuild
tools/testing/cxl/test/cxl.c
tools/testing/cxl/test/mock.c
tools/testing/cxl/test/mock.h
tools/testing/selftests/bpf/prog_tests/iters.c
tools/testing/selftests/bpf/prog_tests/read_vsyscall.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/timer.c
tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
tools/testing/selftests/bpf/progs/iters_task.c
tools/testing/selftests/bpf/progs/read_vsyscall.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/timer.c
tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
tools/testing/selftests/drivers/net/bonding/bond_options.sh
tools/testing/selftests/iommu/config
tools/testing/selftests/iommu/iommufd.c
tools/testing/selftests/iommu/iommufd_utils.h
tools/testing/selftests/kvm/set_memory_region_test.c
tools/testing/selftests/mm/uffd-unit-tests.c
tools/testing/selftests/net/forwarding/tc_actions.sh
tools/testing/selftests/net/ioam6.sh
tools/testing/selftests/net/ioam6_parser.c
tools/testing/selftests/net/mptcp/diag.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/mptcp/mptcp_lib.sh
tools/testing/selftests/net/mptcp/pm_netlink.sh
tools/testing/selftests/net/mptcp/simult_flows.sh
tools/testing/selftests/net/mptcp/userspace_pm.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/net/veth.sh
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/bridge_netfilter.sh [new file with mode: 0644]
tools/testing/selftests/powerpc/math/fpu_signal.c
virt/kvm/kvm_main.c

index b99a238ee3bde17fdf4e0f6b9ca0aee81e1dc9a7..bd9f1025ac44e0e289a6843de2c4497be2b76118 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -325,6 +325,7 @@ Kenneth W Chen <kenneth.w.chen@intel.com>
 Kenneth Westfield <quic_kwestfie@quicinc.com> <kwestfie@codeaurora.org>
 Kiran Gunda <quic_kgunda@quicinc.com> <kgunda@codeaurora.org>
 Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com>
+Kishon Vijay Abraham I <kishon@kernel.org> <kishon@ti.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
@@ -553,6 +554,7 @@ Senthilkumar N L <quic_snlakshm@quicinc.com> <snlakshm@codeaurora.org>
 Serge Hallyn <sergeh@kernel.org> <serge.hallyn@canonical.com>
 Serge Hallyn <sergeh@kernel.org> <serue@us.ibm.com>
 Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
+Shakeel Butt <shakeel.butt@linux.dev> <shakeelb@google.com>
 Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
 Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
 Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
@@ -608,6 +610,11 @@ TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
 TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
 Tudor Ambarus <tudor.ambarus@linaro.org> <tudor.ambarus@microchip.com>
+Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko.ursulin@intel.com>
+Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko.ursulin@linux.intel.com>
+Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko.ursulin@sophos.com>
+Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko.ursulin@onelan.co.uk>
+Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko@ursulin.net>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
index e73fdff62c0aa10a0d6de89e6a62f8b2185920a7..c58c72362911cd0a10be8e96eba4cb9940d3b576 100644 (file)
@@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
 
     mds_clear_cpu_buffers()
 
+Also, the macro CLEAR_CPU_BUFFERS can be used in ASM late in the exit-to-user
+path. Other than CFLAGS.ZF, this macro doesn't clobber any registers.
+
 The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
 (idle) transitions.
 
@@ -138,17 +141,30 @@ Mitigation points
 
    When transitioning from kernel to user space the CPU buffers are flushed
    on affected CPUs when the mitigation is not disabled on the kernel
-   command line. The migitation is enabled through the static key
-   mds_user_clear.
-
-   The mitigation is invoked in prepare_exit_to_usermode() which covers
-   all but one of the kernel to user space transitions.  The exception
-   is when we return from a Non Maskable Interrupt (NMI), which is
-   handled directly in do_nmi().
-
-   (The reason that NMI is special is that prepare_exit_to_usermode() can
-    enable IRQs.  In NMI context, NMIs are blocked, and we don't want to
-    enable IRQs with NMIs blocked.)
+   command line. The mitigation is enabled through the feature flag
+   X86_FEATURE_CLEAR_CPU_BUF.
+
+   The mitigation is invoked just before transitioning to userspace, after
+   user registers are restored. This is done to minimize the window in
+   which kernel data could be accessed after VERW, e.g. via an NMI that
+   arrives after VERW.
+
+   **Corner case not handled**
+   Interrupts returning to the kernel don't clear CPU buffers, since the
+   exit-to-user path is expected to do that anyway. But there could be
+   a case when an NMI is generated in the kernel after the exit-to-user
+   path has cleared the buffers. This case is not handled, and NMIs
+   returning to the kernel don't clear CPU buffers, because:
+
+   1. It is rare to get an NMI after VERW, but before returning to userspace.
+   2. For an unprivileged user, there is no known way to make that NMI
+      less rare or target it.
+   3. It would take a large number of these precisely-timed NMIs to mount
+      an actual attack.  There's presumably not enough bandwidth.
+   4. The NMI in question occurs after a VERW, i.e. when user state is
+      restored and most interesting data is already scrubbed. What's left
+      is only the data that the NMI touches, and that may or may not be
+      of any interest.
 
 
 2. C-State transition
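The VERW idiom described above amounts to a one-instruction buffer clear; a
minimal C sketch follows (the selector symbol and the inline-asm form are
illustrative assumptions, not the exact upstream macro, which is asm patched
via ALTERNATIVE on X86_FEATURE_CLEAR_CPU_BUF):

    static inline void clear_cpu_buffers_sketch(void)
    {
            /* Assumed word holding a valid, writable data segment
             * selector; VERW on it flushes the CPU buffers.  Only
             * CFLAGS.ZF is clobbered, as the text notes. */
            extern unsigned short mds_verw_sel;

            asm volatile("verw %[sel]" : : [sel] "m" (mds_verw_sel) : "cc");
    }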
index 5830b01c56429d38f18e12778ebce543605b3296..da64c9fb7e072378c53423b1f7b575ef124b6834 100644 (file)
@@ -388,6 +388,12 @@ latex_elements = {
         verbatimhintsturnover=false,
     ''',
 
+    #
+    # Some of our authors are fond of deep nesting; tell latex to
+    # cope.
+    #
+    'maxlistdepth': '10',
+
     # For CJK One-half spacing, need to be in front of hyperref
     'extrapackages': r'\usepackage{setspace}',
 
index 3eebc03a309be24fca83f928ffeab18fed09b13b..ca7fdada3ff2487c3c678bc3aa8b40381d04d12e 100644 (file)
@@ -85,8 +85,8 @@ allOf:
 
         clock-names:
           items:
-            - const: dout_cmu_misc_bus
-            - const: dout_cmu_misc_sss
+            - const: bus
+            - const: sss
 
 additionalProperties: false
 
index 475aff7714d6419a9cb7266c65bffffce733b29d..ea35d19be829a37a657f6a3fb45153981ea16fb9 100644 (file)
@@ -65,9 +65,11 @@ properties:
 
   rx-internal-delay-ps:
     enum: [0, 1800]
+    default: 0
 
   tx-internal-delay-ps:
     enum: [0, 2000]
+    default: 0
 
   '#address-cells':
     const: 1
index 7f9d8c7a635a6ffc1cb4ad3a7987929d28391488..99a536601cc7e67e2d3fe735749ff3dda5f16175 100644 (file)
@@ -185,11 +185,12 @@ properties:
 
       gpio-ranges:
         items:
-          - description: A phandle to the CODEC pinctrl node
-            minimum: 0
-          - const: 0
-          - const: 0
-          - const: 3
+          - items:
+              - description: A phandle to the CODEC pinctrl node
+                minimum: 0
+              - const: 0
+              - const: 0
+              - const: 3
 
     patternProperties:
       "-state$":
index e3d593841aa7ddd96237283b27470a9fba97ec89..ea8d16600e16a8530b7e633368bb53b75e878c15 100644 (file)
@@ -545,7 +545,7 @@ In such scenario, dpll device input signal shall be also configurable
 to drive dpll with signal recovered from the PHY netdevice.
 This is done by exposing a pin to the netdevice - attaching pin to the
 netdevice itself with
-``netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)``.
+``dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)``.
 Exposed pin id handle ``DPLL_A_PIN_ID`` is then identifiable by the user,
 as it is attached to the rtnetlink response to the ``RTM_NEWLINK`` get
 command, in the nested attribute ``IFLA_DPLL_PIN``.
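Since the hunk above only renames the helper, a short usage sketch may help
orient readers; the teardown counterpart named below is an assumption, not
taken from this diff:

    /* Attach the pin carrying the PHY-recovered signal to the
     * netdev; userspace then sees DPLL_A_PIN_ID via the
     * IFLA_DPLL_PIN nested attribute in RTM_NEWLINK. */
    dpll_netdev_pin_set(netdev, dpll_pin);

    /* Assumed counterpart on teardown: */
    dpll_netdev_pin_clear(netdev);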
index a2babd0d7954e6729ed8533518dbef039f5fdeac..595d7ef5fc8b090788e7a3439843c060951d1098 100644 (file)
@@ -1,9 +1,9 @@
 .. SPDX-License-Identifier: GPL-2.0
 .. Copyright (C) 2023 Google LLC
 
-=====================================================
-inet_connection_sock struct fast path usage breakdown
-=====================================================
+==========================================
+inet_sock struct fast path usage breakdown
+==========================================
 
 Type                    Name                  fastpath_tx_access  fastpath_rx_access  comment
 ..struct                ..inet_sock                                                     
index 84ee60fceef24cbf1ba9e090ac91c94abd4064b5..fd96e4a3cef9c09382e34419ec3f8ac1c5514cf4 100644 (file)
@@ -431,7 +431,7 @@ patchwork checks
 Checks in patchwork are mostly simple wrappers around existing kernel
 scripts, the sources are available at:
 
-https://github.com/kuba-moo/nipa/tree/master/tests
+https://github.com/linux-netdev/nipa/tree/master/tests
 
 **Do not** post your patches just to run them through the checks.
 You must ensure that your patches are ready by testing them locally
index 47161e6eba9976fa8e67a14905f284ea05d82f21..32c2b32b2b5ee91a27abacfa0332620e208b8723 100644 (file)
@@ -29,10 +29,7 @@ all_languages = {
 }
 
 class LanguagesNode(nodes.Element):
-    def __init__(self, current_language, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-        self.current_language = current_language
+    pass
 
 class TranslationsTransform(Transform):
     default_priority = 900
@@ -49,7 +46,8 @@ class TranslationsTransform(Transform):
             # normalize docname to be the untranslated one
             docname = os.path.join(*components[2:])
 
-        new_nodes = LanguagesNode(all_languages[this_lang_code])
+        new_nodes = LanguagesNode()
+        new_nodes['current_language'] = all_languages[this_lang_code]
 
         for lang_code, lang_name in all_languages.items():
             if lang_code == this_lang_code:
@@ -84,7 +82,7 @@ def process_languages(app, doctree, docname):
 
         html_content = app.builder.templates.render('translations.html',
             context={
-                'current_language': node.current_language,
+                'current_language': node['current_language'],
                 'languages': languages,
             })
 
index 4a7a1b738bbead3563cbc70a67c038725d0aadfd..de447e11b4a5c3b9a0948712e59d1d065130a1f7 100644 (file)
@@ -10,3 +10,4 @@ Hyper-V Enlightenments
    overview
    vmbus
    clocks
+   vpci
diff --git a/Documentation/virt/hyperv/vpci.rst b/Documentation/virt/hyperv/vpci.rst
new file mode 100644 (file)
index 0000000..b65b212
--- /dev/null
@@ -0,0 +1,316 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+PCI pass-thru devices
+=========================
+In a Hyper-V guest VM, PCI pass-thru devices (also called
+virtual PCI devices, or vPCI devices) are physical PCI devices
+that are mapped directly into the VM's physical address space.
+Guest device drivers can interact directly with the hardware
+without intermediation by the host hypervisor.  This approach
+provides higher bandwidth access to the device with lower
+latency, compared with devices that are virtualized by the
+hypervisor.  The device should appear to the guest just as it
+would when running on bare metal, so no changes are required
+to the Linux device drivers for the device.
+
+Hyper-V terminology for vPCI devices is "Discrete Device
+Assignment" (DDA).  Public documentation for Hyper-V DDA is
+available here: `DDA`_
+
+.. _DDA: https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/plan/plan-for-deploying-devices-using-discrete-device-assignment
+
+DDA is typically used for storage controllers, such as NVMe,
+and for GPUs.  A similar mechanism for NICs is called SR-IOV
+and produces the same benefits by allowing a guest device
+driver to interact directly with the hardware.  See Hyper-V
+public documentation here: `SR-IOV`_
+
+.. _SR-IOV: https://learn.microsoft.com/en-us/windows-hardware/drivers/network/overview-of-single-root-i-o-virtualization--sr-iov-
+
+This discussion of vPCI devices includes DDA and SR-IOV
+devices.
+
+Device Presentation
+-------------------
+Hyper-V provides full PCI functionality for a vPCI device when
+it is operating, so the Linux device driver for the device can
+be used unchanged, provided it uses the correct Linux kernel
+APIs for accessing PCI config space and for other integration
+with Linux.  But the initial detection of the PCI device and
+its integration with the Linux PCI subsystem must use Hyper-V
+specific mechanisms.  Consequently, vPCI devices on Hyper-V
+have a dual identity.  They are initially presented to Linux
+guests as VMBus devices via the standard VMBus "offer"
+mechanism, so they have a VMBus identity and appear under
+/sys/bus/vmbus/devices.  The VMBus vPCI driver in Linux at
+drivers/pci/controller/pci-hyperv.c handles a newly introduced
+vPCI device by fabricating a PCI bus topology and creating all
+the normal PCI device data structures in Linux that would
+exist if the PCI device were discovered via ACPI on a bare-
+metal system.  Once those data structures are set up, the
+device also has a normal PCI identity in Linux, and the normal
+Linux device driver for the vPCI device can function as if it
+were running in Linux on bare-metal.  Because vPCI devices are
+presented dynamically through the VMBus offer mechanism, they
+do not appear in the Linux guest's ACPI tables.  vPCI devices
+may be added to a VM or removed from a VM at any time during
+the life of the VM, and not just during initial boot.
+
+With this approach, the vPCI device is a VMBus device and a
+PCI device at the same time.  In response to the VMBus offer
+message, the hv_pci_probe() function runs and establishes a
+VMBus connection to the vPCI VSP on the Hyper-V host.  That
+connection has a single VMBus channel.  The channel is used to
+exchange messages with the vPCI VSP for the purpose of setting
+up and configuring the vPCI device in Linux.  Once the device
+is fully configured in Linux as a PCI device, the VMBus
+channel is used only if Linux changes the vCPU to be interrupted
+in the guest, or if the vPCI device is removed from
+the VM while the VM is running.  The ongoing operation of the
+device happens directly between the Linux device driver for
+the device and the hardware, with VMBus and the VMBus channel
+playing no role.
+
+PCI Device Setup
+----------------
+PCI device setup follows a sequence that Hyper-V originally
+created for Windows guests, and that can be ill-suited for
+Linux guests due to differences in the overall structure of
+the Linux PCI subsystem compared with Windows.  Nonetheless,
+with a bit of hackery in the Hyper-V virtual PCI driver for
+Linux, the virtual PCI device is set up in Linux so that
+generic Linux PCI subsystem code and the Linux driver for the
+device "just work".
+
+Each vPCI device is set up in Linux to be in its own PCI
+domain with a host bridge.  The PCI domainID is derived from
+bytes 4 and 5 of the instance GUID assigned to the VMBus vPCI
+device.  The Hyper-V host does not guarantee that these bytes
+are unique, so hv_pci_probe() has an algorithm to resolve
+collisions.  The collision resolution is intended to be stable
+across reboots of the same VM so that the PCI domainIDs don't
+change, as the domainID appears in the user space
+configuration of some devices.
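+
+As a hedged sketch (the field names are illustrative assumptions,
+not necessarily the exact upstream code), the derivation is::
+
+  /* Seed the PCI domain from bytes 4 and 5 of the VMBus
+   * instance GUID; collisions are resolved afterwards. */
+  u16 dom_req = hdev->dev_instance.b[5] << 8 |
+                hdev->dev_instance.b[4];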
+
+hv_pci_probe() allocates a guest MMIO range to be used as PCI
+config space for the device.  This MMIO range is communicated
+to the Hyper-V host over the VMBus channel as part of telling
+the host that the device is ready to enter d0.  See
+hv_pci_enter_d0().  When the guest subsequently accesses this
+MMIO range, the Hyper-V host intercepts the accesses and maps
+them to the physical device PCI config space.
+
+hv_pci_probe() also gets BAR information for the device from
+the Hyper-V host, and uses this information to allocate MMIO
+space for the BARs.  That MMIO space is then set up to be
+associated with the host bridge so that it works when generic
+PCI subsystem code in Linux processes the BARs.
+
+Finally, hv_pci_probe() creates the root PCI bus.  At this
+point the Hyper-V virtual PCI driver hackery is done, and the
+normal Linux PCI machinery for scanning the root bus works to
+detect the device, to perform driver matching, and to
+initialize the driver and device.
+
+PCI Device Removal
+------------------
+A Hyper-V host may initiate removal of a vPCI device from a
+guest VM at any time during the life of the VM.  The removal
+is instigated by an admin action taken on the Hyper-V host and
+is not under the control of the guest OS.
+
+A guest VM is notified of the removal by an unsolicited
+"Eject" message sent from the host to the guest over the VMBus
+channel associated with the vPCI device.  Upon receipt of such
+a message, the Hyper-V virtual PCI driver in Linux
+asynchronously invokes Linux kernel PCI subsystem calls to
+shut down and remove the device.  When those calls are
+complete, an "Ejection Complete" message is sent back to
+Hyper-V over the VMBus channel indicating that the device has
+been removed.  At this point, Hyper-V sends a VMBus rescind
+message to the Linux guest, which the VMBus driver in Linux
+processes by removing the VMBus identity for the device.  Once
+that processing is complete, all vestiges of the device having
+been present are gone from the Linux kernel.  The rescind
+message also indicates to the guest that Hyper-V has stopped
+providing support for the vPCI device in the guest.  If the
+guest were to attempt to access that device's MMIO space, it
+would be an invalid reference. Hypercalls affecting the device
+return errors, and any further messages sent in the VMBus
+channel are ignored.
+
+After sending the Eject message, Hyper-V allows the guest VM
+60 seconds to cleanly shut down the device and respond with
+Ejection Complete before sending the VMBus rescind
+message.  If for any reason the Eject steps don't complete
+within the allowed 60 seconds, the Hyper-V host forcibly
+performs the rescind steps, which will likely result in
+cascading errors in the guest because the device is no
+longer present from the guest standpoint and accessing the
+device MMIO space will fail.
+
+Because ejection is asynchronous and can happen at any point
+during the guest VM lifecycle, proper synchronization in the
+Hyper-V virtual PCI driver is very tricky.  Ejection has been
+observed even before a newly offered vPCI device has been
+fully set up.  The Hyper-V virtual PCI driver has been updated
+several times over the years to fix race conditions when
+ejections happen at inopportune times. Care must be taken when
+modifying this code to prevent re-introducing such problems.
+See comments in the code.
+
+Interrupt Assignment
+--------------------
+The Hyper-V virtual PCI driver supports vPCI devices using
+MSI, multi-MSI, or MSI-X.  Assigning the guest vCPU that will
+receive the interrupt for a particular MSI or MSI-X message is
+complex because of the way the Linux setup of IRQs maps onto
+the Hyper-V interfaces.  For the single-MSI and MSI-X cases,
+Linux calls hv_compose_msi_msg() twice, with the first call
+containing a dummy vCPU and the second call containing the
+real vCPU.  Furthermore, hv_irq_unmask() is finally called
+(on x86) or the GICD registers are set (on arm64) to specify
+the real vCPU again.  Each of these three calls interacts
+with Hyper-V, which must decide which physical CPU should
+receive the interrupt before it is forwarded to the guest VM.
+Unfortunately, the Hyper-V decision-making process is a bit
+limited, and can result in concentrating the physical
+interrupts on a single CPU, causing a performance bottleneck.
+See details about how this is resolved in the extensive
+comment above the function hv_compose_msi_req_get_cpu().
+
+The Hyper-V virtual PCI driver implements the
+irq_chip.irq_compose_msi_msg function as hv_compose_msi_msg().
+Unfortunately, on Hyper-V the implementation requires sending
+a VMBus message to the Hyper-V host and awaiting an interrupt
+indicating receipt of a reply message.  Since
+irq_chip.irq_compose_msi_msg can be called with IRQ locks
+held, it doesn't work to do the normal sleep until awakened by
+the interrupt.  Instead, hv_compose_msi_msg() must send the
+VMBus message, and then poll for the completion message. As a
+further complication, the vPCI device could be ejected/rescinded
+while the polling is in progress, so this scenario must be
+detected as well.  See comments in the code regarding this
+very tricky area.
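+
+A hedged sketch of that send-then-poll shape (the context type,
+its ``done`` flag, and the function name are illustrative
+assumptions, not the exact upstream code)::
+
+  /* Send the compose request, then busy-poll for the host's
+   * completion, bailing out if the device is rescinded. */
+  static int send_and_poll(struct hv_device *hdev, void *msg,
+                           u32 len, struct compose_comp_ctxt *comp)
+  {
+          int ret = vmbus_sendpacket(hdev->channel, msg, len,
+                          (unsigned long)comp, VM_PKT_DATA_INBAND,
+                          VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+          if (ret)
+                  return ret;
+          while (!READ_ONCE(comp->done)) {
+                  if (hdev->channel->rescind)
+                          return -ENODEV;  /* ejected mid-flight */
+                  udelay(100);
+          }
+          return 0;
+  }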
+
+Most of the code in the Hyper-V virtual PCI driver (pci-
+hyperv.c) applies to Hyper-V and Linux guests running on x86
+and on arm64 architectures.  But there are differences in how
+interrupt assignments are managed.  On x86, the Hyper-V
+virtual PCI driver in the guest must make a hypercall to tell
+Hyper-V which guest vCPU should be interrupted by each
+MSI/MSI-X interrupt, and the x86 interrupt vector number that
+the x86_vector IRQ domain has picked for the interrupt.  This
+hypercall is made by hv_arch_irq_unmask().  On arm64, the
+Hyper-V virtual PCI driver manages the allocation of an SPI
+for each MSI/MSI-X interrupt.  The Hyper-V virtual PCI driver
+stores the allocated SPI in the architectural GICD registers,
+which Hyper-V emulates, so no hypercall is necessary as with
+x86.  Hyper-V does not support using LPIs for vPCI devices in
+arm64 guest VMs because it does not emulate a GICv3 ITS.
+
+The Hyper-V virtual PCI driver in Linux supports vPCI devices
+whose drivers create managed or unmanaged Linux IRQs.  If the
+smp_affinity for an unmanaged IRQ is updated via the /proc/irq
+interface, the Hyper-V virtual PCI driver is called to tell
+the Hyper-V host to change the interrupt targeting and
+everything works properly.  However, on x86 if the x86_vector
+IRQ domain needs to reassign an interrupt vector due to
+running out of vectors on a CPU, there's no path to inform the
+Hyper-V host of the change, and things break.  Fortunately,
+guest VMs operate in a constrained device environment where
+using all the vectors on a CPU doesn't happen. Since such a
+problem is only a theoretical concern rather than a practical
+concern, it has been left unaddressed.
+
+DMA
+---
+By default, Hyper-V pins all guest VM memory in the host
+when the VM is created, and programs the physical IOMMU to
+allow the VM to have DMA access to all its memory.  Hence
+it is safe to assign PCI devices to the VM, and allow the
+guest operating system to program the DMA transfers.  The
+physical IOMMU prevents a malicious guest from initiating
+DMA to memory belonging to the host or to other VMs on the
+host. From the Linux guest standpoint, such DMA transfers
+are in "direct" mode since Hyper-V does not provide a virtual
+IOMMU in the guest.
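+
+From the guest driver's standpoint this is simply the ordinary
+streaming DMA API (hedged sketch; ``dev``, ``buf`` and ``len``
+are placeholders)::
+
+  dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+
+  if (dma_mapping_error(dev, addr))
+          return -ENOMEM;
+  /* ... device performs the DMA directly ... */
+  dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);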
+
+Hyper-V assumes that physical PCI devices always perform
+cache-coherent DMA.  When running on x86, this behavior is
+required by the architecture.  When running on arm64, the
+architecture allows for both cache-coherent and
+non-cache-coherent devices, with the behavior of each device
+specified in the ACPI DSDT.  But when a PCI device is assigned
+to a guest VM, that device does not appear in the DSDT, so the
+Hyper-V VMBus driver propagates cache-coherency information
+from the VMBus node in the ACPI DSDT to all VMBus devices,
+including vPCI devices (since they have a dual identity as a VMBus
+device and as a PCI device).  See vmbus_dma_configure().
+Current Hyper-V versions always indicate that the VMBus is
+cache coherent, so vPCI devices on arm64 always get marked as
+cache coherent and the CPU does not perform any sync
+operations as part of dma_map/unmap_*() calls.
+
+vPCI protocol versions
+----------------------
+As previously described, during vPCI device setup and teardown,
+messages are passed over a VMBus channel between the Hyper-V
+host and the Hyper-V vPCI driver in the Linux guest.  Some
+messages have been revised in newer versions of Hyper-V, so
+the guest and host must agree on the vPCI protocol version to
+be used.  The version is negotiated when communication over
+the VMBus channel is first established.  See
+hv_pci_protocol_negotiation(). Newer versions of the protocol
+extend support to VMs with more than 64 vCPUs, and provide
+additional information about the vPCI device, such as the
+guest virtual NUMA node to which it is most closely affined in
+the underlying hardware.
+
+Guest NUMA node affinity
+------------------------
+When the vPCI protocol version provides it, the guest NUMA
+node affinity of the vPCI device is stored as part of the Linux
+device information for subsequent use by the Linux driver. See
+hv_pci_assign_numa_node().  If the negotiated protocol version
+does not support the host providing NUMA affinity information,
+the Linux guest defaults the device NUMA node to 0.  But even
+when the negotiated protocol version includes NUMA affinity
+information, the ability of the host to provide such
+information depends on certain host configuration options.  If
+the guest receives NUMA node value "0", it could mean NUMA
+node 0, or it could mean "no information is available".
+Unfortunately it is not possible to distinguish the two cases
+from the guest side.
+
+PCI config space access in a CoCo VM
+------------------------------------
+Linux PCI device drivers access PCI config space using a
+standard set of functions provided by the Linux PCI subsystem.
+In Hyper-V guests these standard functions map to functions
+hv_pcifront_read_config() and hv_pcifront_write_config()
+in the Hyper-V virtual PCI driver.  In normal VMs,
+these hv_pcifront_*() functions directly access the PCI config
+space, and the accesses trap to Hyper-V to be handled.
+But in CoCo VMs, memory encryption prevents Hyper-V
+from reading the guest instruction stream to emulate the
+access, so the hv_pcifront_*() functions must invoke
+hypercalls with explicit arguments describing the access to be
+made.
+
+Config Block back-channel
+-------------------------
+The Hyper-V host and Hyper-V virtual PCI driver in Linux
+together implement a non-standard back-channel communication
+path between the host and guest.  The back-channel path uses
+messages sent over the VMBus channel associated with the vPCI
+device.  The functions hyperv_read_cfg_blk() and
+hyperv_write_cfg_blk() are the primary interfaces provided to
+other parts of the Linux kernel.  As of this writing, these
+interfaces are used only by the Mellanox mlx5 driver to pass
+diagnostic data to a Hyper-V host running in the Azure public
+cloud.  The functions hyperv_read_cfg_blk() and
+hyperv_write_cfg_blk() are implemented in a separate module
+(pci-hyperv-intf.c, under CONFIG_PCI_HYPERV_INTERFACE) that
+effectively stubs them out when running in non-Hyper-V
+environments.
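+
+A hedged usage sketch of the read side (the argument order is an
+illustrative assumption; consult include/linux/hyperv.h for the
+exact signatures)::
+
+  unsigned int returned = 0;
+  int err;
+
+  /* Read diagnostic block blk_id from the host via the vPCI
+   * device's VMBus back-channel. */
+  err = hyperv_read_cfg_blk(pdev, buf, buf_len, blk_id, &returned);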
index 3ec0b7a455a0cf489b93683a49b5362cded0b570..09c7e585ff5800da5a72a1f9dbd8b719f0b6d595 100644 (file)
@@ -8791,6 +8791,11 @@ means the VM type with value @n is supported.  Possible values of @n are::
   #define KVM_X86_DEFAULT_VM   0
   #define KVM_X86_SW_PROTECTED_VM      1
 
+Note, KVM_X86_SW_PROTECTED_VM is currently only for development and testing.
+Do not use KVM_X86_SW_PROTECTED_VM for "real" VMs, and especially not in
+production.  The behavior and effective ABI for software-protected VMs are
+unstable.
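+
+For example, a userspace VMM opts in explicitly when creating the
+VM (illustrative sketch)::
+
+  /* KVM_CREATE_VM takes the VM type as its argument. */
+  int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_X86_SW_PROTECTED_VM);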
+
 9. Known KVM API problems
 =========================
 
index 1d82cc96398bde69455713e9de81fab29fa6c182..6f589aee6146354dd9a044e3e044814af35022b2 100644 (file)
@@ -1395,6 +1395,7 @@ F:        drivers/hwmon/max31760.c
 
 ANALOGBITS PLL LIBRARIES
 M:     Paul Walmsley <paul.walmsley@sifive.com>
+M:     Samuel Holland <samuel.holland@sifive.com>
 S:     Supported
 F:     drivers/clk/analogbits/*
 F:     include/linux/clk/analogbits*
@@ -2156,7 +2157,7 @@ M:        Shawn Guo <shawnguo@kernel.org>
 M:     Sascha Hauer <s.hauer@pengutronix.de>
 R:     Pengutronix Kernel Team <kernel@pengutronix.de>
 R:     Fabio Estevam <festevam@gmail.com>
-R:     NXP Linux Team <linux-imx@nxp.com>
+L:     imx@lists.linux.dev
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -5379,7 +5380,7 @@ CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:     Johannes Weiner <hannes@cmpxchg.org>
 M:     Michal Hocko <mhocko@kernel.org>
 M:     Roman Gushchin <roman.gushchin@linux.dev>
-M:     Shakeel Butt <shakeelb@google.com>
+M:     Shakeel Butt <shakeel.butt@linux.dev>
 R:     Muchun Song <muchun.song@linux.dev>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
@@ -8496,7 +8497,7 @@ FREESCALE IMX / MXC FEC DRIVER
 M:     Wei Fang <wei.fang@nxp.com>
 R:     Shenwei Wang <shenwei.wang@nxp.com>
 R:     Clark Wang <xiaoning.wang@nxp.com>
-R:     NXP Linux Team <linux-imx@nxp.com>
+L:     imx@lists.linux.dev
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -8531,7 +8532,7 @@ F:        drivers/i2c/busses/i2c-imx.c
 FREESCALE IMX LPI2C DRIVER
 M:     Dong Aisheng <aisheng.dong@nxp.com>
 L:     linux-i2c@vger.kernel.org
-L:     linux-imx@nxp.com
+L:     imx@lists.linux.dev
 S:     Maintained
 F:     Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
 F:     drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -10735,7 +10736,7 @@ INTEL DRM I915 DRIVER (Meteor Lake, DG2 and older excluding Poulsbo, Moorestown
 M:     Jani Nikula <jani.nikula@linux.intel.com>
 M:     Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
 M:     Rodrigo Vivi <rodrigo.vivi@intel.com>
-M:     Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
+M:     Tvrtko Ursulin <tursulin@ursulin.net>
 L:     intel-gfx@lists.freedesktop.org
 S:     Supported
 W:     https://drm.pages.freedesktop.org/intel-docs/
@@ -14112,6 +14113,17 @@ F:     mm/
 F:     tools/mm/
 F:     tools/testing/selftests/mm/
 
+MEMORY MAPPING
+M:     Andrew Morton <akpm@linux-foundation.org>
+R:     Liam R. Howlett <Liam.Howlett@oracle.com>
+R:     Vlastimil Babka <vbabka@suse.cz>
+R:     Lorenzo Stoakes <lstoakes@gmail.com>
+L:     linux-mm@kvack.org
+S:     Maintained
+W:     http://www.linux-mm.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F:     mm/mmap.c
+
 MEMORY TECHNOLOGY DEVICES (MTD)
 M:     Miquel Raynal <miquel.raynal@bootlin.com>
 M:     Richard Weinberger <richard@nod.at>
@@ -14370,7 +14382,7 @@ MICROCHIP MCP16502 PMIC DRIVER
 M:     Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
-F:     Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
+F:     Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml
 F:     drivers/regulator/mcp16502.c
 
 MICROCHIP MCP3564 ADC DRIVER
@@ -15243,6 +15255,8 @@ F:      Documentation/networking/
 F:     Documentation/networking/net_cachelines/
 F:     Documentation/process/maintainer-netdev.rst
 F:     Documentation/userspace-api/netlink/
+F:     include/linux/framer/framer-provider.h
+F:     include/linux/framer/framer.h
 F:     include/linux/in.h
 F:     include/linux/indirect_call_wrapper.h
 F:     include/linux/net.h
@@ -15715,7 +15729,7 @@ F:      drivers/iio/gyro/fxas21002c_spi.c
 NXP i.MX 7D/6SX/6UL/93 AND VF610 ADC DRIVER
 M:     Haibo Chen <haibo.chen@nxp.com>
 L:     linux-iio@vger.kernel.org
-L:     linux-imx@nxp.com
+L:     imx@lists.linux.dev
 S:     Maintained
 F:     Documentation/devicetree/bindings/iio/adc/fsl,imx7d-adc.yaml
 F:     Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml
@@ -15752,7 +15766,7 @@ F:      drivers/gpu/drm/imx/dcss/
 NXP i.MX 8QXP ADC DRIVER
 M:     Cai Huoqing <cai.huoqing@linux.dev>
 M:     Haibo Chen <haibo.chen@nxp.com>
-L:     linux-imx@nxp.com
+L:     imx@lists.linux.dev
 L:     linux-iio@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml
@@ -15760,7 +15774,7 @@ F:      drivers/iio/adc/imx8qxp-adc.c
 
 NXP i.MX 8QXP/8QM JPEG V4L2 DRIVER
 M:     Mirela Rabulea <mirela.rabulea@nxp.com>
-R:     NXP Linux Team <linux-imx@nxp.com>
+L:     imx@lists.linux.dev
 L:     linux-media@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml
@@ -15770,7 +15784,7 @@ NXP i.MX CLOCK DRIVERS
 M:     Abel Vesa <abelvesa@kernel.org>
 R:     Peng Fan <peng.fan@nxp.com>
 L:     linux-clk@vger.kernel.org
-L:     linux-imx@nxp.com
+L:     imx@lists.linux.dev
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/abelvesa/linux.git clk/imx
 F:     Documentation/devicetree/bindings/clock/imx*
@@ -16731,6 +16745,7 @@ F:      drivers/pci/controller/dwc/*layerscape*
 PCI DRIVER FOR FU740
 M:     Paul Walmsley <paul.walmsley@sifive.com>
 M:     Greentime Hu <greentime.hu@sifive.com>
+M:     Samuel Holland <samuel.holland@sifive.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
@@ -17983,33 +17998,34 @@ F:    drivers/media/tuners/qt1010*
 
 QUALCOMM ATH12K WIRELESS DRIVER
 M:     Kalle Valo <kvalo@kernel.org>
-M:     Jeff Johnson <quic_jjohnson@quicinc.com>
+M:     Jeff Johnson <jjohnson@kernel.org>
 L:     ath12k@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 F:     drivers/net/wireless/ath/ath12k/
+N:     ath12k
 
 QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
 M:     Kalle Valo <kvalo@kernel.org>
-M:     Jeff Johnson <quic_jjohnson@quicinc.com>
+M:     Jeff Johnson <jjohnson@kernel.org>
 L:     ath10k@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
-F:     Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
 F:     drivers/net/wireless/ath/ath10k/
+N:     ath10k
 
 QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
 M:     Kalle Valo <kvalo@kernel.org>
-M:     Jeff Johnson <quic_jjohnson@quicinc.com>
+M:     Jeff Johnson <jjohnson@kernel.org>
 L:     ath11k@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath11k
 B:     https://wireless.wiki.kernel.org/en/users/Drivers/ath11k/bugreport
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
-F:     Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
 F:     drivers/net/wireless/ath/ath11k/
+N:     ath11k
 
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:     Toke Høiland-Jørgensen <toke@toke.dk>
@@ -19640,7 +19656,7 @@ F:      drivers/mmc/host/sdhci-of-at91.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) NXP i.MX DRIVER
 M:     Haibo Chen <haibo.chen@nxp.com>
-L:     linux-imx@nxp.com
+L:     imx@lists.linux.dev
 L:     linux-mmc@vger.kernel.org
 S:     Maintained
 F:     drivers/mmc/host/sdhci-esdhc-imx.c
@@ -19975,36 +19991,15 @@ S:    Maintained
 F:     drivers/watchdog/simatic-ipc-wdt.c
 
 SIFIVE DRIVERS
-M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Paul Walmsley <paul.walmsley@sifive.com>
+M:     Samuel Holland <samuel.holland@sifive.com>
 L:     linux-riscv@lists.infradead.org
 S:     Supported
+F:     drivers/dma/sf-pdma/
 N:     sifive
+K:     fu[57]40
 K:     [^@]sifive
 
-SIFIVE CACHE DRIVER
-M:     Conor Dooley <conor@kernel.org>
-L:     linux-riscv@lists.infradead.org
-S:     Maintained
-F:     Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
-F:     drivers/cache/sifive_ccache.c
-
-SIFIVE FU540 SYSTEM-ON-CHIP
-M:     Paul Walmsley <paul.walmsley@sifive.com>
-M:     Palmer Dabbelt <palmer@dabbelt.com>
-L:     linux-riscv@lists.infradead.org
-S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
-N:     fu540
-K:     fu540
-
-SIFIVE PDMA DRIVER
-M:     Green Wan <green.wan@sifive.com>
-S:     Maintained
-F:     Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
-F:     drivers/dma/sf-pdma/
-
-
 SILEAD TOUCHSCREEN DRIVER
 M:     Hans de Goede <hdegoede@redhat.com>
 L:     linux-input@vger.kernel.org
@@ -20213,8 +20208,8 @@ F:      Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
 F:     drivers/net/ethernet/socionext/sni_ave.c
 
 SOCIONEXT (SNI) NETSEC NETWORK DRIVER
-M:     Jassi Brar <jaswinder.singh@linaro.org>
 M:     Ilias Apalodimas <ilias.apalodimas@linaro.org>
+M:     Masahisa Kojima <kojima.masahisa@socionext.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/socionext,synquacer-netsec.yaml
@@ -22885,9 +22880,8 @@ S:      Maintained
 F:     drivers/usb/typec/mux/pi3usb30532.c
 
 USB TYPEC PORT CONTROLLER DRIVERS
-M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-usb@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/usb/typec/tcpm/
 
 USB UHCI DRIVER
index 41fa8a2565f54e4773321a193d3d7c5abb5f07f9..c7ee53f4bf044539bb783fbccd207b449a2dd880 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index ff68dfb4eb7874a00d398bf7dfc2d242385c5620..90bd12feac010108def3f68756edf4e2d76c2e84 100644 (file)
                msix: msix@fbe00000 {
                        compatible = "al,alpine-msix";
                        reg = <0x0 0xfbe00000 0x0 0x100000>;
-                       interrupt-controller;
                        msi-controller;
                        al,msi-base-spi = <96>;
                        al,msi-num-spis = <64>;
index 530491ae5eb26060f68802cf3318914f7fb2d361..857cb26ed6d7e8acd13c5695daa9fb3b8699c3c1 100644 (file)
        i2c0: i2c-bus@40 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x40 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c1: i2c-bus@80 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x80 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c2: i2c-bus@c0 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0xc0 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c3: i2c-bus@100 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x100 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c4: i2c-bus@140 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x140 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c5: i2c-bus@180 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x180 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c6: i2c-bus@1c0 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x1c0 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c7: i2c-bus@300 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x300 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c8: i2c-bus@340 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x340 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c9: i2c-bus@380 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x380 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c10: i2c-bus@3c0 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x3c0 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c11: i2c-bus@400 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x400 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c12: i2c-bus@440 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x440 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
        i2c13: i2c-bus@480 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x480 0x40>;
                compatible = "aspeed,ast2400-i2c-bus";
index 04f98d1dbb97c84c318c7e6a133fbf4572237c47..e6f3cf3c721e574f8b9975254cdcc79e3ce3b725 100644 (file)
                                interrupts = <40>;
                                reg = <0x1e780200 0x0100>;
                                clocks = <&syscon ASPEED_CLK_APB>;
+                               #interrupt-cells = <2>;
                                interrupt-controller;
                                bus-frequency = <12000000>;
                                pinctrl-names = "default";
        i2c0: i2c-bus@40 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x40 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c1: i2c-bus@80 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x80 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c2: i2c-bus@c0 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0xc0 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c3: i2c-bus@100 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x100 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c4: i2c-bus@140 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x140 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c5: i2c-bus@180 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x180 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c6: i2c-bus@1c0 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x1c0 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c7: i2c-bus@300 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x300 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c8: i2c-bus@340 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x340 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c9: i2c-bus@380 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x380 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c10: i2c-bus@3c0 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x3c0 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c11: i2c-bus@400 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x400 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c12: i2c-bus@440 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x440 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
        i2c13: i2c-bus@480 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
 
                reg = <0x480 0x40>;
                compatible = "aspeed,ast2500-i2c-bus";
index c4d1faade8be33d52c91f797f3fedaa0b22566a2..29f94696d8b189cba0113e7a65bbb25611358710 100644 (file)
                                reg = <0x1e780500 0x100>;
                                interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&syscon ASPEED_CLK_APB2>;
+                               #interrupt-cells = <2>;
                                interrupt-controller;
                                bus-frequency = <12000000>;
                                pinctrl-names = "default";
                                reg = <0x1e780600 0x100>;
                                interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&syscon ASPEED_CLK_APB2>;
+                               #interrupt-cells = <2>;
                                interrupt-controller;
                                bus-frequency = <12000000>;
                                pinctrl-names = "default";
        i2c0: i2c-bus@80 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x80 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c1: i2c-bus@100 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x100 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c2: i2c-bus@180 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x180 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c3: i2c-bus@200 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x200 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c4: i2c-bus@280 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x280 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c5: i2c-bus@300 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x300 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c6: i2c-bus@380 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x380 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c7: i2c-bus@400 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x400 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c8: i2c-bus@480 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x480 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c9: i2c-bus@500 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x500 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c10: i2c-bus@580 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x580 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c11: i2c-bus@600 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x600 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c12: i2c-bus@680 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x680 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c13: i2c-bus@700 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x700 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c14: i2c-bus@780 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x780 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
        i2c15: i2c-bus@800 {
                #address-cells = <1>;
                #size-cells = <0>;
-               #interrupt-cells = <1>;
                reg = <0x800 0x80>;
                compatible = "aspeed,ast2600-i2c-bus";
                clocks = <&syscon ASPEED_CLK_APB2>;
index f9f79ed825181b7e71b12f87d7ba21ade0fd6d4d..07ca0d993c9fdb27ef50e3c450f3472ebe67f858 100644 (file)
                        #gpio-cells = <2>;
                        gpio-controller;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&mailbox>;
                        interrupts = <0>;
                };
                        gpio-controller;
                        interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                };
 
                i2c1: i2c@1800b000 {
                        gpio-controller;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                        gpio-ranges = <&pinctrl 0 42 1>,
                                        <&pinctrl 1 44 3>,
index 788a6806191a33a04aa326a0645d5af06365571d..75545b10ef2fa69570f42422e15a2341d4cfaf92 100644 (file)
                        gpio-controller;
                        ngpios = <4>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
                };
 
index 9d20ba3b1ffb13d4983f28e66de7ae140af528be..6a4482c9316741d89eb67371ac13a3670783b8fc 100644 (file)
                        gpio-controller;
                        ngpios = <32>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                        gpio-ranges = <&pinctrl 0 0 32>;
                };
                        gpio-controller;
                        ngpios = <4>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
                };
 
index 4d70f6afd13ab5ee5df7ea621b56f81f4e642d41..6d5e69035f94dcaa3f323c833c1edd064d4f7dfd 100644 (file)
@@ -60,6 +60,8 @@
                         * We have slots (IDSEL) 1 and 2 with one assigned IRQ
                         * each handling all IRQs.
                         */
+                       #interrupt-cells = <1>;
+                       interrupt-map-mask = <0xf800 0 0 7>;
                        interrupt-map =
                        /* IDSEL 1 */
                        <0x0800 0 0 1 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 1 is irq 11 */
index 9ec0169bacf8c2098814ec6c1399e41c910df464..5f4c849915db71390ab3050b7277b7893b075307 100644 (file)
@@ -89,6 +89,8 @@
                         * The slots have Ethernet, Ethernet, NEC and MPCI.
                         * The IDSELs are 11, 12, 13, 14.
                         */
+                       #interrupt-cells = <1>;
+                       interrupt-map-mask = <0xf800 0 0 7>;
                        interrupt-map =
                        /* IDSEL 11 - Ethernet A */
                        <0x5800 0 0 1 &gpio0 4 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 11 is irq 4 */
index dffb9f84e67c50c63ba5268a9975c62b93e75157..c841eb8e7fb1d0404301f4f8b21899fb60b77a25 100644 (file)
@@ -65,6 +65,7 @@
                        gpio2: gpio-expander@20 {
                                #gpio-cells = <2>;
                                #interrupt-cells = <2>;
+                               interrupt-controller;
                                compatible = "semtech,sx1505q";
                                reg = <0x20>;
 
@@ -79,6 +80,7 @@
                        gpio3: gpio-expander@21 {
                                #gpio-cells = <2>;
                                #interrupt-cells = <2>;
+                               interrupt-controller;
                                compatible = "semtech,sx1505q";
                                reg = <0x21>;
 
index fd671c7a1e5d64c6eafb0a7434c7d14b19f4d1b6..6e1f0f164cb4f511d19774a8c39a9a3090d85b9d 100644 (file)
                                interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
                                             <3 IRQ_TYPE_LEVEL_HIGH>,
                                             <4 IRQ_TYPE_LEVEL_HIGH>;
+                               #interrupt-cells = <2>;
                                interrupt-controller;
                        };
 
                                gpio-controller;
                                #gpio-cells = <2>;
                                interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+                               #interrupt-cells = <2>;
                                interrupt-controller;
                        };
 
index 1640763fd4af2216c225b95e60e954afd5255fb5..ff0d684622f74d13eb1b4b2c7178c38e93ab4293 100644 (file)
                        compatible = "st,stmpe811";
                        reg = <0x41>;
                        irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
-                       interrupt-controller;
                        id = <0>;
                        blocks = <0x5>;
                        irq-trigger = <0x1>;
index 3b6fad273cabf17a6ddff7ede1d72de13079ed1f..d38f1dd38a9068371c25ddf82f4c284a555ffb03 100644 (file)
                        compatible = "st,stmpe811";
                        reg = <0x41>;
                        irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
-                       interrupt-controller;
                        id = <0>;
                        blocks = <0x5>;
                        irq-trigger = <0x1>;
index 4eb526fe9c55888d6a595d68d3a95616bb913404..81c8a5fd92ccea33b3673d61302d39397e8fa72f 100644 (file)
                        compatible = "st,stmpe811";
                        reg = <0x41>;
                        irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
-                       interrupt-controller;
                        id = <0>;
                        blocks = <0x5>;
                        irq-trigger = <0x1>;
index db8c332df6a1d53f1b3eff6572a9f080ac10fe0a..cad112e054758f7ce364f2346eb4e1e291086a61 100644 (file)
 
                #address-cells = <3>;
                #size-cells = <2>;
-               #interrupt-cells = <1>;
 
                bridge@2,1 {
                        compatible = "pci10b5,8605";
 
                        #address-cells = <3>;
                        #size-cells = <2>;
-                       #interrupt-cells = <1>;
 
                        /* Intel Corporation I210 Gigabit Network Connection */
                        ethernet@3,0 {
 
                        #address-cells = <3>;
                        #size-cells = <2>;
-                       #interrupt-cells = <1>;
 
                        /* Intel Corporation I210 Gigabit Network Connection */
                        switch_nic: ethernet@4,0 {
index 99f4f6ac71d4a18f6f6eb2f0476c47280ba844b7..c1ae7c47b44227c2438d4e7c73fbafd6eaa269b9 100644 (file)
                                reg = <0x74>;
                                gpio-controller;
                                #gpio-cells = <2>;
+                               #interrupt-cells = <2>;
                                interrupt-controller;
                                interrupt-parent = <&gpio2>;
                                interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
 
                #address-cells = <3>;
                #size-cells = <2>;
-               #interrupt-cells = <1>;
        };
 };
 
index 2ae93f57fe5acac1f3f437b082e258ed81a391e0..ea40623d12e5fddc11b2af150ca6a80af93510a3 100644 (file)
                blocks = <0x5>;
                id = <0>;
                interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
-               interrupt-controller;
                interrupt-parent = <&gpio4>;
                irq-trigger = <0x1>;
                pinctrl-names = "default";
index 55c90f6393ad5e1176b5f8af6ca94bcf9c368477..d3a7a6eeb8e09edff6963de86527e13899e3c956 100644 (file)
                blocks = <0x5>;
                interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
                interrupt-parent = <&gpio6>;
-               interrupt-controller;
                id = <0>;
                irq-trigger = <0x1>;
                pinctrl-names = "default";
index a63e73adc1fc532175d8cd1baca8ede060f4d2f8..42b2ba23aefc9e26ddb3a8e0317013e30602fdbe 100644 (file)
                pinctrl-0 = <&pinctrl_pmic>;
                interrupt-parent = <&gpio2>;
                interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
-               interrupt-controller;
 
                onkey {
                        compatible = "dlg,da9063-onkey";
index 113974520d544b72ff3397629935037c1d1cae53..c0c47adc5866e3ea157b499f15d8edf8b2d1fcde 100644 (file)
                reg = <0x58>;
                interrupt-parent = <&gpio2>;
                interrupts = <9 IRQ_TYPE_LEVEL_LOW>; /* active-low GPIO2_9 */
+               #interrupt-cells = <2>;
                interrupt-controller;
 
                regulators {
index 86b4269e0e0117b3906b625537444533c28510fb..85e278eb201610a1c851c4093025bb205e02a3b3 100644 (file)
                interrupt-parent = <&gpio1>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
                gpio-controller;
                #gpio-cells = <2>;
 
index 12361fcbe24aff98a70482f2a7885c6ce28cb3b2..1b965652291bfaf5d6bad76ac3eaf10974eac6ea 100644 (file)
@@ -63,6 +63,7 @@
                gpio-controller;
                #gpio-cells = <2>;
                #interrupt-cells = <2>;
+               interrupt-controller;
                reg = <0x25>;
        };
 
index ebf7befcc11e3e8cd5985d72c384ae2248635bcc..9c81c6baa2d39ae7cd73a34144598d513423c343 100644 (file)
                                        <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>;
                                clock-names = "pix", "axi";
                                status = "disabled";
-
-                               port {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
-
-                                       lcdif_out_mipi_dsi: endpoint@0 {
-                                               reg = <0>;
-                                               remote-endpoint = <&mipi_dsi_in_lcdif>;
-                                       };
-                               };
                        };
 
                        mipi_csi: mipi-csi@30750000 {
                                samsung,esc-clock-frequency = <20000000>;
                                samsung,pll-clock-frequency = <24000000>;
                                status = "disabled";
-
-                               ports {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
-
-                                       port@0 {
-                                               reg = <0>;
-                                               #address-cells = <1>;
-                                               #size-cells = <0>;
-
-                                               mipi_dsi_in_lcdif: endpoint@0 {
-                                                       reg = <0>;
-                                                       remote-endpoint = <&lcdif_out_mipi_dsi>;
-                                               };
-                                       };
-                               };
                        };
                };
 
index b0ed68af0546702d9413c492da6796194208c347..029f49be40e373f706f7f67c34358ba9272ea0af 100644 (file)
                reg = <0x22>;
                gpio-controller;
                #gpio-cells = <2>;
+               #interrupt-cells = <2>;
                interrupt-controller;
                interrupt-parent = <&gpio3>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
index 2045fc779f887030735f9310982bdef228f8a481..27429d0fedfba8ac6f144c55dbd49d295f5cec29 100644 (file)
                                          "msi8";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc 0 0 0 141 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
-                                       <0 0 0 2 &intc 0 0 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
-                                       <0 0 0 3 &intc 0 0 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
-                                       <0 0 0 4 &intc 0 0 0 144 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+                       interrupt-map = <0 0 0 1 &intc 0 141 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+                                       <0 0 0 2 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+                                       <0 0 0 3 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+                                       <0 0 0 4 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
 
                        clocks = <&gcc GCC_PCIE_PIPE_CLK>,
                                 <&gcc GCC_PCIE_AUX_CLK>,
index 2fba4d084001b9646ee012eb967e96a27695bfa6..8590981245a62057c2b61370e57a7627f36496e8 100644 (file)
                        interrupt-parent = <&irqc0>;
                        interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
 
                        rtc {
                                compatible = "dlg,da9063-rtc";
index f9bc5b4f019d02136aa99631c1b2e8c67e9651de..683f7395fab0b6961e5f00a3985fc9b690469237 100644 (file)
                interrupt-parent = <&irqc0>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
 
                onkey {
                        compatible = "dlg,da9063-onkey";
index e9c13bb03772af44eada731a13b5ee88a2e3de7c..0efd9f98c75aced03009396d1c6e6ac023d84c4a 100644 (file)
                interrupt-parent = <&irqc0>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
 
                rtc {
                        compatible = "dlg,da9063-rtc";
index 7e8bc06715f6564badf502267a33c3737c206cf9..93c86e9216455577271652dcbeb8623faba69885 100644 (file)
                interrupt-parent = <&irqc0>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
 
                watchdog {
                        compatible = "dlg,da9063-watchdog";
index 4f9838cf97ee4fb608b27bfc3d637edee39f3c95..540a9ad28f28ac1a08c7b4f5d3e6a23bcfc262e0 100644 (file)
                interrupt-parent = <&irqc>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
 
                rtc {
                        compatible = "dlg,da9063-rtc";
index 1744fdbf9e0ce08d2a30180e1462dd46a18152f9..1ea6c757893bc0bf5ae4d7c6a6c91854939f9b3f 100644 (file)
                interrupt-parent = <&irqc0>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
 
                rtc {
                        compatible = "dlg,da9063-rtc";
index c0d067df22a03d4e2590333965c7c8d7a6f539d6..b5ecafbb2e4de582e4449e7abba6217d4e35dcdb 100644 (file)
                interrupt-parent = <&gpio3>;
                interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
 
                rtc {
                        compatible = "dlg,da9063-rtc";
index 43d480a7f3eacc21636788f15e2b27ce3d4dec43..595e074085eb4cd3cf9ad84d59b138051302ef5e 100644 (file)
                interrupt-parent = <&gpio3>;
                interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
 
                onkey {
                        compatible = "dlg,da9063-onkey";
index abf3006f0a842435b9d56750e805fe93261649c6..f3291f3bbc6fd2b480e975632847f9310c082225 100644 (file)
        pwm4: pwm@10280000 {
                compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
                reg = <0x10280000 0x10>;
-               interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
        pwm5: pwm@10280010 {
                compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
                reg = <0x10280010 0x10>;
-               interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
        pwm6: pwm@10280020 {
                compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
                reg = <0x10280020 0x10>;
-               interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
        pwm7: pwm@10280030 {
                compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
                reg = <0x10280030 0x10>;
-               interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
        pwm0: pwm@20040000 {
                compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
                reg = <0x20040000 0x10>;
-               interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
        pwm1: pwm@20040010 {
                compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
                reg = <0x20040010 0x10>;
-               interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
        pwm2: pwm@20040020 {
                compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
                reg = <0x20040020 0x10>;
-               interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
        pwm3: pwm@20040030 {
                compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
                reg = <0x20040030 0x10>;
-               interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
index 576235ec3c516ee2136dd2b4a9c95a2ded61a3b3..afa417b34b25ffd7351885071e72989dd635b382 100644 (file)
                reg = <0x42>;
                interrupts = <8 3>;
                interrupt-parent = <&gpioi>;
-               interrupt-controller;
                wakeup-source;
 
                stmpegpio: stmpe_gpio {
index 510cca5acb79ca449dc11ba043475cfc43becc4c..7a701f7ef0c70467181e71719f17712ca4341562 100644 (file)
@@ -64,7 +64,6 @@
                reg = <0x38>;
                interrupts = <2 2>;
                interrupt-parent = <&gpiof>;
-               interrupt-controller;
                touchscreen-size-x = <480>;
                touchscreen-size-y = <800>;
                status = "okay";
index c8e55642f9c6e5acc43a741a769f798be6cccb37..3e834fc7e3707d4573b75cbfd89a49423c3ec6a5 100644 (file)
                reg = <0x41>;
                interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
                interrupt-parent = <&gpio2>;
-               interrupt-controller;
                id = <0>;
                blocks = <0x5>;
                irq-trigger = <0x1>;
index 0a90583f9f017ed2f88cd20cb6f731440909e830..8f9dbe8d90291ef33f42498d29f477cf54337b2a 100644 (file)
@@ -297,6 +297,7 @@ CONFIG_FB_MODE_HELPERS=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_L4F00242T03=y
 CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_BACKLIGHT_GPIO=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
index 71b1139764204c506bae31fc31c23f7a51bf61a3..8b1ec60a9a467abcc3bc80c07be12bf734d7c236 100644 (file)
@@ -339,6 +339,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
                                GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
                GPIO_LOOKUP_IDX("G", 0, NULL, 1,
                                GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+               { }
        },
 };
 
index 91d505b385de5a55f66b125586158c75720672a6..1f1f8d865d0e52a2a872d677504a125e06f57746 100644 (file)
@@ -42,5 +42,6 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-bigtreetech-cb1-manta.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-bigtreetech-pi.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-orangepi-zero2.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-x96-mate.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero2w.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero3.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-transpeed-8k618-t.dtb
index dccbba6e7f98e49f572b57c86415dced108fee2d..dbf2dce8d1d68a5225311bf330704e9f6d1ead40 100644 (file)
                msix: msix@fbe00000 {
                        compatible = "al,alpine-msix";
                        reg = <0x0 0xfbe00000 0x0 0x100000>;
-                       interrupt-controller;
                        msi-controller;
                        al,msi-base-spi = <160>;
                        al,msi-num-spis = <160>;
index 39481d7fd7d4da806fe1ab1e4b2320cc732f37d5..3ea178acdddfe2072352283f47318f0f75808c4f 100644 (file)
                msix: msix@fbe00000 {
                        compatible = "al,alpine-msix";
                        reg = <0x0 0xfbe00000 0x0 0x100000>;
-                       interrupt-controller;
                        msi-controller;
                        al,msi-base-spi = <336>;
                        al,msi-num-spis = <959>;
index 9dcd25ec2c04183fb90f160452142c2f5a790136..896d1f33b5b6173e3b4b701d4e08f4ad277856e0 100644 (file)
                        #gpio-cells = <2>;
                        gpio-controller;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
                };
 
index f049687d6b96d23fb0383401ef9c19e50af34148..d8516ec0dae7450e2c5e81f0bddf8ffdeba2bb5e 100644 (file)
                        #gpio-cells = <2>;
                        gpio-controller;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
                        gpio-ranges = <&pinmux 0 0 16>,
                                        <&pinmux 16 71 2>,
index 2e027675d7bbe16300b91be4b6f5522b245dea12..2cb0212b63c6eda77567f90d7960ce89825bd114 100644 (file)
@@ -20,23 +20,41 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-frwy.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-tqmls1046a-mbls10xxa.dtb
+DTC_FLAGS_fsl-ls1088a-qds := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-qds.dtb
+DTC_FLAGS_fsl-ls1088a-rdb := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb
+DTC_FLAGS_fsl-ls1088a-ten64 := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-ten64.dtb
+DTC_FLAGS_fsl-ls1088a-tqmls1088a-mbls10xxa := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-tqmls1088a-mbls10xxa.dtb
+DTC_FLAGS_fsl-ls2080a-qds := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb
+DTC_FLAGS_fsl-ls2080a-rdb := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
+DTC_FLAGS_fsl-ls2081a-rdb := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2081a-rdb.dtb
+DTC_FLAGS_fsl-ls2080a-simu := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
+DTC_FLAGS_fsl-ls2088a-qds := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
+DTC_FLAGS_fsl-ls2088a-rdb := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
+DTC_FLAGS_fsl-lx2160a-bluebox3 := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-bluebox3.dtb
+DTC_FLAGS_fsl-lx2160a-bluebox3-rev-a := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-bluebox3-rev-a.dtb
+DTC_FLAGS_fsl-lx2160a-clearfog-cx := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-clearfog-cx.dtb
+DTC_FLAGS_fsl-lx2160a-honeycomb := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb
+DTC_FLAGS_fsl-lx2160a-qds := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
+DTC_FLAGS_fsl-lx2160a-rdb := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
+DTC_FLAGS_fsl-lx2162a-clearfog := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2162a-clearfog.dtb
+DTC_FLAGS_fsl-lx2162a-qds := -Wno-interrupt_map
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2162a-qds.dtb
 
 fsl-ls1028a-qds-13bb-dtbs := fsl-ls1028a-qds.dtb fsl-ls1028a-qds-13bb.dtbo
@@ -53,6 +71,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds-85bb.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds-899b.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds-9999.dtb
 
+DTC_FLAGS_fsl-lx2160a-tqmlx2160a-mblx2160a := -Wno-interrupt_map
 fsl-lx2160a-tqmlx2160a-mblx2160a-12-11-x-dtbs := fsl-lx2160a-tqmlx2160a-mblx2160a.dtb \
        fsl-lx2160a-tqmlx2160a-mblx2160a_12_x_x.dtbo \
        fsl-lx2160a-tqmlx2160a-mblx2160a_x_11_x.dtbo
index f38ee2266b25dd811e1a8f29c7380aed337a1337..a6b94d1957c92ac6bcc18667b477ca05eda8b1bc 100644 (file)
                pinctrl-0 = <&pinctrl_ptn5150>;
                status = "okay";
 
-               connector {
-                       compatible = "usb-c-connector";
-                       label = "USB-C";
-
-                       port {
-                               typec1_dr_sw: endpoint {
-                                       remote-endpoint = <&usb1_drd_sw>;
-                               };
+               port {
+                       typec1_dr_sw: endpoint {
+                               remote-endpoint = <&usb1_drd_sw>;
                        };
                };
        };
index d98a040860a48a3ff2c6592420853a0dacc9b48a..5828c9d7821de1eab50967972cf406f8f6359da5 100644 (file)
 &uart4 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart4>;
-       status = "okay";
+       status = "disabled";
 };
 
 &usb3_phy0 {
index fea67a9282f033121323ef2c86e200deac9463d4..b749e28e5ede5cf85f309f2f7903ebee44b41f98 100644 (file)
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_ptn5150>;
 
-                               connector {
-                                       compatible = "usb-c-connector";
-                                       label = "USB-C";
-
-                                       port {
-                                               ptn5150_out_ep: endpoint {
-                                                       remote-endpoint = <&dwc3_0_ep>;
-                                               };
+                               port {
+
+                                       ptn5150_out_ep: endpoint {
+                                               remote-endpoint = <&dwc3_0_ep>;
                                        };
                                };
                        };
index 4ae4fdab461e008d4816816eedb90f91e7d32561..43f1d45ccc96f01686534d228de9b69630db3ebb 100644 (file)
                                  <&clk IMX8MP_AUDIO_PLL2_OUT>;
                assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
                assigned-clock-rates = <13000000>, <13000000>, <156000000>;
-               reset-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio1 GPIO_ACTIVE_HIGH>;
                status = "disabled";
 
                ports {
index a2d5d19b2de0cb8b69a8ce55fbaeb0c6ba410907..86d3da36e4f3eecf64c0168c825baee86dfdab3f 100644 (file)
                enable-active-high;
        };
 
+       reg_vcc_1v8: regulator-1v8 {
+               compatible = "regulator-fixed";
+               regulator-name = "VCC_1V8";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
        reg_vcc_3v3: regulator-3v3 {
                compatible = "regulator-fixed";
                regulator-name = "VCC_3V3";
                clock-names = "mclk";
                clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1>;
                reset-gpios = <&gpio4 29 GPIO_ACTIVE_LOW>;
-               iov-supply = <&reg_vcc_3v3>;
+               iov-supply = <&reg_vcc_1v8>;
                ldoin-supply = <&reg_vcc_3v3>;
        };
 
index 76c73daf546bd0f64bc22e5e1176d814ad677e18..39a550c1cd261dd516da26757bfa8eccd908b92a 100644 (file)
                                        compatible = "fsl,imx8mp-ldb";
                                        reg = <0x5c 0x4>, <0x128 0x4>;
                                        reg-names = "ldb", "lvds";
-                                       clocks = <&clk IMX8MP_CLK_MEDIA_LDB>;
+                                       clocks = <&clk IMX8MP_CLK_MEDIA_LDB_ROOT>;
                                        clock-names = "ldb";
                                        assigned-clocks = <&clk IMX8MP_CLK_MEDIA_LDB>;
                                        assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>;
index 48ec4ebec0a83e65bb4978e2f2ffa9cb7aba873c..b864ffa74ea8b6ff72afbd698eab4d30ad990a37 100644 (file)
        amba {
                #address-cells = <2>;
                #size-cells = <1>;
-               #interrupt-cells = <3>;
 
                compatible = "simple-bus";
                interrupt-parent = <&gic>;
index 3869460aa5dcb5da3a3fb32f8e0df6903b88862c..996fb39bb50c1f2074ddd5ac03f191091920c96b 100644 (file)
        amba {
                #address-cells = <2>;
                #size-cells = <1>;
-               #interrupt-cells = <3>;
 
                compatible = "simple-bus";
                interrupt-parent = <&gic>;
index 2c920e22cec2b52dd983f2d20812e7fe80a0c379..7ec7c789d87eff436c4f7362e417c71e2033a5b1 100644 (file)
 
                        odmi: odmi@300000 {
                                compatible = "marvell,odmi-controller";
-                               interrupt-controller;
                                msi-controller;
                                marvell,odmi-frames = <4>;
                                reg = <0x300000 0x4000>,
index 69c7f3954ae59a8008a257807d31f227ba1cd2a8..4127cb84eba41a39f0fbff423a43de827dbea695 100644 (file)
                compatible = "mediatek,mt6360";
                reg = <0x34>;
                interrupt-controller;
+               #interrupt-cells = <1>;
                interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>;
                interrupt-names = "IRQB";
 
index ea13c4a7027c46ba5f5151947537b5376bcbad20..81a82933e35004e7df51383ed22d291e40874dd9 100644 (file)
                        status = "okay";
 
                        phy-handle = <&mgbe0_phy>;
-                       phy-mode = "usxgmii";
+                       phy-mode = "10gbase-r";
 
                        mdio {
                                #address-cells = <1>;
index 3f16595d099c5620b0d2dde77f0e2c6491c4a576..d1bd328892afa2c319750b20c5b8b979283e6481 100644 (file)
                                        <&mc TEGRA234_MEMORY_CLIENT_MGBEAWR &emc>;
                        interconnect-names = "dma-mem", "write";
                        iommus = <&smmu_niso0 TEGRA234_SID_MGBE>;
-                       power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEA>;
+                       power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEB>;
                        status = "disabled";
                };
 
                                        <&mc TEGRA234_MEMORY_CLIENT_MGBEBWR &emc>;
                        interconnect-names = "dma-mem", "write";
                        iommus = <&smmu_niso0 TEGRA234_SID_MGBE_VF1>;
-                       power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEB>;
+                       power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEC>;
                        status = "disabled";
                };
 
                                        <&mc TEGRA234_MEMORY_CLIENT_MGBECWR &emc>;
                        interconnect-names = "dma-mem", "write";
                        iommus = <&smmu_niso0 TEGRA234_SID_MGBE_VF2>;
-                       power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEC>;
+                       power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBED>;
                        status = "disabled";
                };
 
index 5e1277fea7250b4132039efb18f1cfaafdc5257e..61c8fd49c96678740684696397eb15118d83e1b9 100644 (file)
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
-                                       <0 0 0 2 &intc 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
-                                       <0 0 0 3 &intc 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
-                                       <0 0 0 4 &intc 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+                       interrupt-map = <0 0 0 1 &intc 0 0 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+                                       <0 0 0 2 &intc 0 0 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+                                       <0 0 0 3 &intc 0 0 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+                                       <0 0 0 4 &intc 0 0 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
 
                        clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
                                 <&gcc GCC_PCIE0_AXI_M_CLK>,
index cf295bed32998087cee60bd0ce61d0cf587d2c0a..26441447c866f6095aa26d48bb15c79f73bdd6c8 100644 (file)
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc 0 142
+                       interrupt-map = <0 0 0 1 &intc 0 0 0 142
                                         IRQ_TYPE_LEVEL_HIGH>, /* int_a */
-                                       <0 0 0 2 &intc 0 143
+                                       <0 0 0 2 &intc 0 0 0 143
                                         IRQ_TYPE_LEVEL_HIGH>, /* int_b */
-                                       <0 0 0 3 &intc 0 144
+                                       <0 0 0 3 &intc 0 0 0 144
                                         IRQ_TYPE_LEVEL_HIGH>, /* int_c */
-                                       <0 0 0 4 &intc 0 145
+                                       <0 0 0 4 &intc 0 0 0 145
                                         IRQ_TYPE_LEVEL_HIGH>; /* int_d */
 
                        clocks = <&gcc GCC_SYS_NOC_PCIE1_AXI_CLK>,
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc 0 75
+                       interrupt-map = <0 0 0 1 &intc 0 0 0 75
                                         IRQ_TYPE_LEVEL_HIGH>, /* int_a */
-                                       <0 0 0 2 &intc 0 78
+                                       <0 0 0 2 &intc 0 0 0 78
                                         IRQ_TYPE_LEVEL_HIGH>, /* int_b */
-                                       <0 0 0 3 &intc 0 79
+                                       <0 0 0 3 &intc 0 0 0 79
                                         IRQ_TYPE_LEVEL_HIGH>, /* int_c */
-                                       <0 0 0 4 &intc 0 83
+                                       <0 0 0 4 &intc 0 0 0 83
                                         IRQ_TYPE_LEVEL_HIGH>; /* int_d */
 
                        clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
index 8d41ed261adfbfc99e15c07755f54d8f4cf5cc80..ee6f87c828aefab76ff58c1ba1f59ae023068381 100644 (file)
                };
        };
 
-       mpm: interrupt-controller {
-               compatible = "qcom,mpm";
-               qcom,rpm-msg-ram = <&apss_mpm>;
-               interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
-               mboxes = <&apcs_glb 1>;
-               interrupt-controller;
-               #interrupt-cells = <2>;
-               #power-domain-cells = <0>;
-               interrupt-parent = <&intc>;
-               qcom,mpm-pin-count = <96>;
-               qcom,mpm-pin-map = <2 184>,  /* TSENS1 upper_lower_int */
-                                  <52 243>, /* DWC3_PRI ss_phy_irq */
-                                  <79 347>, /* DWC3_PRI hs_phy_irq */
-                                  <80 352>, /* DWC3_SEC hs_phy_irq */
-                                  <81 347>, /* QUSB2_PHY_PRI DP+DM */
-                                  <82 352>, /* QUSB2_PHY_SEC DP+DM */
-                                  <87 326>; /* SPMI */
-       };
-
        psci {
                compatible = "arm,psci-1.0";
                method = "smc";
                };
 
                rpm_msg_ram: sram@68000 {
-                       compatible = "qcom,rpm-msg-ram", "mmio-sram";
+                       compatible = "qcom,rpm-msg-ram";
                        reg = <0x00068000 0x6000>;
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-                       ranges = <0 0x00068000 0x7000>;
-
-                       apss_mpm: sram@1b8 {
-                               reg = <0x1b8 0x48>;
-                       };
                };
 
                qfprom@74000 {
                        reg = <0x004ad000 0x1000>, /* TM */
                              <0x004ac000 0x1000>; /* SROT */
                        #qcom,sensors = <8>;
-                       interrupts-extended = <&mpm 2 IRQ_TYPE_LEVEL_HIGH>,
-                                             <&intc GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "uplow", "critical";
                        #thermal-sensor-cells = <1>;
                };
                        interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
                        gpio-controller;
                        gpio-ranges = <&tlmm 0 0 150>;
-                       wakeup-parent = <&mpm>;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
                              <0x0400a000 0x002100>;
                        reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
                        interrupt-names = "periph_irq";
-                       interrupts-extended = <&mpm 87 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
                        qcom,ee = <0>;
                        qcom,channel = <0>;
                        #address-cells = <2>;
                        #size-cells = <1>;
                        ranges;
 
-                       interrupts-extended = <&mpm 79 IRQ_TYPE_LEVEL_HIGH>,
-                                             <&mpm 52 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "hs_phy_irq", "ss_phy_irq";
 
                        clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
index ffc4406422ae2f82c9636e0fb521f34a1d28c1eb..41215567b3aed7d4211a8a4c5ab94042d205b422 100644 (file)
 };
 
 &pcie4 {
+       max-link-speed = <2>;
+
        perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
        wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;
 
index def3976bd5bb154d27228831de14e9463239bdf8..eb657e544961d7c2ac60e0f505767c1427893a14 100644 (file)
 };
 
 &pcie4 {
+       max-link-speed = <2>;
+
        perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
        wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;
 
index 160e098f10757e5f4e9c68e82ecc45f1ce27aa14..f9849b8befbf24b54992d49af812eaa94288c3fb 100644 (file)
                                                 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>,
                                                <&system_noc MASTER_QUP_0 RPM_ALWAYS_TAG
                                                 &bimc SLAVE_EBI_CH0 RPM_ALWAYS_TAG>;
+                               interconnect-names = "qup-core",
+                                                    "qup-config",
+                                                    "qup-memory";
                                #address-cells = <1>;
                                #size-cells = <0>;
                                status = "disabled";
index 9d916edb1c73c10ef5e4fde52e33b75ff902a957..be133a3d5cbe0cb073c0fe8d4f253740da584992 100644 (file)
 
 &tlmm {
        /* Reserved I/Os for NFC */
-       gpio-reserved-ranges = <32 8>;
+       gpio-reserved-ranges = <32 8>, <74 1>;
 
        disp0_reset_n_active: disp0-reset-n-active-state {
                pins = "gpio133";
index 592a67a47c782f667cd48d8d8bcad7d457a84ee4..b9151c2ddf2e5ce7944bed07aa6864ccdc75f2a5 100644 (file)
 
 &tlmm {
        /* Reserved I/Os for NFC */
-       gpio-reserved-ranges = <32 8>;
+       gpio-reserved-ranges = <32 8>, <74 1>;
 
        bt_default: bt-default-state {
                bt-en-pins {
index 3885ef3454ff6e92d8f0d00509d0f935e7e40fa6..50de17e4fb3f25ed0ad490d9b4e593cab2b2cc5a 100644 (file)
                gpio-controller;
                #gpio-cells = <2>;
                interrupt-controller;
+               #interrupt-cells = <2>;
                interrupt-parent = <&gpio6>;
                interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
 
                gpio-controller;
                #gpio-cells = <2>;
                interrupt-controller;
+               #interrupt-cells = <2>;
                interrupt-parent = <&gpio6>;
                interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
        };
                gpio-controller;
                #gpio-cells = <2>;
                interrupt-controller;
+               #interrupt-cells = <2>;
                interrupt-parent = <&gpio7>;
                interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
        };
                gpio-controller;
                #gpio-cells = <2>;
                interrupt-controller;
+               #interrupt-cells = <2>;
                interrupt-parent = <&gpio5>;
                interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
        };
index d0905515399bb00b8562305b7fa5cf8a0eee65b2..9137dd76e72cedb0cfbf1995032e5852cab80f96 100644 (file)
                clock-names = "spiclk", "apb_pclk";
                dmas = <&dmac 12>, <&dmac 13>;
                dma-names = "tx", "rx";
+               num-cs = <2>;
                pinctrl-names = "default";
                pinctrl-0 = <&spi0_clk &spi0_csn &spi0_miso &spi0_mosi>;
                #address-cells = <1>;
                clock-names = "spiclk", "apb_pclk";
                dmas = <&dmac 14>, <&dmac 15>;
                dma-names = "tx", "rx";
+               num-cs = <2>;
                pinctrl-names = "default";
                pinctrl-0 = <&spi1_clk &spi1_csn0 &spi1_csn1 &spi1_miso &spi1_mosi>;
                #address-cells = <1>;
index fb5dcf6e93272180bfd60b8e251a61e61f0e9155..7b4c15c4a9c319da2e92a19ca902884a788e9514 100644 (file)
        pwm3: pwm@ff1b0030 {
                compatible = "rockchip,rk3328-pwm";
                reg = <0x0 0xff1b0030 0x0 0x10>;
-               interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
                clock-names = "pwm", "pclk";
                pinctrl-names = "default";
index d4c70835e0fe28639548ed4bf4579439a5f92308..a4946cdc3bb34ef7bc084f74ae0a4ac8424994df 100644 (file)
@@ -72,7 +72,7 @@
                vin-supply = <&vcc3v3_sys>;
        };
 
-       vcc5v0_usb30_host: vcc5v0-usb30-host-regulator {
+       vcc5v0_usb_host1: vcc5v0_usb_host2: vcc5v0-usb-host-regulator {
                compatible = "regulator-fixed";
                regulator-name = "vcc5v0_host";
                regulator-boot-on;
        status = "okay";
 };
 
+/* Standard PCIe */
 &pcie3x2 {
        reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
        vpcie3v3-supply = <&vcc3v3_sys>;
 
 /* M.2 M-Key ssd */
 &pcie3x4 {
+       num-lanes = <2>;
        reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
        vpcie3v3-supply = <&vcc3v3_sys>;
        status = "okay";
 };
 
 &u2phy2_host {
-       phy-supply = <&vcc5v0_usb30_host>;
+       phy-supply = <&vcc5v0_usb_host1>;
        status = "okay";
 };
 
 &u2phy3_host {
-       phy-supply = <&vcc5v0_usb30_host>;
+       phy-supply = <&vcc5v0_usb_host2>;
        status = "okay";
 };
 
index 0b02f4d6e00331d4731e60251240748b5415b660..cce1c8e835877c4341d90f2fe80da7c57dde8d0c 100644 (file)
@@ -16,8 +16,8 @@
 
        aliases {
                mmc0 = &sdhci;
-               mmc1 = &sdio;
-               mmc2 = &sdmmc;
+               mmc1 = &sdmmc;
+               mmc2 = &sdio;
                serial2 = &uart2;
        };
 
index ac7c677b0fb9c3d6af9e7b8bcd399d9d24ef0b84..de30c2632b8e5fc8cc6d89272269353676b1e1a3 100644 (file)
                            <&rk806_dvs2_null>, <&rk806_dvs3_null>;
                pinctrl-names = "default";
                spi-max-frequency = <1000000>;
+               system-power-controller;
 
                vcc1-supply = <&vcc5v0_sys>;
                vcc2-supply = <&vcc5v0_sys>;
index 4ce70fb75a307ba34fdd8ad5a72d56401de0118e..39d65002add1e11e81bb0d660fd7f5ff90e4cdf7 100644 (file)
@@ -62,7 +62,6 @@
                compatible = "gpio-leds";
                pinctrl-names = "default";
                pinctrl-0 = <&led1_pin>;
-               status = "okay";
 
                /* LED1 on PCB */
                led-1 {
index d7722772ecd8a0afb7e844ffb168fa9a7462cb03..997b516c2533c1d1fe2db05f2b9df2ad5588e278 100644 (file)
        cpu-supply = <&vdd_cpu_lit_s0>;
 };
 
-&cpu_b0{
+&cpu_b0 {
        cpu-supply = <&vdd_cpu_big0_s0>;
 };
 
-&cpu_b1{
+&cpu_b1 {
        cpu-supply = <&vdd_cpu_big0_s0>;
 };
 
-&cpu_b2{
+&cpu_b2 {
        cpu-supply = <&vdd_cpu_big1_s0>;
 };
 
-&cpu_b3{
+&cpu_b3 {
        cpu-supply = <&vdd_cpu_big1_s0>;
 };
 
index ef4f058c20ff1565cb67e5c2c495f0f337ab2a1c..e037bf9db75af0402dccd26b82b50922823fe9f7 100644 (file)
@@ -19,8 +19,8 @@
 
        aliases {
                mmc0 = &sdhci;
-               mmc1 = &sdio;
-               mmc2 = &sdmmc;
+               mmc1 = &sdmmc;
+               mmc2 = &sdio;
        };
 
        analog-sound {
index dc677f29a9c7fca2359cf0d28b3ec3c9e97dda30..3c227888685192456ec7b4e9d348f187f3259063 100644 (file)
 
 &gpio1 {
        gpio-line-names = /* GPIO1 A0-A7 */
-                         "HEADER_27_3v3", "HEADER_28_3v3", "", "",
+                         "HEADER_27_3v3", "", "", "",
                          "HEADER_29_1v8", "", "HEADER_7_1v8", "",
                          /* GPIO1 B0-B7 */
                          "", "HEADER_31_1v8", "HEADER_33_1v8", "",
                          "HEADER_11_1v8", "HEADER_13_1v8", "", "",
                          /* GPIO1 C0-C7 */
-                         "", "", "", "",
+                         "", "HEADER_28_3v3", "", "",
                          "", "", "", "",
                          /* GPIO1 D0-D7 */
                          "", "", "", "",
 
 &gpio4 {
        gpio-line-names = /* GPIO4 A0-A7 */
-                         "", "", "HEADER_37_3v3", "HEADER_32_3v3",
-                         "HEADER_36_3v3", "", "HEADER_35_3v3", "HEADER_38_3v3",
+                         "", "", "HEADER_37_3v3", "HEADER_8_3v3",
+                         "HEADER_10_3v3", "", "HEADER_32_3v3", "HEADER_35_3v3",
                          /* GPIO4 B0-B7 */
                          "", "", "", "HEADER_40_3v3",
-                         "HEADER_8_3v3", "HEADER_10_3v3", "", "",
+                         "HEADER_38_3v3", "HEADER_36_3v3", "", "",
                          /* GPIO4 C0-C7 */
                          "", "", "", "",
                          "", "", "", "",
index bac4cabef6073e5b0c652d0ed031ea7cce97c72f..467ac2f768ac2bb423b92eb797dce8bde697f259 100644 (file)
@@ -227,8 +227,19 @@ static int ctr_encrypt(struct skcipher_request *req)
                        src += blocks * AES_BLOCK_SIZE;
                }
                if (nbytes && walk.nbytes == walk.total) {
+                       u8 buf[AES_BLOCK_SIZE];
+                       u8 *d = dst;
+
+                       if (unlikely(nbytes < AES_BLOCK_SIZE))
+                               src = dst = memcpy(buf + sizeof(buf) - nbytes,
+                                                  src, nbytes);
+
                        neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
                                             nbytes, walk.iv);
+
+                       if (unlikely(nbytes < AES_BLOCK_SIZE))
+                               memcpy(d, dst, nbytes);
+
                        nbytes = 0;
                }
                kernel_neon_end();
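
The tail handling added above stages a short final block in a stack buffer so
the full-block NEON helper never reads or writes out of bounds. A minimal
standalone sketch of the same bounce-buffer pattern follows; ctr_block() and
its fake keystream are placeholders for the real cipher primitive, and the
kernel stages the tail at the *end* of its buffer because the NEON code
addresses the final block relative to the end of the input.

        #include <stddef.h>
        #include <string.h>

        #define BLOCK_SIZE 16

        /* Placeholder for a primitive that always processes one full block. */
        static void ctr_block(unsigned char *dst, const unsigned char *src)
        {
                for (int i = 0; i < BLOCK_SIZE; i++)
                        dst[i] = src[i] ^ 0x5c;         /* fake keystream */
        }

        static void ctr_finish(unsigned char *dst, const unsigned char *src,
                               size_t nbytes)
        {
                unsigned char buf[BLOCK_SIZE] = { 0 };

                if (nbytes == BLOCK_SIZE) {     /* full block: work in place */
                        ctr_block(dst, src);
                        return;
                }
                memcpy(buf, src, nbytes);       /* stage the short tail */
                ctr_block(buf, buf);            /* full-block op on scratch */
                memcpy(dst, buf, nbytes);       /* copy only real bytes back */
        }
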
index 481d94416d696a87ddaf3754c275562540bbc3a1..b67b89c54e1c83644cfdd7c63a4807dd24b8d06d 100644 (file)
@@ -386,6 +386,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
 extern unsigned int sme_get_vl(void);
 extern int sme_set_current_vl(unsigned long arg);
 extern int sme_get_current_vl(void);
+extern void sme_suspend_exit(void);
 
 /*
  * Return how many bytes of memory are required to store the full SME
@@ -421,6 +422,7 @@ static inline int sme_max_vl(void) { return 0; }
 static inline int sme_max_virtualisable_vl(void) { return 0; }
 static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
 static inline int sme_get_current_vl(void) { return -EINVAL; }
+static inline void sme_suspend_exit(void) { }
 
 static inline size_t sme_state_size(struct task_struct const *task)
 {
index b360c4c2b5e792ef276443ca550b535e7050cb35..6aafbb7899916e631eab9241c39c1313a7c93707 100644 (file)
 
 #define JUMP_LABEL_NOP_SIZE            AARCH64_INSN_SIZE
 
-/*
- * Prefer the constraint "S" to support PIC with GCC. Clang before 19 does not
- * support "S" on a symbol with a constant offset, so we use "i" as a fallback.
- */
 static __always_inline bool arch_static_branch(struct static_key * const key,
                                               const bool branch)
 {
@@ -27,9 +23,9 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
                 "      .pushsection    __jump_table, \"aw\"    \n\t"
                 "      .align          3                       \n\t"
                 "      .long           1b - ., %l[l_yes] - .   \n\t"
-                "      .quad           (%[key] - .) + %[bit0]  \n\t"
+                "      .quad           %c0 - .                 \n\t"
                 "      .popsection                             \n\t"
-                :  :  [key]"Si"(key), [bit0]"i"(branch) :  : l_yes);
+                :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
        return false;
 l_yes:
@@ -44,9 +40,9 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke
                 "      .pushsection    __jump_table, \"aw\"    \n\t"
                 "      .align          3                       \n\t"
                 "      .long           1b - ., %l[l_yes] - .   \n\t"
-                "      .quad           (%[key] - .) + %[bit0]  \n\t"
+                "      .quad           %c0 - .                 \n\t"
                 "      .popsection                             \n\t"
-                :  :  [key]"Si"(key), [bit0]"i"(branch) :  : l_yes);
+                :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
        return false;
 l_yes:
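
The revert above returns to packing the branch polarity into the low bit of
the static_key address: the key is at least word-aligned, so the expression
&((char *)key)[branch] is simply "key + branch" with the flag folded into an
otherwise-zero bit. A hedged sketch of the pack/unpack trick, with
illustrative names and the 2-bit mask the jump-label code reserves:

        #include <stdint.h>
        #include <stdio.h>

        struct static_key {
                int enabled;
        } __attribute__((aligned(4)));

        static struct static_key key;

        int main(void)
        {
                int branch = 1;

                /* Pack: the same expression the inline asm emits. */
                uintptr_t entry = (uintptr_t)&((char *)&key)[branch];

                /* Unpack: mask the low bits off to recover both fields. */
                struct static_key *k =
                        (struct static_key *)(entry & ~(uintptr_t)3);
                int polarity = entry & 1;

                printf("key=%p polarity=%d\n", (void *)k, polarity);
                return 0;
        }
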
index 25ceaee6b025da204857af8faeadae2fa83f128c..f27acca550d5539d00d958d441ca8631c8dba8d4 100644 (file)
@@ -1311,6 +1311,22 @@ void __init sme_setup(void)
                get_sme_default_vl());
 }
 
+void sme_suspend_exit(void)
+{
+       u64 smcr = 0;
+
+       if (!system_supports_sme())
+               return;
+
+       if (system_supports_fa64())
+               smcr |= SMCR_ELx_FA64;
+       if (system_supports_sme2())
+               smcr |= SMCR_ELx_EZT0;
+
+       write_sysreg_s(smcr, SYS_SMCR_EL1);
+       write_sysreg_s(0, SYS_SMPRI_EL1);
+}
+
 #endif /* CONFIG_ARM64_SME */
 
 static void sve_init_regs(void)
index 7f88028a00c02c0e176af5ae7674ae606f5afd3a..b2a60e0bcfd21d28a60db750590732bf1115551d 100644 (file)
@@ -247,7 +247,7 @@ struct kunwind_consume_entry_data {
        void *cookie;
 };
 
-static bool
+static __always_inline bool
 arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
 {
        struct kunwind_consume_entry_data *data = cookie;
index eca4d043521183adc7263da95cf656323a4cc73a..eaaff94329cddb8d1fb8d1523395453f3501c9a5 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/exec.h>
+#include <asm/fpsimd.h>
 #include <asm/mte.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -80,6 +81,8 @@ void notrace __cpu_suspend_exit(void)
         */
        spectre_v4_enable_mitigation(NULL);
 
+       sme_suspend_exit();
+
        /* Restore additional feature-specific configuration */
        ptrauth_suspend_exit();
 }
index e2764d0ffa9f32094c57580ed5d987f99b5d2ade..28a93074eca17dbb10c7c75e23baf72edf126391 100644 (file)
@@ -468,6 +468,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
                }
 
                irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
+               if (!irq)
+                       continue;
+
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = pendmask & (1U << bit_nr);
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@@ -1432,6 +1435,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
 
        for (i = 0; i < irq_count; i++) {
                irq = vgic_get_irq(kvm, NULL, intids[i]);
+               if (!irq)
+                       continue;
 
                update_affinity(irq, vcpu2);
 
index b38071a4d0b023c7faf29935d3bb6d5e0c65ca76..8aefb0c126722980a345062cae02a6127c02b52e 100644 (file)
@@ -60,7 +60,7 @@
 
        #address-cells = <1>;
        #size-cells = <0>;
-       eeprom@57{
+       eeprom@57 {
                compatible = "atmel,24c16";
                reg = <0x57>;
                pagesize = <16>;
index 132a2d1ea8bce1ac95222875b6ad74d5ebf06b14..ed4d324340411dee9b88e52720329cf839307ad0 100644 (file)
@@ -78,7 +78,7 @@
 
        #address-cells = <1>;
        #size-cells = <0>;
-       eeprom@57{
+       eeprom@57 {
                compatible = "atmel,24c16";
                reg = <0x57>;
                pagesize = <16>;
index edf2bba80130670364e144ad301868a7dfd3bf93..634ef17fd38bf10d8bd9deef8a6693f0f4777c1e 100644 (file)
@@ -357,6 +357,8 @@ void __init platform_init(void)
        acpi_gbl_use_default_register_widths = false;
        acpi_boot_table_init();
 #endif
+
+       early_init_fdt_scan_reserved_mem();
        unflatten_and_copy_device_tree();
 
 #ifdef CONFIG_NUMA
@@ -390,8 +392,6 @@ static void __init arch_mem_init(char **cmdline_p)
 
        check_kernel_sections_mem();
 
-       early_init_fdt_scan_reserved_mem();
-
        /*
         * In order to reduce the possibility of kernel panic when failed to
         * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
index 2b49d30eb7c0185e043e462859e76a4ae64ecd67..aabee0b280fe5f43a70d8a1091e6268a37d701e9 100644 (file)
@@ -88,6 +88,73 @@ void show_ipi_list(struct seq_file *p, int prec)
        }
 }
 
+static inline void set_cpu_core_map(int cpu)
+{
+       int i;
+
+       cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+       for_each_cpu(i, &cpu_core_setup_map) {
+               if (cpu_data[cpu].package == cpu_data[i].package) {
+                       cpumask_set_cpu(i, &cpu_core_map[cpu]);
+                       cpumask_set_cpu(cpu, &cpu_core_map[i]);
+               }
+       }
+}
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+       int i;
+
+       cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+       for_each_cpu(i, &cpu_sibling_setup_map) {
+               if (cpus_are_siblings(cpu, i)) {
+                       cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+                       cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+               }
+       }
+}
+
+static inline void clear_cpu_sibling_map(int cpu)
+{
+       int i;
+
+       for_each_cpu(i, &cpu_sibling_setup_map) {
+               if (cpus_are_siblings(cpu, i)) {
+                       cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
+                       cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
+               }
+       }
+
+       cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+       int i, k, core_present;
+       cpumask_t temp_foreign_map;
+
+       /* Re-calculate the mask */
+       cpumask_clear(&temp_foreign_map);
+       for_each_online_cpu(i) {
+               core_present = 0;
+               for_each_cpu(k, &temp_foreign_map)
+                       if (cpus_are_siblings(i, k))
+                               core_present = 1;
+               if (!core_present)
+                       cpumask_set_cpu(i, &temp_foreign_map);
+       }
+
+       for_each_online_cpu(i)
+               cpumask_andnot(&cpu_foreign_map[i],
+                              &temp_foreign_map, &cpu_sibling_map[i]);
+}
+
 /* Send mailbox buffer via Mail_Send */
 static void csr_mail_send(uint64_t data, int cpu, int mailbox)
 {
@@ -303,6 +370,7 @@ int loongson_cpu_disable(void)
        numa_remove_cpu(cpu);
 #endif
        set_cpu_online(cpu, false);
+       clear_cpu_sibling_map(cpu);
        calculate_cpu_foreign_map();
        local_irq_save(flags);
        irq_migrate_all_off_this_cpu();
@@ -337,6 +405,7 @@ void __noreturn arch_cpu_idle_dead(void)
                addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
        } while (addr == 0);
 
+       local_irq_disable();
        init_fn = (void *)TO_CACHE(addr);
        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
 
@@ -379,59 +448,6 @@ static int __init ipi_pm_init(void)
 core_initcall(ipi_pm_init);
 #endif
 
-static inline void set_cpu_sibling_map(int cpu)
-{
-       int i;
-
-       cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
-
-       for_each_cpu(i, &cpu_sibling_setup_map) {
-               if (cpus_are_siblings(cpu, i)) {
-                       cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
-                       cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
-               }
-       }
-}
-
-static inline void set_cpu_core_map(int cpu)
-{
-       int i;
-
-       cpumask_set_cpu(cpu, &cpu_core_setup_map);
-
-       for_each_cpu(i, &cpu_core_setup_map) {
-               if (cpu_data[cpu].package == cpu_data[i].package) {
-                       cpumask_set_cpu(i, &cpu_core_map[cpu]);
-                       cpumask_set_cpu(cpu, &cpu_core_map[i]);
-               }
-       }
-}
-
-/*
- * Calculate a new cpu_foreign_map mask whenever a
- * new cpu appears or disappears.
- */
-void calculate_cpu_foreign_map(void)
-{
-       int i, k, core_present;
-       cpumask_t temp_foreign_map;
-
-       /* Re-calculate the mask */
-       cpumask_clear(&temp_foreign_map);
-       for_each_online_cpu(i) {
-               core_present = 0;
-               for_each_cpu(k, &temp_foreign_map)
-                       if (cpus_are_siblings(i, k))
-                               core_present = 1;
-               if (!core_present)
-                       cpumask_set_cpu(i, &temp_foreign_map);
-       }
-
-       for_each_online_cpu(i)
-               cpumask_andnot(&cpu_foreign_map[i],
-                              &temp_foreign_map, &cpu_sibling_map[i]);
-}
-
 /* Preload SMP state for boot cpu */
 void smp_prepare_boot_cpu(void)
 {
index 27701991886dda7e3a6f75bd8a7f71a86995735b..36106922b5d75b7f7de70df5df0d72a697440f0f 100644 (file)
@@ -298,74 +298,73 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
        return ret;
 }
 
-static int _kvm_get_cpucfg(int id, u64 *v)
+static int _kvm_get_cpucfg_mask(int id, u64 *v)
 {
-       int ret = 0;
-
-       if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
+       if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
                return -EINVAL;
 
        switch (id) {
        case 2:
-               /* Return CPUCFG2 features which have been supported by KVM */
+               /* CPUCFG2 features unconditionally supported by KVM */
                *v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
                     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
                     CPUCFG2_LAM;
                /*
-                * If LSX is supported by CPU, it is also supported by KVM,
-                * as we implement it.
+                * For the ISA extensions listed below, if one is supported
+                * by the host, then it is also supported by KVM.
                 */
                if (cpu_has_lsx)
                        *v |= CPUCFG2_LSX;
-               /*
-                * if LASX is supported by CPU, it is also supported by KVM,
-                * as we implement it.
-                */
                if (cpu_has_lasx)
                        *v |= CPUCFG2_LASX;
 
-               break;
+               return 0;
        default:
-               ret = -EINVAL;
-               break;
+               /*
+                * No restrictions on other valid CPUCFG IDs' values, but
+                * CPUCFG data is limited to 32 bits as the LoongArch ISA
+                * manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
+                */
+               *v = U32_MAX;
+               return 0;
        }
-       return ret;
 }
 
 static int kvm_check_cpucfg(int id, u64 val)
 {
-       u64 mask;
-       int ret = 0;
-
-       if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
-               return -EINVAL;
+       int ret;
+       u64 mask = 0;
 
-       if (_kvm_get_cpucfg(id, &mask))
+       ret = _kvm_get_cpucfg_mask(id, &mask);
+       if (ret)
                return ret;
 
+       if (val & ~mask)
+               /* Unsupported features and/or the upper 32 bits must not be set */
+               return -EINVAL;
+
        switch (id) {
        case 2:
-               /* CPUCFG2 features checking */
-               if (val & ~mask)
-                       /* The unsupported features should not be set */
-                       ret = -EINVAL;
-               else if (!(val & CPUCFG2_LLFTP))
-                       /* The LLFTP must be set, as guest must has a constant timer */
-                       ret = -EINVAL;
-               else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
-                       /* Single and double float point must both be set when enable FP */
-                       ret = -EINVAL;
-               else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
-                       /* FP should be set when enable LSX */
-                       ret = -EINVAL;
-               else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
-                       /* LSX, FP should be set when enable LASX, and FP has been checked before. */
-                       ret = -EINVAL;
-               break;
+               if (!(val & CPUCFG2_LLFTP))
+                       /* Guests must have a constant timer */
+                       return -EINVAL;
+               if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
+                       /* Single and double floating point must both be set when FP is enabled */
+                       return -EINVAL;
+               if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
+                       /* LSX architecturally implies FP but val does not satisfy that */
+                       return -EINVAL;
+               if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
+                       /* LASX architecturally implies LSX and FP but val does not satisfy that */
+                       return -EINVAL;
+               return 0;
        default:
-               break;
+               /*
+                * Values for the other CPUCFG IDs are not validated beyond
+                * the mask check above.
+                */
+               return 0;
        }
-       return ret;
 }
 
 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
@@ -566,7 +565,7 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
        uint64_t val;
        uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
 
-       ret = _kvm_get_cpucfg(attr->attr, &val);
+       ret = _kvm_get_cpucfg_mask(attr->attr, &val);
        if (ret)
                return ret;
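
Besides renaming _kvm_get_cpucfg() to _kvm_get_cpucfg_mask(), the hunks above
fix a bounds check that used && (a condition that could never trigger) instead
of ||, and restructure validation into two stages: reject bits outside the
supported mask first, then enforce dependencies between the remaining feature
bits. A minimal sketch of that pattern, with illustrative feature constants:

        #include <stdint.h>

        #define FT_FP   (1u << 0)
        #define FT_LSX  (1u << 1)
        #define FT_LASX (1u << 2)

        static int check_cpucfg2(uint32_t val, uint32_t supported)
        {
                if (val & ~supported)
                        return -1;              /* unsupported bit set */
                if ((val & FT_LSX) && !(val & FT_FP))
                        return -1;              /* LSX implies FP */
                if ((val & FT_LASX) && !(val & FT_LSX))
                        return -1;              /* LASX implies LSX */
                return 0;
        }
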
 
index 0a175ac876980c7c90b747bd8f8f34658499997a..0f42f5c8e3b66a8cbcf6f95a3312cb22f456cca8 100644 (file)
 #ifndef _PARISC_KPROBES_H
 #define _PARISC_KPROBES_H
 
+#include <asm-generic/kprobes.h>
+
 #ifdef CONFIG_KPROBES
 
-#include <asm-generic/kprobes.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/notifier.h>
index d1defb9ede70c0ae73e46363e850fc28ef91cebd..621a4b386ae4fcc90fa5e2ad9b7ac6b947fd903d 100644 (file)
@@ -78,7 +78,7 @@ asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
 #endif
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
 int ftrace_enable_ftrace_graph_caller(void)
 {
        static_key_enable(&ftrace_graph_enable.key);
index e95a977ba5f376eb813d4c7806d205a92f539880..bf73562706b2e8ec337bc8cde4b6fd9e5cd7f43e 100644 (file)
@@ -172,7 +172,6 @@ static int __init processor_probe(struct parisc_device *dev)
        p->cpu_num = cpu_info.cpu_num;
        p->cpu_loc = cpu_info.cpu_loc;
 
-       set_cpu_possible(cpuid, true);
        store_cpu_topology(cpuid);
 
 #ifdef CONFIG_SMP
@@ -474,13 +473,6 @@ static struct parisc_driver cpu_driver __refdata = {
  */
 void __init processor_init(void)
 {
-       unsigned int cpu;
-
        reset_cpu_topology();
-
-       /* reset possible mask. We will mark those which are possible. */
-       for_each_possible_cpu(cpu)
-               set_cpu_possible(cpu, false);
-
        register_parisc_driver(&cpu_driver);
 }
index 27ae40a443b80c5fa575e8579bca7f08ef6d36ab..f7e0fee5ee55a3e055679e75b06c280679b603ad 100644 (file)
@@ -228,10 +228,8 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
 #ifdef CONFIG_IRQSTACKS
        extern void * const _call_on_stack;
 #endif /* CONFIG_IRQSTACKS */
-       void *ptr;
 
-       ptr = dereference_kernel_function_descriptor(&handle_interruption);
-       if (pc_is_kernel_fn(pc, ptr)) {
+       if (pc_is_kernel_fn(pc, handle_interruption)) {
                struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
                dbg("Unwinding through handle_interruption()\n");
                info->prev_sp = regs->gr[30];
@@ -239,13 +237,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
                return 1;
        }
 
-       if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
-           pc_is_kernel_fn(pc, syscall_exit)) {
+       if (pc == (unsigned long)&ret_from_kernel_thread ||
+           pc == (unsigned long)&syscall_exit) {
                info->prev_sp = info->prev_ip = 0;
                return 1;
        }
 
-       if (pc_is_kernel_fn(pc, intr_return)) {
+       if (pc == (unsigned long)&intr_return) {
                struct pt_regs *regs;
 
                dbg("Found intr_return()\n");
@@ -257,14 +255,14 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
        }
 
        if (pc_is_kernel_fn(pc, _switch_to) ||
-           pc_is_kernel_fn(pc, _switch_to_ret)) {
+           pc == (unsigned long)&_switch_to_ret) {
                info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
                info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
                return 1;
        }
 
 #ifdef CONFIG_IRQSTACKS
-       if (pc_is_kernel_fn(pc, _call_on_stack)) {
+       if (pc == (unsigned long)&_call_on_stack) {
                info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
                info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
                return 1;
index ce2b1b5eebddcf5eb2e84b5e8853f89cf06501a6..a8b7e8682f5bd6c58ff9faa31a152a31e1b5280d 100644 (file)
@@ -30,6 +30,16 @@ void *pci_traverse_device_nodes(struct device_node *start,
                                void *data);
 extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
 
+#if defined(CONFIG_IOMMU_API) && (defined(CONFIG_PPC_PSERIES) || \
+                                 defined(CONFIG_PPC_POWERNV))
+extern void ppc_iommu_register_device(struct pci_controller *phb);
+extern void ppc_iommu_unregister_device(struct pci_controller *phb);
+#else
+static inline void ppc_iommu_register_device(struct pci_controller *phb) { }
+static inline void ppc_iommu_unregister_device(struct pci_controller *phb) { }
+#endif
+
+
 /* From rtas_pci.h */
 extern void init_pci_config_tokens (void);
 extern unsigned long get_phb_buid (struct device_node *);
index 9bb2210c8d4417a4262aab81d68d851e175b77b4..065ffd1b2f8adaef8369846531bf4e6f78159b57 100644 (file)
@@ -69,7 +69,7 @@ enum rtas_function_index {
        RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE,
        RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2,
        RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW,
-       RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS,
+       RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW,
        RTAS_FNIDX__IBM_SCAN_LOG_DUMP,
        RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR,
        RTAS_FNIDX__IBM_SET_EEH_OPTION,
@@ -164,7 +164,7 @@ typedef struct {
 #define RTAS_FN_IBM_READ_SLOT_RESET_STATE         rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE)
 #define RTAS_FN_IBM_READ_SLOT_RESET_STATE2        rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2)
 #define RTAS_FN_IBM_REMOVE_PE_DMA_WINDOW          rtas_fn_handle(RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW)
-#define RTAS_FN_IBM_RESET_PE_DMA_WINDOWS          rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS)
+#define RTAS_FN_IBM_RESET_PE_DMA_WINDOW           rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW)
 #define RTAS_FN_IBM_SCAN_LOG_DUMP                 rtas_fn_handle(RTAS_FNIDX__IBM_SCAN_LOG_DUMP)
 #define RTAS_FN_IBM_SET_DYNAMIC_INDICATOR         rtas_fn_handle(RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR)
 #define RTAS_FN_IBM_SET_EEH_OPTION                rtas_fn_handle(RTAS_FNIDX__IBM_SET_EEH_OPTION)
index a9bebfd56b3b3b542204b548f9a215397b37a3d3..1185efebf032b6e7d2cf08db4c953938948a44b1 100644 (file)
@@ -1360,7 +1360,7 @@ static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
        struct pci_controller *hose;
 
        if (!dev_is_pci(dev))
-               return ERR_PTR(-EPERM);
+               return ERR_PTR(-ENODEV);
 
        pdev = to_pci_dev(dev);
        hose = pdev->bus->sysdata;
@@ -1409,6 +1409,21 @@ static const struct attribute_group *spapr_tce_iommu_groups[] = {
        NULL,
 };
 
+void ppc_iommu_register_device(struct pci_controller *phb)
+{
+       iommu_device_sysfs_add(&phb->iommu, phb->parent,
+                               spapr_tce_iommu_groups, "iommu-phb%04x",
+                               phb->global_number);
+       iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
+                               phb->parent);
+}
+
+void ppc_iommu_unregister_device(struct pci_controller *phb)
+{
+       iommu_device_unregister(&phb->iommu);
+       iommu_device_sysfs_remove(&phb->iommu);
+}
+
 /*
  * This registers IOMMU devices of PHBs. This needs to happen
  * after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
@@ -1419,11 +1434,7 @@ static int __init spapr_tce_setup_phb_iommus_initcall(void)
        struct pci_controller *hose;
 
        list_for_each_entry(hose, &hose_list, list_node) {
-               iommu_device_sysfs_add(&hose->iommu, hose->parent,
-                                      spapr_tce_iommu_groups, "iommu-phb%04x",
-                                      hose->global_number);
-               iommu_device_register(&hose->iommu, &spapr_tce_iommu_ops,
-                                     hose->parent);
+               ppc_iommu_register_device(hose);
        }
        return 0;
 }
index 7e793b503e29f1ff878e7289c8703e7c4cf20edc..8064d9c3de8620d27d9c87f829676ef048aeed40 100644 (file)
@@ -375,8 +375,13 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
        [RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW] = {
                .name = "ibm,remove-pe-dma-window",
        },
-       [RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS] = {
-               .name = "ibm,reset-pe-dma-windows",
+       [RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW] = {
+               /*
+                * Note: PAPR+ v2.13 7.3.31.4.1 spells this as
+                * "ibm,reset-pe-dma-windows" (plural), but RTAS
+                * implementations use the singular form in practice.
+                */
+               .name = "ibm,reset-pe-dma-window",
        },
        [RTAS_FNIDX__IBM_SCAN_LOG_DUMP] = {
                .name = "ibm,scan-log-dump",
index 52427fc2a33fa4ad7032bcc6323bc6364918d98f..0b921704da45eb6b718cac8f031c5d0c45176746 100644 (file)
@@ -391,6 +391,24 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 /* Dummy value used in computing PCR value below */
 #define PCR_ARCH_31    (PCR_ARCH_300 << 1)
 
+static inline unsigned long map_pcr_to_cap(unsigned long pcr)
+{
+       unsigned long cap = 0;
+
+       switch (pcr) {
+       case PCR_ARCH_300:
+               cap = H_GUEST_CAP_POWER9;
+               break;
+       case PCR_ARCH_31:
+               cap = H_GUEST_CAP_POWER10;
+               break;
+       default:
+               break;
+       }
+
+       return cap;
+}
+
 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 {
        unsigned long host_pcr_bit = 0, guest_pcr_bit = 0, cap = 0;
@@ -424,11 +442,9 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
                        break;
                case PVR_ARCH_300:
                        guest_pcr_bit = PCR_ARCH_300;
-                       cap = H_GUEST_CAP_POWER9;
                        break;
                case PVR_ARCH_31:
                        guest_pcr_bit = PCR_ARCH_31;
-                       cap = H_GUEST_CAP_POWER10;
                        break;
                default:
                        return -EINVAL;
@@ -440,6 +456,12 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
                return -EINVAL;
 
        if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) {
+               /*
+                * 'arch_compat == 0' means the guest should default to L1's
+                * compatibility. In this case the guest picks up the host's
+                * PCR and evaluates the corresponding capabilities.
+                */
+               cap = map_pcr_to_cap(guest_pcr_bit);
                if (!(cap & nested_capabilities))
                        return -EINVAL;
        }
index 5378eb40b162f2690879f43fbaeb3f0b003536a7..8e6f5355f08b5d925c54606db4a70cbe24d74e61 100644 (file)
@@ -138,6 +138,7 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
        vector128 v;
        int rc, i;
        u16 iden;
+       u32 arch_compat = 0;
 
        vcpu = gsm->data;
 
@@ -347,8 +348,23 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
                        break;
                }
                case KVMPPC_GSID_LOGICAL_PVR:
-                       rc = kvmppc_gse_put_u32(gsb, iden,
-                                               vcpu->arch.vcore->arch_compat);
+                       /*
+                        * Although 'arch_compat == 0' means the default
+                        * compatibility, arch_compat is a Guest Wide Element
+                        * and cannot be filled with 0 in the GSB, as that
+                        * would result in a kernel trap. Hence, when
+                        * 'arch_compat == 0', default to L1's PVR.
+                        */
+                       if (!vcpu->arch.vcore->arch_compat) {
+                               if (cpu_has_feature(CPU_FTR_ARCH_31))
+                                       arch_compat = PVR_ARCH_31;
+                               else if (cpu_has_feature(CPU_FTR_ARCH_300))
+                                       arch_compat = PVR_ARCH_300;
+                       } else {
+                               arch_compat = vcpu->arch.vcore->arch_compat;
+                       }
+                       rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
                        break;
                }
 
index 496e16c588aaa8edcd0294825862312471928506..e8c4129697b142ba48490481ee38793086e8425a 100644 (file)
@@ -574,29 +574,6 @@ static void iommu_table_setparms(struct pci_controller *phb,
 
 struct iommu_table_ops iommu_table_lpar_multi_ops;
 
-/*
- * iommu_table_setparms_lpar
- *
- * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
- */
-static void iommu_table_setparms_lpar(struct pci_controller *phb,
-                                     struct device_node *dn,
-                                     struct iommu_table *tbl,
-                                     struct iommu_table_group *table_group,
-                                     const __be32 *dma_window)
-{
-       unsigned long offset, size, liobn;
-
-       of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
-
-       iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
-                                   &iommu_table_lpar_multi_ops);
-
-
-       table_group->tce32_start = offset;
-       table_group->tce32_size = size;
-}
-
 struct iommu_table_ops iommu_table_pseries_ops = {
        .set = tce_build_pSeries,
        .clear = tce_free_pSeries,
@@ -724,26 +701,71 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
  * dynamic 64bit DMA window, walking up the device tree.
  */
 static struct device_node *pci_dma_find(struct device_node *dn,
-                                       const __be32 **dma_window)
+                                       struct dynamic_dma_window_prop *prop)
 {
-       const __be32 *dw = NULL;
+       const __be32 *default_prop = NULL;
+       const __be32 *ddw_prop = NULL;
+       struct device_node *rdn = NULL;
+       bool default_win = false, ddw_win = false;
 
        for ( ; dn && PCI_DN(dn); dn = dn->parent) {
-               dw = of_get_property(dn, "ibm,dma-window", NULL);
-               if (dw) {
-                       if (dma_window)
-                               *dma_window = dw;
-                       return dn;
+               default_prop = of_get_property(dn, "ibm,dma-window", NULL);
+               if (default_prop) {
+                       rdn = dn;
+                       default_win = true;
+               }
+               ddw_prop = of_get_property(dn, DIRECT64_PROPNAME, NULL);
+               if (ddw_prop) {
+                       rdn = dn;
+                       ddw_win = true;
+                       break;
+               }
+               ddw_prop = of_get_property(dn, DMA64_PROPNAME, NULL);
+               if (ddw_prop) {
+                       rdn = dn;
+                       ddw_win = true;
+                       break;
                }
-               dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
-               if (dw)
-                       return dn;
-               dw = of_get_property(dn, DMA64_PROPNAME, NULL);
-               if (dw)
-                       return dn;
+
+               /* Found at least the default window; this is the normal boot case */
+               if (default_win)
+                       break;
        }
 
-       return NULL;
+       /* For PCI devices there will always be a DMA window, either on the
+        * device or on a parent bus.
+        */
+       WARN_ON(!(default_win | ddw_win));
+
+       /* caller doesn't want to get DMA window property */
+       if (!prop)
+               return rdn;
+
+       /* Parse the DMA window property. During a normal boot only the default
+        * DMA window is passed in the OF tree, but for kdump a dedicated
+        * adapter might carry both the default window and a DDW in the FDT.
+        * In that case the DDW takes precedence over the default window.
+        */
+       if (ddw_win) {
+               struct dynamic_dma_window_prop *p;
+
+               p = (struct dynamic_dma_window_prop *)ddw_prop;
+               prop->liobn = p->liobn;
+               prop->dma_base = p->dma_base;
+               prop->tce_shift = p->tce_shift;
+               prop->window_shift = p->window_shift;
+       } else if (default_win) {
+               unsigned long offset, size, liobn;
+
+               of_parse_dma_window(rdn, default_prop, &liobn, &offset, &size);
+
+               prop->liobn = cpu_to_be32((u32)liobn);
+               prop->dma_base = cpu_to_be64(offset);
+               prop->tce_shift = cpu_to_be32(IOMMU_PAGE_SHIFT_4K);
+               prop->window_shift = cpu_to_be32(order_base_2(size));
+       }
+
+       return rdn;
 }
 
 static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
@@ -751,17 +773,20 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
        struct iommu_table *tbl;
        struct device_node *dn, *pdn;
        struct pci_dn *ppci;
-       const __be32 *dma_window = NULL;
+       struct dynamic_dma_window_prop prop;
 
        dn = pci_bus_to_OF_node(bus);
 
        pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
                 dn);
 
-       pdn = pci_dma_find(dn, &dma_window);
+       pdn = pci_dma_find(dn, &prop);
 
-       if (dma_window == NULL)
-               pr_debug("  no ibm,dma-window property !\n");
+       /* On PPC there will always be a DMA window on the bus or on one of
+        * its parent buses. During a regular boot the ibm,dma-window property
+        * defines the DMA window; for kdump there is at least the default
+        * window, a DDW, or both.
+        */
 
        ppci = PCI_DN(pdn);
 
@@ -771,13 +796,24 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
        if (!ppci->table_group) {
                ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
                tbl = ppci->table_group->tables[0];
-               if (dma_window) {
-                       iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
-                                                 ppci->table_group, dma_window);
 
-                       if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
-                               panic("Failed to initialize iommu table");
-               }
+               iommu_table_setparms_common(tbl, ppci->phb->bus->number,
+                               be32_to_cpu(prop.liobn),
+                               be64_to_cpu(prop.dma_base),
+                               1ULL << be32_to_cpu(prop.window_shift),
+                               be32_to_cpu(prop.tce_shift), NULL,
+                               &iommu_table_lpar_multi_ops);
+
+               /* Only meaningful for a normal boot with the default window.
+                * Setting these from a 64-bit DDW during kdump is harmless,
+                * since they are not used on that path.
+                */
+               ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
+               ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
+
+               if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
+                       panic("Failed to initialize iommu table");
+
                iommu_register_group(ppci->table_group,
                                pci_domain_nr(bus), 0);
                pr_debug("  created table: %p\n", ppci->table_group);
@@ -968,6 +1004,12 @@ static void find_existing_ddw_windows_named(const char *name)
                        continue;
                }
 
+               /* If DDWs are already present in the OF tree at system
+                * initialization time, this is a boot after kexec. A DDW can
+                * be direct or dynamic; just mark every DDW as "dynamic",
+                * since this is the kdump path and performance is not a
+                * concern. ddw_list_new_entry() will set
+                * window->direct = false.
+                */
                window = ddw_list_new_entry(pdn, dma64);
                if (!window) {
                        of_node_put(pdn);
@@ -1524,8 +1566,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 {
        struct device_node *pdn, *dn;
        struct iommu_table *tbl;
-       const __be32 *dma_window = NULL;
        struct pci_dn *pci;
+       struct dynamic_dma_window_prop prop;
 
        pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
 
@@ -1538,7 +1580,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
        dn = pci_device_to_OF_node(dev);
        pr_debug("  node is %pOF\n", dn);
 
-       pdn = pci_dma_find(dn, &dma_window);
+       pdn = pci_dma_find(dn, &prop);
        if (!pdn || !PCI_DN(pdn)) {
                printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
                       "no DMA window found for pci dev=%s dn=%pOF\n",
@@ -1551,8 +1593,20 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
        if (!pci->table_group) {
                pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
                tbl = pci->table_group->tables[0];
-               iommu_table_setparms_lpar(pci->phb, pdn, tbl,
-                               pci->table_group, dma_window);
+
+               iommu_table_setparms_common(tbl, pci->phb->bus->number,
+                               be32_to_cpu(prop.liobn),
+                               be64_to_cpu(prop.dma_base),
+                               1ULL << be32_to_cpu(prop.window_shift),
+                               be32_to_cpu(prop.tce_shift), NULL,
+                               &iommu_table_lpar_multi_ops);
+
+               /* Only meaningful for a normal boot with the default window.
+                * Setting these from a 64-bit DDW during kdump is harmless,
+                * since they are not used on that path.
+                */
+               pci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
+               pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
 
                iommu_init_table(tbl, pci->phb->node, 0, 0);
                iommu_register_group(pci->table_group,
index 4ba8245681192120860ad1278a1b7ec7110a4bfc..4448386268d99155657fe6179ad8fd0132676f13 100644 (file)
@@ -35,6 +35,8 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
 
        pseries_msi_allocate_domains(phb);
 
+       ppc_iommu_register_device(phb);
+
        /* Create EEH devices for the PHB */
        eeh_phb_pe_create(phb);
 
@@ -76,6 +78,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
                }
        }
 
+       ppc_iommu_unregister_device(phb);
+
        pseries_msi_free_domains(phb);
 
        /* Keep a reference so phb isn't freed yet */
index bffbd869a0682842883591788da784648acf1626..e3142ce531a097b8cf0e39251ba88ae143d6594c 100644 (file)
@@ -315,7 +315,6 @@ config AS_HAS_OPTION_ARCH
        # https://reviews.llvm.org/D123515
        def_bool y
        depends on $(as-instr, .option arch$(comma) +m)
-       depends on !$(as-instr, .option arch$(comma) -i)
 
 source "arch/riscv/Kconfig.socs"
 source "arch/riscv/Kconfig.errata"
index 07387f9c135ca7e8ddf7d45de10ccdb933a2e4d4..72b87b08ab444ef1dc1ed200a6e8b3cbb9bfc73f 100644 (file)
                interrupt-parent = <&gpio>;
                interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
+               #interrupt-cells = <2>;
 
                onkey {
                        compatible = "dlg,da9063-onkey";
index c216aaecac53f2d7d1ec47b4f250ea5ae08e11cb..8bcf36d07f3f7c38a164a5864974bc60ad11e8b1 100644 (file)
                        thermal-sensors = <&sfctemp>;
 
                        trips {
-                               cpu_alert0 {
+                               cpu-alert0 {
                                        /* milliCelsius */
                                        temperature = <75000>;
                                        hysteresis = <2000>;
                                        type = "passive";
                                };
 
-                               cpu_crit {
+                               cpu-crit {
                                        /* milliCelsius */
                                        temperature = <90000>;
                                        hysteresis = <2000>;
                };
        };
 
-       osc_sys: osc_sys {
+       osc_sys: osc-sys {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                /* This value must be overridden by the board */
                clock-frequency = <0>;
        };
 
-       osc_aud: osc_aud {
+       osc_aud: osc-aud {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                /* This value must be overridden by the board */
                clock-frequency = <0>;
        };
 
-       gmac_rmii_ref: gmac_rmii_ref {
+       gmac_rmii_ref: gmac-rmii-ref {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                /* Should be overridden by the board when needed */
                clock-frequency = <0>;
        };
 
-       gmac_gr_mii_rxclk: gmac_gr_mii_rxclk {
+       gmac_gr_mii_rxclk: gmac-gr-mii-rxclk {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                /* Should be overridden by the board when needed */
index 45213cdf50dc75a9fa6610710a4d0cbe58b44c51..74ed3b9264d8f15ee10400b4bf5fcf855b7cecd0 100644 (file)
                        };
 
                        trips {
-                               cpu_alert0: cpu_alert0 {
+                               cpu_alert0: cpu-alert0 {
                                        /* milliCelsius */
                                        temperature = <85000>;
                                        hysteresis = <2000>;
                                        type = "passive";
                                };
 
-                               cpu_crit {
+                               cpu-crit {
                                        /* milliCelsius */
                                        temperature = <100000>;
                                        hysteresis = <2000>;
index 510014051f5dbb1aa61098e4974e7e7ac02145ee..2468c55933cd0d5d55d71d83a52226172bd5121c 100644 (file)
 # define CSR_STATUS    CSR_MSTATUS
 # define CSR_IE                CSR_MIE
 # define CSR_TVEC      CSR_MTVEC
+# define CSR_ENVCFG    CSR_MENVCFG
 # define CSR_SCRATCH   CSR_MSCRATCH
 # define CSR_EPC       CSR_MEPC
 # define CSR_CAUSE     CSR_MCAUSE
 # define CSR_STATUS    CSR_SSTATUS
 # define CSR_IE                CSR_SIE
 # define CSR_TVEC      CSR_STVEC
+# define CSR_ENVCFG    CSR_SENVCFG
 # define CSR_SCRATCH   CSR_SSCRATCH
 # define CSR_EPC       CSR_SEPC
 # define CSR_CAUSE     CSR_SCAUSE
index 3291721229523456247532009bc2ed2ddc444540..15055f9df4daa1e4250c8a37c64193bf5c943ee3 100644 (file)
 
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #ifndef __ASSEMBLY__
+
+extern void *return_address(unsigned int level);
+
+#define ftrace_return_address(n) return_address(n)
+
 void MCOUNT_NAME(void);
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
index 20f9c3ba2341412812ba003caf86f546c162bd34..22deb7a2a6ec4e4daba8322c7c6c28137b49f5f8 100644 (file)
@@ -11,8 +11,10 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 }
 #define arch_clear_hugepage_flags arch_clear_hugepage_flags
 
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h);
 #define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+#endif
 
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
 #define __HAVE_ARCH_HUGE_PTE_CLEAR
index 5340f818746b71a805319eb6f941fa311c9b36a2..1f2d2599c655d20be6df7516382e20a7e3956301 100644 (file)
@@ -81,6 +81,8 @@
 #define RISCV_ISA_EXT_ZTSO             72
 #define RISCV_ISA_EXT_ZACAS            73
 
+#define RISCV_ISA_EXT_XLINUXENVCFG     127
+
 #define RISCV_ISA_EXT_MAX              128
 #define RISCV_ISA_EXT_INVALID          U32_MAX
 
index d169a4f41a2e728276a97898e1270c7b4763f9ed..c80bb9990d32ef706452d7d4fcc1c049cd7436d9 100644 (file)
@@ -95,7 +95,13 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
                __pud_free(mm, pud);
 }
 
-#define __pud_free_tlb(tlb, pud, addr)  pud_free((tlb)->mm, pud)
+#define __pud_free_tlb(tlb, pud, addr)                                 \
+do {                                                                   \
+       if (pgtable_l4_enabled) {                                       \
+               pagetable_pud_dtor(virt_to_ptdesc(pud));                \
+               tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud));     \
+       }                                                               \
+} while (0)
 
 #define p4d_alloc_one p4d_alloc_one
 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -124,7 +130,11 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
                __p4d_free(mm, p4d);
 }
 
-#define __p4d_free_tlb(tlb, p4d, addr)  p4d_free((tlb)->mm, p4d)
+#define __p4d_free_tlb(tlb, p4d, addr)                                 \
+do {                                                                   \
+       if (pgtable_l5_enabled)                                         \
+               tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(p4d));     \
+} while (0)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 static inline void sync_kernel_mappings(pgd_t *pgd)
@@ -149,7 +159,11 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 #ifndef __PAGETABLE_PMD_FOLDED
 
-#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, addr)                         \
+do {                                                           \
+       pagetable_pmd_dtor(virt_to_ptdesc(pmd));                \
+       tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd));     \
+} while (0)
 
 #endif /* __PAGETABLE_PMD_FOLDED */
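
Routing pmd/pud/p4d pages through the mmu gather instead of freeing them
directly, as the hunks above do, defers the actual free until after the TLB
flush; the presumed motivation is that an immediate free could let a
concurrent lockless page-table walker dereference a recycled table. A hedged
sketch of the deferred-free idea, using stand-in types and names rather than
the real kernel API:

        struct mmu_gather_sketch {
                void *pages[64];
                int nr;
        };

        extern void flush_tlb_sketch(void);     /* stand-in for the flush */
        extern void free_page_sketch(void *p);  /* stand-in for the free */

        static void tlb_defer_table_free(struct mmu_gather_sketch *tlb,
                                         void *table)
        {
                tlb->pages[tlb->nr++] = table;  /* queue it; don't free yet */
        }

        static void tlb_finish_sketch(struct mmu_gather_sketch *tlb)
        {
                flush_tlb_sketch();     /* walkers can no longer reach them */
                while (tlb->nr)
                        free_page_sketch(tlb->pages[--tlb->nr]);
        }
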
 
index b42017d76924f74386bc712719280af21781bb5d..b99bd66107a69038c835ead6b77725aaeaf882c3 100644 (file)
@@ -136,7 +136,7 @@ enum napot_cont_order {
  * 10010 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
  */
 #define _PAGE_PMA_THEAD                ((1UL << 62) | (1UL << 61) | (1UL << 60))
-#define _PAGE_NOCACHE_THEAD    ((1UL < 61) | (1UL << 60))
+#define _PAGE_NOCACHE_THEAD    ((1UL << 61) | (1UL << 60))
 #define _PAGE_IO_THEAD         ((1UL << 63) | (1UL << 60))
 #define _PAGE_MTMASK_THEAD     (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
 
index 0c94260b5d0c126f6302f39a59507f19eed48dac..6066822e7396fa5078a546356a3a6f6605470712 100644 (file)
@@ -84,7 +84,7 @@
  * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
  * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
  */
-#define vmemmap                ((struct page *)VMEMMAP_START)
+#define vmemmap                ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
 
 #define PCI_IO_SIZE      SZ_16M
 #define PCI_IO_END       VMEMMAP_START
@@ -439,6 +439,10 @@ static inline pte_t pte_mkhuge(pte_t pte)
        return pte;
 }
 
+#define pte_leaf_size(pte)     (pte_napot(pte) ?                               \
+                                       napot_cont_size(napot_cont_order(pte)) :\
+                                       PAGE_SIZE)
+
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * See the comment in include/asm-generic/pgtable.h
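
The vmemmap change above relies on sparsemem-vmemmap reducing pfn_to_page() to plain pointer arithmetic, `vmemmap + pfn`. RAM rarely starts at physical address 0 on RISC-V, so biasing the array base back by the first valid PFN keeps the struct page for the first RAM page exactly at VMEMMAP_START. A minimal userspace model, with illustrative constants only:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define VMEMMAP_START	0xffffc00000000000ULL	/* illustrative value only */

struct page { char pad[64]; };	/* stand-in for the real struct page */

int main(void)
{
	uint64_t phys_ram_base = 0x80000000ULL;	/* a common RISC-V DRAM base */
	struct page *vmemmap =
		(struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT);

	/* pfn_to_page() of the first RAM page lands on VMEMMAP_START */
	assert((uint64_t)(uintptr_t)(vmemmap + (phys_ram_base >> PAGE_SHIFT)) ==
	       VMEMMAP_START);
	return 0;
}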
index 02f87867389a9e660f91b64c7ca818a6b61637dc..491296a335d0ce6cd9c8f242646c3c60c762bc87 100644 (file)
@@ -14,6 +14,7 @@ struct suspend_context {
        struct pt_regs regs;
        /* Saved and restored by high-level functions */
        unsigned long scratch;
+       unsigned long envcfg;
        unsigned long tvec;
        unsigned long ie;
 #ifdef CONFIG_MMU
index 924d01b56c9a1eb1eacd53a923fc55591cda654f..51f6dfe19745aa486bd73d7de472faa538cf0486 100644 (file)
@@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
        return true;
 }
 
-#ifdef CONFIG_RISCV_ISA_SVNAPOT
-#include <linux/pgtable.h>
+#endif
 
-#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
-static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
-                                                        u64 pfn, unsigned int max_page_shift)
-{
-       unsigned long map_size = PAGE_SIZE;
-       unsigned long size, order;
-
-       if (!has_svnapot())
-               return map_size;
-
-       for_each_napot_order_rev(order) {
-               if (napot_cont_shift(order) > max_page_shift)
-                       continue;
-
-               size = napot_cont_size(order);
-               if (end - addr < size)
-                       continue;
-
-               if (!IS_ALIGNED(addr, size))
-                       continue;
-
-               if (!IS_ALIGNED(PFN_PHYS(pfn), size))
-                       continue;
-
-               map_size = size;
-               break;
-       }
-
-       return map_size;
-}
-
-#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
-static inline int arch_vmap_pte_supported_shift(unsigned long size)
-{
-       int shift = PAGE_SHIFT;
-       unsigned long order;
-
-       if (!has_svnapot())
-               return shift;
-
-       WARN_ON_ONCE(size >= PMD_SIZE);
-
-       for_each_napot_order_rev(order) {
-               if (napot_cont_size(order) > size)
-                       continue;
-
-               if (!IS_ALIGNED(size, napot_cont_size(order)))
-                       continue;
-
-               shift = napot_cont_shift(order);
-               break;
-       }
-
-       return shift;
-}
-
-#endif /* CONFIG_RISCV_ISA_SVNAPOT */
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 #endif /* _ASM_RISCV_VMALLOC_H */
index f71910718053d841a361fd97e7d62da4f86bebcf..604d6bf7e47672e9b01902f6fa497aeb4e102ee5 100644 (file)
@@ -7,6 +7,7 @@ ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_patch.o  = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_sbi.o    = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
 endif
 CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
 CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
@@ -46,6 +47,7 @@ obj-y += irq.o
 obj-y  += process.o
 obj-y  += ptrace.o
 obj-y  += reset.o
+obj-y  += return_address.o
 obj-y  += setup.o
 obj-y  += signal.o
 obj-y  += syscall_table.o
index 89920f84d0a34385471e9afbf9c26d287cbbd838..79a5a35fab964d3b54db97b5504f45f68dface11 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/hwprobe.h>
 #include <asm/patch.h>
 #include <asm/processor.h>
+#include <asm/sbi.h>
 #include <asm/vector.h>
 
 #include "copy-unaligned.h"
@@ -201,6 +202,16 @@ static const unsigned int riscv_zvbb_exts[] = {
        RISCV_ISA_EXT_ZVKB
 };
 
+/*
+ * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V
+ * privileged ISA, the existence of the CSRs is implied by any extension which
+ * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the
+ * existence of the CSR, and treat it as a subset of those other extensions.
+ */
+static const unsigned int riscv_xlinuxenvcfg_exts[] = {
+       RISCV_ISA_EXT_XLINUXENVCFG
+};
+
 /*
  * The canonical order of ISA extension names in the ISA string is defined in
  * chapter 27 of the unprivileged specification.
@@ -250,8 +261,8 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
        __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
        __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
        __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
-       __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
-       __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
+       __RISCV_ISA_EXT_SUPERSET(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts),
+       __RISCV_ISA_EXT_SUPERSET(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts),
        __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
        __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
        __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
@@ -538,6 +549,20 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
                        set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
                }
 
+               /*
+                * "V" in ISA strings is ambiguous in practice: it should mean
+                * just the standard V-1.0, but vendors aren't well behaved.
+                * Many vendors with T-Head CPU cores which implement the 0.7.1
+                * version of the vector specification put "v" into their DTs.
+                * CPU cores with the ratified spec will contain non-zero
+                * marchid.
+                */
+               if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
+                   riscv_cached_marchid(cpu) == 0x0) {
+                       this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
+                       clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
+               }
+
                /*
                 * All "okay" hart should have same isa. Set HWCAP based on
                 * common capabilities of every "okay" hart, in case they don't
@@ -950,7 +975,7 @@ arch_initcall(check_unaligned_access_all_cpus);
 void riscv_user_isa_enable(void)
 {
        if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
-               csr_set(CSR_SENVCFG, ENVCFG_CBZE);
+               csr_set(CSR_ENVCFG, ENVCFG_CBZE);
 }
 
 #ifdef CONFIG_RISCV_ALTERNATIVE
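
The CSR_SENVCFG -> CSR_ENVCFG change in riscv_user_isa_enable() works because asm/csr.h aliases CSR_ENVCFG to the CSR matching the kernel's privilege mode, roughly as below (paraphrased sketch; the exact spelling in csr.h may differ):

#ifdef CONFIG_RISCV_M_MODE
# define CSR_ENVCFG	CSR_MENVCFG
#else
# define CSR_ENVCFG	CSR_SENVCFG
#endif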
diff --git a/arch/riscv/kernel/return_address.c b/arch/riscv/kernel/return_address.c
new file mode 100644 (file)
index 0000000..c8115ec
--- /dev/null
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This code comes from arch/arm64/kernel/return_address.c
+ *
+ * Copyright (C) 2023 SiFive.
+ */
+
+#include <linux/export.h>
+#include <linux/kprobes.h>
+#include <linux/stacktrace.h>
+
+struct return_address_data {
+       unsigned int level;
+       void *addr;
+};
+
+static bool save_return_addr(void *d, unsigned long pc)
+{
+       struct return_address_data *data = d;
+
+       if (!data->level) {
+               data->addr = (void *)pc;
+               return false;
+       }
+
+       --data->level;
+
+       return true;
+}
+NOKPROBE_SYMBOL(save_return_addr);
+
+noinline void *return_address(unsigned int level)
+{
+       struct return_address_data data;
+
+       data.level = level + 3;
+       data.addr = NULL;
+
+       arch_stack_walk(save_return_addr, &data, current, NULL);
+
+       if (!data.level)
+               return data.addr;
+       else
+               return NULL;
+
+}
+EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
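
A hedged usage sketch for the new file: the `level + 3` above skips the frames of return_address() itself, arch_stack_walk(), and the unwind callback, so callers count levels from their own caller. The function below is hypothetical, not part of the patch:

noinline void *whose_caller(void)
{
	return return_address(1);	/* return address one level above our caller */
}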
index 239509367e4233336806c19da964a06537d5a9b5..299795341e8a2207dc922373511e31118bbd0f8b 100644 (file)
@@ -15,6 +15,8 @@
 void suspend_save_csrs(struct suspend_context *context)
 {
        context->scratch = csr_read(CSR_SCRATCH);
+       if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
+               context->envcfg = csr_read(CSR_ENVCFG);
        context->tvec = csr_read(CSR_TVEC);
        context->ie = csr_read(CSR_IE);
 
@@ -36,6 +38,8 @@ void suspend_save_csrs(struct suspend_context *context)
 void suspend_restore_csrs(struct suspend_context *context)
 {
        csr_write(CSR_SCRATCH, context->scratch);
+       if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
+               csr_write(CSR_ENVCFG, context->envcfg);
        csr_write(CSR_TVEC, context->tvec);
        csr_write(CSR_IE, context->ie);
 
index 29c7606414d276d1c3639e2a80e10037ea899cfc..5ef2a6891158a6d59de8f36b4f4d98cf3ad6eb2a 100644 (file)
@@ -426,10 +426,12 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
        return __hugetlb_valid_size(size);
 }
 
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {
        return __hugetlb_valid_size(huge_page_size(h));
 }
+#endif
 
 #ifdef CONFIG_CONTIG_ALLOC
 static __init int gigantic_pages_init(void)
diff --git a/arch/s390/configs/compat.config b/arch/s390/configs/compat.config
new file mode 100644 (file)
index 0000000..6fd0514
--- /dev/null
@@ -0,0 +1,3 @@
+# Help: Enable compat support
+CONFIG_COMPAT=y
+CONFIG_COMPAT_32BIT_TIME=y
index cae2dd34fbb49d16ee020e72fb669010dca832f8..c924be0d7ed873b2ab9b82a7ab789598f497b016 100644 (file)
@@ -118,7 +118,6 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
-CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -374,6 +373,7 @@ CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
 CONFIG_GACT_PROB=y
 CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
 CONFIG_NET_ACT_NAT=m
 CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
@@ -436,9 +436,6 @@ CONFIG_SCSI_DH_ALUA=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 # CONFIG_MD_BITMAP_FILE is not set
-CONFIG_MD_LINEAR=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
 CONFIG_MD_CLUSTER=m
 CONFIG_BCACHE=m
 CONFIG_BLK_DEV_DM=y
@@ -637,7 +634,6 @@ CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
-CONFIG_NETFS_SUPPORT=m
 CONFIG_NETFS_STATS=y
 CONFIG_FSCACHE=y
 CONFIG_CACHEFILES=m
@@ -709,7 +705,6 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
 CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
-CONFIG_INIT_STACK_NONE=y
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -739,7 +734,6 @@ CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_HCTR2=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_LRW=m
index 42b988873e5443df15b054d78610697fdf769293..c8f0c9fe40d708e9b082df3ac0fd5fb901883584 100644 (file)
@@ -109,7 +109,6 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
-CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -364,6 +363,7 @@ CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
 CONFIG_GACT_PROB=y
 CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
 CONFIG_NET_ACT_NAT=m
 CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
@@ -426,9 +426,6 @@ CONFIG_SCSI_DH_ALUA=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 # CONFIG_MD_BITMAP_FILE is not set
-CONFIG_MD_LINEAR=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
 CONFIG_MD_CLUSTER=m
 CONFIG_BCACHE=m
 CONFIG_BLK_DEV_DM=y
@@ -622,7 +619,6 @@ CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
-CONFIG_NETFS_SUPPORT=m
 CONFIG_NETFS_STATS=y
 CONFIG_FSCACHE=y
 CONFIG_CACHEFILES=m
@@ -693,7 +689,6 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
 CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
-CONFIG_INIT_STACK_NONE=y
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_USER=m
@@ -724,11 +719,9 @@ CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_HCTR2=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_OFB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
index 30d2a16876650e9c3ea32997f771131e6372e2fc..c51f3ec4eb28ab189b7d27d12ca28b98261178e2 100644 (file)
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KEXEC=y
 CONFIG_CRASH_DUMP=y
 CONFIG_MARCH_Z13=y
 CONFIG_NR_CPUS=2
@@ -64,7 +65,6 @@ CONFIG_ZFCP=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
-CONFIG_INIT_STACK_NONE=y
 # CONFIG_ZLIB_DFLTCC is not set
 CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
index 676ac74026a82b578f857e2426a501abdec014c7..52a44e353796c001a31e9a8242f39982203fb8be 100644 (file)
@@ -252,7 +252,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 /* combine single writes by using store-block insn */
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 {
-       zpci_memcpy_toio(to, from, count);
+       zpci_memcpy_toio(to, from, count * 8);
 }
 
 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
index 5f60359361312e4159b45d769c1afe81533ace1b..2a03daa68f2857df85df97b5b632d6154e76496f 100644 (file)
@@ -60,7 +60,7 @@ libs-y                 += arch/sparc/prom/
 libs-y                 += arch/sparc/lib/
 
 drivers-$(CONFIG_PM) += arch/sparc/power/
-drivers-$(CONFIG_FB) += arch/sparc/video/
+drivers-$(CONFIG_FB_CORE) += arch/sparc/video/
 
 boot := arch/sparc/boot
 
index 6baddbd58e4db3fa82c9ba76fd5e0d571a7c4f48..d4d83f1702c61f09e3dceac24c494ecd1632f3e5 100644 (file)
@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-obj-$(CONFIG_FB) += fbdev.o
+obj-$(CONFIG_FB_CORE) += fbdev.o
index 8c8d38f0cb1df0ee959e09c9f912ec1ab2afce40..0033790499245e3df5f10496986badbe0150aac2 100644 (file)
@@ -6,6 +6,9 @@
 #include <linux/export.h>
 #include <linux/linkage.h>
 #include <asm/msr-index.h>
+#include <asm/unwind_hints.h>
+#include <asm/segment.h>
+#include <asm/cache.h>
 
 .pushsection .noinstr.text, "ax"
 
@@ -20,3 +23,23 @@ SYM_FUNC_END(entry_ibpb)
 EXPORT_SYMBOL_GPL(entry_ibpb);
 
 .popsection
+
+/*
+ * Define the VERW operand that is disguised as entry code so that
+ * it can be referenced with KPTI enabled. This ensures VERW can be
+ * used late in exit-to-user path after page tables are switched.
+ */
+.pushsection .entry.text, "ax"
+
+.align L1_CACHE_BYTES, 0xcc
+SYM_CODE_START_NOALIGN(mds_verw_sel)
+       UNWIND_HINT_UNDEFINED
+       ANNOTATE_NOENDBR
+       .word __KERNEL_DS
+.align L1_CACHE_BYTES, 0xcc
+SYM_CODE_END(mds_verw_sel);
+/* For KVM */
+EXPORT_SYMBOL_GPL(mds_verw_sel);
+
+.popsection
+
index c73047bf9f4bff9c4631c0eab383cedceda41918..fba427646805d55221664538be2285c3ae188ca1 100644 (file)
@@ -885,6 +885,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
        BUG_IF_WRONG_CR3 no_user_check=1
        popfl
        popl    %eax
+       CLEAR_CPU_BUFFERS
 
        /*
         * Return back to the vDSO, which will pop ecx and edx.
@@ -954,6 +955,7 @@ restore_all_switch_stack:
 
        /* Restore user state */
        RESTORE_REGS pop=4                      # skip orig_eax/error_code
+       CLEAR_CPU_BUFFERS
 .Lirq_return:
        /*
         * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -1146,6 +1148,7 @@ SYM_CODE_START(asm_exc_nmi)
 
        /* Not on SYSENTER stack. */
        call    exc_nmi
+       CLEAR_CPU_BUFFERS
        jmp     .Lnmi_return
 
 .Lnmi_from_sysenter_stack:
index c40f89ab1b4c70a18b632a50c1e659e3fd83cfa9..9bb4859776291593249b9998416505aeec505011 100644 (file)
@@ -161,6 +161,7 @@ syscall_return_via_sysret:
 SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        swapgs
+       CLEAR_CPU_BUFFERS
        sysretq
 SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
@@ -573,6 +574,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
 
 .Lswapgs_and_iret:
        swapgs
+       CLEAR_CPU_BUFFERS
        /* Assert that the IRET frame indicates user mode. */
        testb   $3, 8(%rsp)
        jnz     .Lnative_iret
@@ -723,6 +725,8 @@ native_irq_return_ldt:
         */
        popq    %rax                            /* Restore user RAX */
 
+       CLEAR_CPU_BUFFERS
+
        /*
         * RSP now points to an ordinary IRET frame, except that the page
         * is read-only and RSP[31:16] are preloaded with the userspace
@@ -1449,6 +1453,12 @@ nmi_restore:
        std
        movq    $0, 5*8(%rsp)           /* clear "NMI executing" */
 
+       /*
+        * Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
+        * an NMI hitting the kernel after user state is restored. For an
+        * unprivileged user these conditions are hard to meet.
+        */
+
        /*
         * iretq reads the "iret" frame and exits the NMI stack in a
         * single instruction.  We are returning to kernel mode, so this
@@ -1466,6 +1476,7 @@ SYM_CODE_START(entry_SYSCALL32_ignore)
        UNWIND_HINT_END_OF_STACK
        ENDBR
        mov     $-ENOSYS, %eax
+       CLEAR_CPU_BUFFERS
        sysretl
 SYM_CODE_END(entry_SYSCALL32_ignore)
 
index de94e2e84ecca927d9aa0e1ab99466466c163d44..eabf48c4d4b4c30367792f5d9a0b158a9ecf8a04 100644 (file)
@@ -270,6 +270,7 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL)
        xorl    %r9d, %r9d
        xorl    %r10d, %r10d
        swapgs
+       CLEAR_CPU_BUFFERS
        sysretl
 SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
index 96e6c51515f50467efbf7cb77082c6b9d18cb8f6..cf1b78cb2d0431ae7095d1c8769c1e140c4357a7 100644 (file)
 extern struct boot_params boot_params;
 static struct real_mode_header hv_vtl_real_mode_header;
 
+static bool __init hv_vtl_msi_ext_dest_id(void)
+{
+       return true;
+}
+
 void __init hv_vtl_init_platform(void)
 {
        pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
@@ -38,6 +43,8 @@ void __init hv_vtl_init_platform(void)
        x86_platform.legacy.warm_reset = 0;
        x86_platform.legacy.reserve_bios_regions = 0;
        x86_platform.legacy.devices.pnpbios = 0;
+
+       x86_init.hyper.msi_ext_dest_id = hv_vtl_msi_ext_dest_id;
 }
 
 static inline u64 hv_vtl_system_desc_base(struct ldttss_desc *desc)
index 7dcbf153ad7257c3fda712df91b3195efe522ab2..768d73de0d098afc5d7a9b6ea1e7c2747aab4feb 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/io.h>
 #include <asm/coco.h>
 #include <asm/mem_encrypt.h>
+#include <asm/set_memory.h>
 #include <asm/mshyperv.h>
 #include <asm/hypervisor.h>
 #include <asm/mtrr.h>
@@ -502,6 +503,31 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
                return -EFAULT;
 }
 
+/*
+ * When transitioning memory between encrypted and decrypted, the caller
+ * of set_memory_encrypted() or set_memory_decrypted() is responsible for
+ * ensuring that the memory isn't in use and isn't referenced while the
+ * transition is in progress.  The transition has multiple steps, and the
+ * memory is in an inconsistent state until all steps are complete. A
+ * reference while the state is inconsistent could result in an exception
+ * that can't be cleanly fixed up.
+ *
+ * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
+ * stray reference that can't be prevented by the caller, so Linux has
+ * specific code to handle this case. But when the #VC and #VE exceptions are
+ * routed to a paravisor, the specific code doesn't work. To avoid this
+ * problem, mark the pages as "not present" while the transition is in
+ * progress. If load_unaligned_zeropad() causes a stray reference, a normal
+ * page fault is generated instead of #VC or #VE, and the page-fault-based
+ * handlers for load_unaligned_zeropad() resolve the reference.  When the
+ * transition is complete, hv_vtom_set_host_visibility() marks the pages
+ * as "present" again.
+ */
+static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
+{
+       return !set_memory_np(kbuffer, pagecount);
+}
+
 /*
  * hv_vtom_set_host_visibility - Set specified memory visible to host.
  *
@@ -515,16 +541,28 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
        enum hv_mem_host_visibility visibility = enc ?
                        VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
        u64 *pfn_array;
+       phys_addr_t paddr;
+       void *vaddr;
        int ret = 0;
        bool result = true;
        int i, pfn;
 
        pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-       if (!pfn_array)
-               return false;
+       if (!pfn_array) {
+               result = false;
+               goto err_set_memory_p;
+       }
 
        for (i = 0, pfn = 0; i < pagecount; i++) {
-               pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
+               /*
+                * Use slow_virt_to_phys() because the PRESENT bit has been
+                * temporarily cleared in the PTEs.  slow_virt_to_phys() works
+                * without the PRESENT bit while virt_to_hvpfn() or similar
+                * does not.
+                */
+               vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
+               paddr = slow_virt_to_phys(vaddr);
+               pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
                pfn++;
 
                if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
@@ -538,14 +576,30 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
                }
        }
 
- err_free_pfn_array:
+err_free_pfn_array:
        kfree(pfn_array);
+
+err_set_memory_p:
+       /*
+        * Set the PTE PRESENT bits again to revert what hv_vtom_clear_present()
+        * did. Do this even if there is an error earlier in this function in
+        * order to avoid leaving the memory range in a "broken" state. Setting
+        * the PRESENT bits shouldn't fail, but return an error if it does.
+        */
+       if (set_memory_p(kbuffer, pagecount))
+               result = false;
+
        return result;
 }
 
 static bool hv_vtom_tlb_flush_required(bool private)
 {
-       return true;
+       /*
+        * Since hv_vtom_clear_present() marks the PTEs as "not present"
+        * and flushes the TLB, they can't be in the TLB. That makes the
+        * flush controlled by this function redundant, so return "false".
+        */
+       return false;
 }
 
 static bool hv_vtom_cache_flush_required(void)
@@ -608,6 +662,7 @@ void __init hv_vtom_init(void)
        x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
        x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
        x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
+       x86_platform.guest.enc_status_change_prepare = hv_vtom_clear_present;
        x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
 
        /* Set WB as the default cache mode. */
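
A toy model of the prepare/finish pairing these hunks establish (the hook names above are real; everything below is a hypothetical stand-in that only demonstrates the invariant: finish must restore the PRESENT bits even when the visibility change failed):

#include <assert.h>
#include <stdbool.h>

static bool present = true;

static bool prepare(void)		/* models hv_vtom_clear_present() */
{
	present = false;		/* PTEs marked not-present, TLB flushed */
	return true;
}

static bool finish(bool hypercall_ok)	/* models hv_vtom_set_host_visibility() */
{
	present = true;			/* always revert, even on failure */
	return hypercall_ok;
}

int main(void)
{
	assert(prepare());
	assert(!finish(false));		/* the failure is still reported... */
	assert(present);		/* ...but the range is usable again */
	return 0;
}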
index fdf723b6f6d0ce9f6742ef3c67adce3c8d57c002..2b62cdd8dd1227f2425e698525b97639a4124f75 100644 (file)
@@ -95,7 +95,7 @@
 #define X86_FEATURE_SYSENTER32         ( 3*32+15) /* "" sysenter in IA32 userspace */
 #define X86_FEATURE_REP_GOOD           ( 3*32+16) /* REP microcode works well */
 #define X86_FEATURE_AMD_LBR_V2         ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-/* FREE, was #define X86_FEATURE_LFENCE_RDTSC          ( 3*32+18) "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_CLEAR_CPU_BUF      ( 3*32+18) /* "" Clear CPU buffers using VERW */
 #define X86_FEATURE_ACC_POWER          ( 3*32+19) /* AMD Accumulated Power Mechanism */
 #define X86_FEATURE_NOPL               ( 3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_ALWAYS             ( 3*32+21) /* "" Always-present feature */
index ce8f50192ae3e46da87fe3a24fc736b3b2fc3b21..7e523bb3d2d31a9a8ab9d32ca65a41b5b765c4c4 100644 (file)
@@ -91,7 +91,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 
 static __always_inline void arch_exit_to_user_mode(void)
 {
-       mds_user_clear_cpu_buffers();
        amd_clear_divider();
 }
 #define arch_exit_to_user_mode arch_exit_to_user_mode
index 262e65539f83c86d140552305c8a9d330b313c20..2aa52cab1e463af6f4105e2f887acf185dec9f31 100644 (file)
 #endif
 .endm
 
+/*
+ * Macro to execute the VERW instruction, which mitigates transient data
+ * sampling attacks such as MDS. On affected systems a microcode update
+ * overloads the VERW instruction to also clear the CPU buffers. VERW
+ * clobbers EFLAGS.ZF.
+ *
+ * Note: Only the memory operand variant of VERW clears the CPU buffers.
+ */
+.macro CLEAR_CPU_BUFFERS
+       ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
+.endm
+
 #else /* __ASSEMBLY__ */
 
 #define ANNOTATE_RETPOLINE_SAFE                                        \
@@ -529,13 +540,14 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-DECLARE_STATIC_KEY_FALSE(mds_user_clear);
 DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
 DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
 
+extern u16 mds_verw_sel;
+
 #include <asm/segment.h>
 
 /**
@@ -561,17 +573,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
 }
 
-/**
- * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
- *
- * Clear CPU buffers if the corresponding static key is enabled
- */
-static __always_inline void mds_user_clear_cpu_buffers(void)
-{
-       if (static_branch_likely(&mds_user_clear))
-               mds_clear_cpu_buffers();
-}
-
 /**
  * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
  *
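
For reference, what the CLEAR_CPU_BUFFERS alternative amounts to at runtime on affected CPUs, modeled as C inline asm (illustrative sketch; the real macro is patched in via ALTERNATIVE, and only the memory-operand form of VERW clears the buffers):

static __always_inline void clear_cpu_buffers_model(void)
{
	/* memory operand pointing at the selector word in .entry.text */
	asm volatile("verw %[sel]" : : [sel] "m" (mds_verw_sel) : "cc");
}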
index a5e89641bd2dac7e9fa5e1ab548369836640908a..9aee31862b4a8b8cbf2242db991a5cbeb3d41e21 100644 (file)
@@ -47,6 +47,7 @@ int set_memory_uc(unsigned long addr, int numpages);
 int set_memory_wc(unsigned long addr, int numpages);
 int set_memory_wb(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
+int set_memory_p(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
index ab60a71a8dcb98e62bccf3c045066df8f42f30f4..472f0263dbc6129c30636f149b94d5828ac300c6 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/seqlock.h>
 #include <uapi/asm/vsyscall.h>
+#include <asm/page_types.h>
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
 extern void map_vsyscall(void);
@@ -24,4 +25,13 @@ static inline bool emulate_vsyscall(unsigned long error_code,
 }
 #endif
 
+/*
+ * The (legacy) vsyscall page is the lone page in the kernel portion
+ * of the address space that has user-accessible permissions.
+ */
+static inline bool is_vsyscall_vaddr(unsigned long vaddr)
+{
+       return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+}
+
 #endif /* _ASM_X86_VSYSCALL_H */
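
A standalone model of the helper, using the real fixed address from uapi/asm/vsyscall.h (the vsyscall page has sat at 0xffffffffff600000 since its introduction):

#include <assert.h>
#include <stdint.h>

#define PAGE_MASK	(~0xfffUL)
#define VSYSCALL_ADDR	0xffffffffff600000UL

static int is_vsyscall_vaddr(uint64_t vaddr)
{
	return (vaddr & PAGE_MASK) == VSYSCALL_ADDR;
}

int main(void)
{
	assert(is_vsyscall_vaddr(VSYSCALL_ADDR + 0x400));	/* vsyscall page */
	assert(!is_vsyscall_vaddr(0x00007fffffffe000UL));	/* ordinary stack */
	return 0;
}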
index bb0ab8466b919809a861d7a2f979e132ad863289..48d049cd74e7123a178564ba6fc8ef1dc0212e2d 100644 (file)
@@ -111,9 +111,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 /* Control unconditional IBPB in switch_mm() */
 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-/* Control MDS CPU buffer clear before returning to user space */
-DEFINE_STATIC_KEY_FALSE(mds_user_clear);
-EXPORT_SYMBOL_GPL(mds_user_clear);
 /* Control MDS CPU buffer clear before idling (halt, mwait) */
 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
 EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -252,7 +249,7 @@ static void __init mds_select_mitigation(void)
                if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
                        mds_mitigation = MDS_MITIGATION_VMWERV;
 
-               static_branch_enable(&mds_user_clear);
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 
                if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
                    (mds_nosmt || cpu_mitigations_auto_nosmt()))
@@ -356,7 +353,7 @@ static void __init taa_select_mitigation(void)
         * For guests that can't determine whether the correct microcode is
         * present on host, enable the mitigation for UCODE_NEEDED as well.
         */
-       static_branch_enable(&mds_user_clear);
+       setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 
        if (taa_nosmt || cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);
@@ -424,7 +421,7 @@ static void __init mmio_select_mitigation(void)
         */
        if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
                                              boot_cpu_has(X86_FEATURE_RTM)))
-               static_branch_enable(&mds_user_clear);
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
        else
                static_branch_enable(&mmio_stale_data_clear);
 
@@ -484,12 +481,12 @@ static void __init md_clear_update_mitigation(void)
        if (cpu_mitigations_off())
                return;
 
-       if (!static_key_enabled(&mds_user_clear))
+       if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
                goto out;
 
        /*
-        * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
-        * mitigation, if necessary.
+        * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
+        * Stale Data mitigation, if necessary.
         */
        if (mds_mitigation == MDS_MITIGATION_OFF &&
            boot_cpu_has_bug(X86_BUG_MDS)) {
index 0b97bcde70c6102a4b82b561c3256ec53b614770..fbc4e60d027cbff23b91e0d8cf2720cabb64803c 100644 (file)
@@ -1589,6 +1589,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                get_cpu_vendor(c);
                get_cpu_cap(c);
                setup_force_cpu_cap(X86_FEATURE_CPUID);
+               get_cpu_address_sizes(c);
                cpu_parse_early_param();
 
                if (this_cpu->c_early_init)
@@ -1601,10 +1602,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                        this_cpu->c_bsp_init(c);
        } else {
                setup_clear_cpu_cap(X86_FEATURE_CPUID);
+               get_cpu_address_sizes(c);
        }
 
-       get_cpu_address_sizes(c);
-
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
        cpu_set_bug_bits(c);
index a927a8fc962448035f041c8b17f45ffb6bb9e079..40dec9b56f87db8348c1a242330f243c22c5199d 100644 (file)
@@ -184,6 +184,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
        return false;
 }
 
+#define MSR_IA32_TME_ACTIVATE          0x982
+
+/* Helpers to access TME_ACTIVATE MSR */
+#define TME_ACTIVATE_LOCKED(x)         (x & 0x1)
+#define TME_ACTIVATE_ENABLED(x)                (x & 0x2)
+
+#define TME_ACTIVATE_POLICY(x)         ((x >> 4) & 0xf)        /* Bits 7:4 */
+#define TME_ACTIVATE_POLICY_AES_XTS_128        0
+
+#define TME_ACTIVATE_KEYID_BITS(x)     ((x >> 32) & 0xf)       /* Bits 35:32 */
+
+#define TME_ACTIVATE_CRYPTO_ALGS(x)    ((x >> 48) & 0xffff)    /* Bits 63:48 */
+#define TME_ACTIVATE_CRYPTO_AES_XTS_128        1
+
+/* Values for mktme_status (SW only construct) */
+#define MKTME_ENABLED                  0
+#define MKTME_DISABLED                 1
+#define MKTME_UNINITIALIZED            2
+static int mktme_status = MKTME_UNINITIALIZED;
+
+static void detect_tme_early(struct cpuinfo_x86 *c)
+{
+       u64 tme_activate, tme_policy, tme_crypto_algs;
+       int keyid_bits = 0, nr_keyids = 0;
+       static u64 tme_activate_cpu0 = 0;
+
+       rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+
+       if (mktme_status != MKTME_UNINITIALIZED) {
+               if (tme_activate != tme_activate_cpu0) {
+                       /* Broken BIOS? */
+                       pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+                       pr_err_once("x86/tme: MKTME is not usable\n");
+                       mktme_status = MKTME_DISABLED;
+
+                       /* Proceed. We may need to exclude bits from x86_phys_bits. */
+               }
+       } else {
+               tme_activate_cpu0 = tme_activate;
+       }
+
+       if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+               pr_info_once("x86/tme: not enabled by BIOS\n");
+               mktme_status = MKTME_DISABLED;
+               return;
+       }
+
+       if (mktme_status != MKTME_UNINITIALIZED)
+               goto detect_keyid_bits;
+
+       pr_info("x86/tme: enabled by BIOS\n");
+
+       tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+       if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+               pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+
+       tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+       if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+               pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+                               tme_crypto_algs);
+               mktme_status = MKTME_DISABLED;
+       }
+detect_keyid_bits:
+       keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+       nr_keyids = (1UL << keyid_bits) - 1;
+       if (nr_keyids) {
+               pr_info_once("x86/mktme: enabled by BIOS\n");
+               pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+       } else {
+               pr_info_once("x86/mktme: disabled by BIOS\n");
+       }
+
+       if (mktme_status == MKTME_UNINITIALIZED) {
+               /* MKTME is usable */
+               mktme_status = MKTME_ENABLED;
+       }
+
+       /*
+        * KeyID bits effectively lower the number of physical address
+        * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
+        */
+       c->x86_phys_bits -= keyid_bits;
+}
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
        u64 misc_enable;
@@ -322,6 +406,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
         */
        if (detect_extended_topology_early(c) < 0)
                detect_ht_early(c);
+
+       /*
+        * Adjust the number of physical bits early because it affects the
+        * valid bits of the MTRR mask registers.
+        */
+       if (cpu_has(c, X86_FEATURE_TME))
+               detect_tme_early(c);
 }
 
 static void bsp_init_intel(struct cpuinfo_x86 *c)
@@ -482,90 +573,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }
 
-#define MSR_IA32_TME_ACTIVATE          0x982
-
-/* Helpers to access TME_ACTIVATE MSR */
-#define TME_ACTIVATE_LOCKED(x)         (x & 0x1)
-#define TME_ACTIVATE_ENABLED(x)                (x & 0x2)
-
-#define TME_ACTIVATE_POLICY(x)         ((x >> 4) & 0xf)        /* Bits 7:4 */
-#define TME_ACTIVATE_POLICY_AES_XTS_128        0
-
-#define TME_ACTIVATE_KEYID_BITS(x)     ((x >> 32) & 0xf)       /* Bits 35:32 */
-
-#define TME_ACTIVATE_CRYPTO_ALGS(x)    ((x >> 48) & 0xffff)    /* Bits 63:48 */
-#define TME_ACTIVATE_CRYPTO_AES_XTS_128        1
-
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED                  0
-#define MKTME_DISABLED                 1
-#define MKTME_UNINITIALIZED            2
-static int mktme_status = MKTME_UNINITIALIZED;
-
-static void detect_tme(struct cpuinfo_x86 *c)
-{
-       u64 tme_activate, tme_policy, tme_crypto_algs;
-       int keyid_bits = 0, nr_keyids = 0;
-       static u64 tme_activate_cpu0 = 0;
-
-       rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
-
-       if (mktme_status != MKTME_UNINITIALIZED) {
-               if (tme_activate != tme_activate_cpu0) {
-                       /* Broken BIOS? */
-                       pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
-                       pr_err_once("x86/tme: MKTME is not usable\n");
-                       mktme_status = MKTME_DISABLED;
-
-                       /* Proceed. We may need to exclude bits from x86_phys_bits. */
-               }
-       } else {
-               tme_activate_cpu0 = tme_activate;
-       }
-
-       if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
-               pr_info_once("x86/tme: not enabled by BIOS\n");
-               mktme_status = MKTME_DISABLED;
-               return;
-       }
-
-       if (mktme_status != MKTME_UNINITIALIZED)
-               goto detect_keyid_bits;
-
-       pr_info("x86/tme: enabled by BIOS\n");
-
-       tme_policy = TME_ACTIVATE_POLICY(tme_activate);
-       if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
-               pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
-
-       tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
-       if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
-               pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
-                               tme_crypto_algs);
-               mktme_status = MKTME_DISABLED;
-       }
-detect_keyid_bits:
-       keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
-       nr_keyids = (1UL << keyid_bits) - 1;
-       if (nr_keyids) {
-               pr_info_once("x86/mktme: enabled by BIOS\n");
-               pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
-       } else {
-               pr_info_once("x86/mktme: disabled by BIOS\n");
-       }
-
-       if (mktme_status == MKTME_UNINITIALIZED) {
-               /* MKTME is usable */
-               mktme_status = MKTME_ENABLED;
-       }
-
-       /*
-        * KeyID bits effectively lower the number of physical address
-        * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
-        */
-       c->x86_phys_bits -= keyid_bits;
-}
-
 static void init_cpuid_fault(struct cpuinfo_x86 *c)
 {
        u64 msr;
@@ -702,9 +709,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 
        init_ia32_feat_ctl(c);
 
-       if (cpu_has(c, X86_FEATURE_TME))
-               detect_tme(c);
-
        init_intel_misc_features(c);
 
        split_lock_init();
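
detect_tme_early() is a move, not a rewrite, so the field helpers behave as before. A standalone decode of a made-up TME_ACTIVATE value (the MSR value is hypothetical; the field layout matches the macros above):

#include <stdio.h>
#include <stdint.h>

#define TME_ACTIVATE_LOCKED(x)		((x) & 0x1)
#define TME_ACTIVATE_ENABLED(x)		((x) & 0x2)
#define TME_ACTIVATE_KEYID_BITS(x)	(((x) >> 32) & 0xf)

int main(void)
{
	uint64_t v = 0x0000000300000003ULL;	/* locked + enabled, 3 KeyID bits */
	int keyid_bits = TME_ACTIVATE_KEYID_BITS(v);

	printf("locked=%d enabled=%d keyid_bits=%d nr_keyids=%d\n",
	       !!TME_ACTIVATE_LOCKED(v), !!TME_ACTIVATE_ENABLED(v),
	       keyid_bits, (1 << keyid_bits) - 1);	/* 1 1 3 7 */
	return 0;
}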
index fb8cf953380dab44a5426f78733a25452ade3b87..b66f540de054a72403dbe3b4a837d6b1e280610d 100644 (file)
@@ -1017,10 +1017,12 @@ void __init e820__reserve_setup_data(void)
                e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
 
                /*
-                * SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
-                * to be reserved.
+                * SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
+                * kexec and do not need to be reserved.
                 */
-               if (data->type != SETUP_EFI && data->type != SETUP_IMA)
+               if (data->type != SETUP_EFI &&
+                   data->type != SETUP_IMA &&
+                   data->type != SETUP_RNG_SEED)
                        e820__range_update_kexec(pa_data,
                                                 sizeof(*data) + data->len,
                                                 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
index 17e955ab69feda933cca3708822f6f9f598e31bf..3082cf24b69e34a3a0ca09a50a72ee1aaec8ebc8 100644 (file)
@@ -563,9 +563,6 @@ nmi_restart:
        }
        if (this_cpu_dec_return(nmi_state))
                goto nmi_restart;
-
-       if (user_mode(regs))
-               mds_user_clear_cpu_buffers();
 }
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
index 87e3da7b0439790dac6b35aa4f95e8e7573284d7..65ed14b6540bbebfb91e1d20d0c7627277da3f26 100644 (file)
@@ -80,9 +80,10 @@ config KVM_SW_PROTECTED_VM
        depends on KVM && X86_64
        select KVM_GENERIC_PRIVATE_MEM
        help
-         Enable support for KVM software-protected VMs.  Currently "protected"
-         means the VM can be backed with memory provided by
-         KVM_CREATE_GUEST_MEMFD.
+         Enable support for KVM software-protected VMs.  Currently, software-
+         protected VMs are purely a development and testing vehicle for
+         KVM_CREATE_GUEST_MEMFD.  Attempting to run a "real" VM workload as a
+         software-protected VM will fail miserably.
 
          If unsure, say "N".
 
index 2d6cdeab1f8a3e78306148d44a4665a1d51d8b1e..0544700ca50b8458ad97020bde53ec24432a21c2 100644 (file)
@@ -4405,6 +4405,31 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
        fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
        smp_rmb();
 
+       /*
+        * Check for a relevant mmu_notifier invalidation event before getting
+        * the pfn from the primary MMU, and before acquiring mmu_lock.
+        *
+        * For mmu_lock, if there is an in-progress invalidation and the kernel
+        * allows preemption, the invalidation task may drop mmu_lock and yield
+        * in response to mmu_lock being contended, which is *very* counter-
+        * productive as this vCPU can't actually make forward progress until
+        * the invalidation completes.
+        *
+        * Retrying now can also avoid unnecessary lock contention in the primary
+        * MMU, as the primary MMU doesn't necessarily hold a single lock for
+        * the duration of the invalidation, i.e. faulting in a conflicting pfn
+        * can cause the invalidation to take longer by holding locks that are
+        * needed to complete the invalidation.
+        *
+        * Do the pre-check even for non-preemptible kernels, i.e. even if KVM
+        * will never yield mmu_lock in response to contention, as this vCPU is
+        * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
+        * to detect retry guarantees the worst case latency for the vCPU.
+        */
+       if (fault->slot &&
+           mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
+               return RET_PF_RETRY;
+
        ret = __kvm_faultin_pfn(vcpu, fault);
        if (ret != RET_PF_CONTINUE)
                return ret;
@@ -4415,6 +4440,18 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
        if (unlikely(!fault->slot))
                return kvm_handle_noslot_fault(vcpu, fault, access);
 
+       /*
+        * Check again for a relevant mmu_notifier invalidation event purely to
+        * avoid contending mmu_lock.  Most invalidations will be detected by
+        * the previous check, but checking is extremely cheap relative to the
+        * overall cost of failing to detect the invalidation until after
+        * mmu_lock is acquired.
+        */
+       if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn)) {
+               kvm_release_pfn_clean(fault->pfn);
+               return RET_PF_RETRY;
+       }
+
        return RET_PF_CONTINUE;
 }
 
@@ -4442,6 +4479,11 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
        if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
                return true;
 
+       /*
+        * Check for a relevant mmu_notifier invalidation event one last time
+        * now that mmu_lock is held, as the "unsafe" checks performed without
+        * holding mmu_lock can get false negatives.
+        */
        return fault->slot &&
               mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
 }
index f760106c31f8a58d2941dbabd82531b9779089fa..a8ce5226b3b5785b0b73741148076dc978761fa0 100644 (file)
@@ -57,7 +57,7 @@ static bool sev_es_enabled = true;
 module_param_named(sev_es, sev_es_enabled, bool, 0444);
 
 /* enable/disable SEV-ES DebugSwap support */
-static bool sev_es_debug_swap_enabled = true;
+static bool sev_es_debug_swap_enabled = false;
 module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
 #else
 #define sev_enabled false
@@ -612,8 +612,11 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
        save->xss  = svm->vcpu.arch.ia32_xss;
        save->dr6  = svm->vcpu.arch.dr6;
 
-       if (sev_es_debug_swap_enabled)
+       if (sev_es_debug_swap_enabled) {
                save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
+               pr_warn_once("Enabling DebugSwap with KVM_SEV_ES_INIT. "
+                            "This will not work starting with Linux 6.10\n");
+       }
 
        pr_debug("Virtual Machine Save Area (VMSA):\n");
        print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
@@ -1975,20 +1978,22 @@ int sev_mem_enc_register_region(struct kvm *kvm,
                goto e_free;
        }
 
-       region->uaddr = range->addr;
-       region->size = range->size;
-
-       list_add_tail(&region->list, &sev->regions_list);
-       mutex_unlock(&kvm->lock);
-
        /*
         * The guest may change the memory encryption attribute from C=0 -> C=1
         * or vice versa for this memory range. Let's make sure caches are
         * flushed to ensure that guest data gets written into memory with
-        * correct C-bit.
+        * correct C-bit.  Note, this must be done before dropping kvm->lock,
+        * as region and its array of pages can be freed by a different task
+        * once kvm->lock is released.
         */
        sev_clflush_pages(region->pages, region->npages);
 
+       region->uaddr = range->addr;
+       region->size = range->size;
+
+       list_add_tail(&region->list, &sev->regions_list);
+       mutex_unlock(&kvm->lock);
+
        return ret;
 
 e_free:
index edc3f16cc1896f29e4eef46da685d22b4c31c668..6a9bfdfbb6e59b2e613385cd2ad46cc651a0eb28 100644 (file)
@@ -2,7 +2,10 @@
 #ifndef __KVM_X86_VMX_RUN_FLAGS_H
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
-#define VMX_RUN_VMRESUME       (1 << 0)
-#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
+#define VMX_RUN_VMRESUME_SHIFT         0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT   1
+
+#define VMX_RUN_VMRESUME               BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL         BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
index 906ecd001511355d0939e4e90a3994a7bd9809e3..2bfbf758d06110f49c71a22c1f54da9d9499669a 100644 (file)
@@ -139,7 +139,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
        mov (%_ASM_SP), %_ASM_AX
 
        /* Check if vmlaunch or vmresume is needed */
-       test $VMX_RUN_VMRESUME, %ebx
+       bt   $VMX_RUN_VMRESUME_SHIFT, %ebx
 
        /* Load guest registers.  Don't clobber flags. */
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -161,8 +161,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
        /* Load guest RAX.  This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX
 
-       /* Check EFLAGS.ZF from 'test VMX_RUN_VMRESUME' above */
-       jz .Lvmlaunch
+       /* Clobbers EFLAGS.ZF */
+       CLEAR_CPU_BUFFERS
+
+       /* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
+       jnc .Lvmlaunch
 
        /*
         * After a successful VMRESUME/VMLAUNCH, control flow "magically"
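
The test -> bt switch exists because the newly inserted CLEAR_CPU_BUFFERS executes VERW, which clobbers EFLAGS.ZF (exactly where `test` left its result) while leaving CF intact, so `bt` parks the decision bit in CF instead. A standalone x86-64 illustration (GCC/Clang inline asm; a null selector is fine since VERW never faults, it only updates ZF):

#include <assert.h>

int main(void)
{
	unsigned short sel = 0;		/* VERW only updates ZF, never faults */
	unsigned int flags = 1;		/* a VMX_RUN_VMRESUME-style bit 0 */
	unsigned char cf;

	asm volatile("btl  $0, %[f]\n\t"	/* CF := bit 0 of flags */
		     "verw %[sel]\n\t"		/* clobbers ZF, leaves CF alone */
		     "setc %[cf]"
		     : [cf] "=q" (cf)
		     : [f] "r" (flags), [sel] "m" (sel)
		     : "cc");

	assert(cf == 1);	/* the decision bit survived the VERW */
	return 0;
}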
index 1111d9d089038b2f17b372891a235222b74f87bf..88a4ff200d04bf2ae6c9c2953608c3d34b2ac592 100644 (file)
@@ -388,7 +388,16 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
 
 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 {
-       vmx->disable_fb_clear = (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
+       /*
+        * Disable VERW's behavior of clearing CPU buffers for the guest if the
+        * CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
+        * the mitigation. Disabling the clearing behavior provides a
+        * performance boost for guests that aren't aware that manually clearing
+        * CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
+        * and VM-Exit.
+        */
+       vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
+                               (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
                                !boot_cpu_has_bug(X86_BUG_MDS) &&
                                !boot_cpu_has_bug(X86_BUG_TAA);
 
@@ -7224,11 +7233,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
        guest_state_enter_irqoff();
 
-       /* L1D Flush includes CPU buffer clear to mitigate MDS */
+       /*
+        * L1D Flush includes CPU buffer clear to mitigate MDS, but the VERW
+        * mitigation for MDS is done late in VM-Entry and is still
+        * executed in spite of the L1D Flush, because an extra VERW
+        * should not matter much after the big-hammer L1D Flush.
+        */
        if (static_branch_unlikely(&vmx_l1d_should_flush))
                vmx_l1d_flush(vcpu);
-       else if (static_branch_unlikely(&mds_user_clear))
-               mds_clear_cpu_buffers();
        else if (static_branch_unlikely(&mmio_stale_data_clear) &&
                 kvm_arch_has_assigned_device(vcpu->kvm))
                mds_clear_cpu_buffers();
index 48a61d283406f36a4f5230f508376a1fb951d58b..e02cc710f56de285fc080a0c0a4ecca4addaff4f 100644 (file)
@@ -4580,7 +4580,7 @@ static bool kvm_is_vm_type_supported(unsigned long type)
 {
        return type == KVM_X86_DEFAULT_VM ||
               (type == KVM_X86_SW_PROTECTED_VM &&
-               IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_enabled);
+               IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -8007,6 +8007,16 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 
        if (r < 0)
                return X86EMUL_UNHANDLEABLE;
+
+       /*
+        * Mark the page dirty _before_ checking whether or not the CMPXCHG was
+        * successful, as the old value is written back on failure.  Note, for
+        * live migration, this is unnecessarily conservative as CMPXCHG writes
+        * back the original value and the access is atomic, but KVM's ABI is
+        * that all writes are dirty logged, regardless of the value written.
+        */
+       kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa));
+
        if (r)
                return X86EMUL_CMPXCHG_FAILED;
 
index 679b09cfe241c72e7f85bd7bbd406d59a259bf2a..d6375b3c633bc45474bbb2d6460512863ff14a51 100644 (file)
@@ -798,15 +798,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
        show_opcodes(regs, loglvl);
 }
 
-/*
- * The (legacy) vsyscall page is the long page in the kernel portion
- * of the address space that has user-accessible permissions.
- */
-static bool is_vsyscall_vaddr(unsigned long vaddr)
-{
-       return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
-}
-
 static void
 __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                       unsigned long address, u32 pkey, int si_code)
index 6993f026adec9d12a68cdbf3af3314336882f36f..42115ac079cfe617b76199a167c61e5b3c7de10f 100644 (file)
@@ -3,6 +3,8 @@
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 
+#include <asm/vsyscall.h>
+
 #ifdef CONFIG_X86_64
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
@@ -15,6 +17,14 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
        if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
                return false;
 
+       /*
+        * Reading from the vsyscall page may cause an unhandled fault in
+        * certain cases.  Though it is at an address above TASK_SIZE_MAX, it is
+        * certain cases.  Though it is at an address above TASK_SIZE_MAX, it is
+        * usually considered a user-space address.
+       if (is_vsyscall_vaddr(vaddr))
+               return false;
+
        /*
         * Allow everything during early boot before 'x86_virt_bits'
         * is initialized.  Needed for instruction decoding in early
index adc497b93f03746aca087a71233b806a5790bf96..65e9a6e391c046d1c18c32ffa0049082461a82dd 100644 (file)
@@ -934,7 +934,7 @@ static int __init cmp_memblk(const void *a, const void *b)
        const struct numa_memblk *ma = *(const struct numa_memblk **)a;
        const struct numa_memblk *mb = *(const struct numa_memblk **)b;
 
-       return ma->start - mb->start;
+       return (ma->start > mb->start) - (ma->start < mb->start);
 }
 
 static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
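
The comparator fix matters because subtracting two u64 starts and truncating the result to int can collapse to zero or flip sign, mis-ordering the sort. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

static int cmp_old(uint64_t a, uint64_t b) { return (int)(a - b); }		/* old form */
static int cmp_new(uint64_t a, uint64_t b) { return (a > b) - (a < b); }	/* new form */

int main(void)
{
	uint64_t a = 0x100000000ULL, b = 0;	/* a > b by exactly 2^32 */

	printf("old: %d (wrongly 'equal')\n", cmp_old(a, b));		/* 0 */
	printf("new: %d (correctly 'greater')\n", cmp_new(a, b));	/* 1 */
	return 0;
}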
@@ -944,14 +944,12 @@ static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
  * @start: address to begin fill
  * @end: address to end fill
  *
- * Find and extend numa_meminfo memblks to cover the @start-@end
- * physical address range, such that the first memblk includes
- * @start, the last memblk includes @end, and any gaps in between
- * are filled.
+ * Find and extend numa_meminfo memblks to cover the physical
+ * address range @start-@end.
  *
  * RETURNS:
  * 0             : Success
- * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
+ * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
  */
 
 int __init numa_fill_memblks(u64 start, u64 end)
@@ -963,17 +961,14 @@ int __init numa_fill_memblks(u64 start, u64 end)
 
        /*
         * Create a list of pointers to numa_meminfo memblks that
-        * overlap start, end. Exclude (start == bi->end) since
-        * end addresses in both a CFMWS range and a memblk range
-        * are exclusive.
-        *
-        * This list of pointers is used to make in-place changes
-        * that fill out the numa_meminfo memblks.
+        * overlap start, end. The list is used to make in-place
+        * changes that fill out the numa_meminfo memblks.
         */
        for (int i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];
 
-               if (start < bi->end && end >= bi->start) {
+               if (memblock_addrs_overlap(start, end - start, bi->start,
+                                          bi->end - bi->start)) {
                        blk[count] = &mi->blk[i];
                        count++;
                }
index e9b448d1b1b70f08dae6216250f02e783091a83a..10288040404635743bac78ee8bb0fd721b370b88 100644 (file)
@@ -755,10 +755,14 @@ pmd_t *lookup_pmd_address(unsigned long address)
  * areas on 32-bit NUMA systems.  The percpu areas can
  * end up in this kind of memory, for instance.
  *
- * This could be optimized, but it is only intended to be
- * used at initialization time, and keeping it
- * unoptimized should increase the testing coverage for
- * the more obscure platforms.
+ * Note that as long as the PTEs are well-formed with correct PFNs, this
+ * works without checking the PRESENT bit in the leaf PTE.  This is unlike
+ * the similar vmalloc_to_page() and derivatives.  Callers may depend on
+ * this behavior.
+ *
+ * This could be optimized, but it is only used in paths that are not perf
+ * sensitive, and keeping it unoptimized should increase the testing coverage
+ * for the more obscure platforms.
  */
 phys_addr_t slow_virt_to_phys(void *__virt_addr)
 {
@@ -2041,17 +2045,12 @@ int set_mce_nospec(unsigned long pfn)
        return rc;
 }
 
-static int set_memory_p(unsigned long *addr, int numpages)
-{
-       return change_page_attr_set(addr, numpages, __pgprot(_PAGE_PRESENT), 0);
-}
-
 /* Restore full speculative operation to the pfn. */
 int clear_mce_nospec(unsigned long pfn)
 {
        unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
 
-       return set_memory_p(&addr, 1);
+       return set_memory_p(addr, 1);
 }
 EXPORT_SYMBOL_GPL(clear_mce_nospec);
 #endif /* CONFIG_X86_64 */
@@ -2104,6 +2103,11 @@ int set_memory_np_noalias(unsigned long addr, int numpages)
                                        CPA_NO_CHECK_ALIAS, NULL);
 }
 
+int set_memory_p(unsigned long addr, int numpages)
+{
+       return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
+}
+
 int set_memory_4k(unsigned long addr, int numpages)
 {
        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
index dec7ce3a3edb7027b971232269846390e3baa834..d247a457bf6e3fd03c0e0f496988c6a5e8999ff1 100644 (file)
@@ -71,6 +71,7 @@ enum opal_response_token {
 #define SHORT_ATOM_BYTE  0xBF
 #define MEDIUM_ATOM_BYTE 0xDF
 #define LONG_ATOM_BYTE   0xE3
+#define EMPTY_ATOM_BYTE  0xFF
 
 #define OPAL_INVAL_PARAM 12
 #define OPAL_MANUFACTURED_INACTIVE 0x08
index 3d9e9cd250bd541f3166932bde9e43b35c13f13a..fa4dba5d85319e49a3bb411a7c78a6deed65d3e9 100644 (file)
@@ -1056,16 +1056,20 @@ static int response_parse(const u8 *buf, size_t length,
                        token_length = response_parse_medium(iter, pos);
                else if (pos[0] <= LONG_ATOM_BYTE) /* long atom */
                        token_length = response_parse_long(iter, pos);
+               else if (pos[0] == EMPTY_ATOM_BYTE) /* empty atom */
+                       token_length = 1;
                else /* TOKEN */
                        token_length = response_parse_token(iter, pos);
 
                if (token_length < 0)
                        return token_length;
 
+               if (pos[0] != EMPTY_ATOM_BYTE)
+                       num_entries++;
+
                pos += token_length;
                total -= token_length;
                iter++;
-               num_entries++;
        }
 
        resp->num = num_entries;
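
The parse loop now recognizes the TCG empty atom (0xFF), consumes its single byte, and keeps it out of num_entries. A standalone sketch of the dispatch order, using the byte values from opal_proto.h:

#include <stdio.h>

#define SHORT_ATOM_BYTE		0xBF
#define MEDIUM_ATOM_BYTE	0xDF
#define LONG_ATOM_BYTE		0xE3
#define EMPTY_ATOM_BYTE		0xFF

static const char *classify(unsigned char b)
{
	if (b <= SHORT_ATOM_BYTE)	return "short atom";
	if (b <= MEDIUM_ATOM_BYTE)	return "medium atom";
	if (b <= LONG_ATOM_BYTE)	return "long atom";
	if (b == EMPTY_ATOM_BYTE)	return "empty atom (1 byte, not counted)";
	return "token";
}

int main(void)
{
	unsigned char bytes[] = { 0x01, 0xC5, 0xE2, 0xFF, 0xF0 };

	for (unsigned int i = 0; i < sizeof(bytes); i++)
		printf("%#04x -> %s\n", bytes[i], classify(bytes[i]));
	return 0;
}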
index 0b6dd8aa21f2edace686fb5531705698e7acc18d..0f1bd7dcde245988bb7d01dc9d0e32655669bdf8 100644 (file)
@@ -212,13 +212,12 @@ static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
 
        ivsize = crypto_lskcipher_ivsize(tfm);
        ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1);
+       memcpy(ivs, req->iv, ivsize);
 
        flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
        if (req->base.flags & CRYPTO_SKCIPHER_REQ_CONT)
                flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
-       else
-               memcpy(ivs, req->iv, ivsize);
 
        if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
                flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;
@@ -234,8 +233,7 @@ static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
                flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
        }
 
-       if (flags & CRYPTO_LSKCIPHER_FLAG_FINAL)
-               memcpy(req->iv, ivs, ivsize);
+       memcpy(req->iv, ivs, ivsize);
 
        return err;
 }
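The IV is now round-tripped on every invocation instead of only at the start (non-CONT) and end (FINAL) of a chain, so each partial step both sees the chained IV and publishes the updated one. In outline:

	memcpy(ivs, req->iv, ivsize);	/* seed the aligned IV copy */
	/* ... process this chunk; the cipher advances ivs in place ... */
	memcpy(req->iv, ivs, ivsize);	/* hand the chained IV back */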
index 1c995307c1138885dc9cacd7ad73ca16633f2158..a1523d0b1ef3660709ae087003a703fb4f8237bd 100644 (file)
@@ -24,7 +24,7 @@
 #define SKU_HW_ID_SHIFT              16u
 #define SKU_HW_ID_MASK               0xffff0000u
 
-#define PLL_CONFIG_DEFAULT           0x1
+#define PLL_CONFIG_DEFAULT           0x0
 #define PLL_CDYN_DEFAULT             0x80
 #define PLL_EPP_DEFAULT              0x80
 #define PLL_REF_CLK_FREQ            (50 * 1000000)
index fe825a432c5bfcce4776d83e0f072c9675507dae..ab2a82cb1b0b48ab21682bdb87c052707f19d282 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/interrupt.h>
 #include <linux/timer.h>
 #include <linux/cper.h>
-#include <linux/cxl-event.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/ratelimit.h>
@@ -674,52 +673,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
        schedule_work(&entry->work);
 }
 
-/*
- * Only a single callback can be registered for CXL CPER events.
- */
-static DECLARE_RWSEM(cxl_cper_rw_sem);
-static cxl_cper_callback cper_callback;
-
-static void cxl_cper_post_event(enum cxl_event_type event_type,
-                               struct cxl_cper_event_rec *rec)
-{
-       if (rec->hdr.length <= sizeof(rec->hdr) ||
-           rec->hdr.length > sizeof(*rec)) {
-               pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
-                      rec->hdr.length);
-               return;
-       }
-
-       if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
-               pr_err(FW_WARN "CXL CPER invalid event\n");
-               return;
-       }
-
-       guard(rwsem_read)(&cxl_cper_rw_sem);
-       if (cper_callback)
-               cper_callback(event_type, rec);
-}
-
-int cxl_cper_register_callback(cxl_cper_callback callback)
-{
-       guard(rwsem_write)(&cxl_cper_rw_sem);
-       if (cper_callback)
-               return -EINVAL;
-       cper_callback = callback;
-       return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
-
-int cxl_cper_unregister_callback(cxl_cper_callback callback)
-{
-       guard(rwsem_write)(&cxl_cper_rw_sem);
-       if (callback != cper_callback)
-               return -EINVAL;
-       cper_callback = NULL;
-       return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
-
 static bool ghes_do_proc(struct ghes *ghes,
                         const struct acpi_hest_generic_status *estatus)
 {
@@ -754,22 +707,6 @@ static bool ghes_do_proc(struct ghes *ghes,
                }
                else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
                        queued = ghes_handle_arm_hw_error(gdata, sev, sync);
-               } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
-                       struct cxl_cper_event_rec *rec =
-                               acpi_hest_get_payload(gdata);
-
-                       cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
-               } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
-                       struct cxl_cper_event_rec *rec =
-                               acpi_hest_get_payload(gdata);
-
-                       cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
-               } else if (guid_equal(sec_type,
-                                     &CPER_SEC_CXL_MEM_MODULE_GUID)) {
-                       struct cxl_cper_event_rec *rec =
-                               acpi_hest_get_payload(gdata);
-
-                       cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
                } else {
                        void *err = acpi_hest_get_payload(gdata);
 
index dbdee2924594a921f27fead574fcf1855c4e471b..02255795b800d1a42ceb7694216d2b6c92594b6b 100644 (file)
@@ -525,10 +525,12 @@ static void acpi_ec_clear(struct acpi_ec *ec)
 
 static void acpi_ec_enable_event(struct acpi_ec *ec)
 {
-       spin_lock(&ec->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
        if (acpi_ec_started(ec))
                __acpi_ec_enable_event(ec);
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
 
        /* Drain additional events if hardware requires that */
        if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -544,9 +546,11 @@ static void __acpi_ec_flush_work(void)
 
 static void acpi_ec_disable_event(struct acpi_ec *ec)
 {
-       spin_lock(&ec->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
        __acpi_ec_disable_event(ec);
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
 
        /*
         * When ec_freeze_events is true, we need to flush events in
@@ -567,9 +571,10 @@ void acpi_ec_flush_work(void)
 
 static bool acpi_ec_guard_event(struct acpi_ec *ec)
 {
+       unsigned long flags;
        bool guarded;
 
-       spin_lock(&ec->lock);
+       spin_lock_irqsave(&ec->lock, flags);
        /*
         * If firmware SCI_EVT clearing timing is "event", we actually
         * don't know when the SCI_EVT will be cleared by firmware after
@@ -585,29 +590,31 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
        guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
                ec->event_state != EC_EVENT_READY &&
                (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
        return guarded;
 }
 
 static int ec_transaction_polled(struct acpi_ec *ec)
 {
+       unsigned long flags;
        int ret = 0;
 
-       spin_lock(&ec->lock);
+       spin_lock_irqsave(&ec->lock, flags);
        if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
                ret = 1;
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
        return ret;
 }
 
 static int ec_transaction_completed(struct acpi_ec *ec)
 {
+       unsigned long flags;
        int ret = 0;
 
-       spin_lock(&ec->lock);
+       spin_lock_irqsave(&ec->lock, flags);
        if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
                ret = 1;
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
        return ret;
 }
 
@@ -749,6 +756,7 @@ static int ec_guard(struct acpi_ec *ec)
 
 static int ec_poll(struct acpi_ec *ec)
 {
+       unsigned long flags;
        int repeat = 5; /* number of command restarts */
 
        while (repeat--) {
@@ -757,14 +765,14 @@ static int ec_poll(struct acpi_ec *ec)
                do {
                        if (!ec_guard(ec))
                                return 0;
-                       spin_lock(&ec->lock);
+                       spin_lock_irqsave(&ec->lock, flags);
                        advance_transaction(ec, false);
-                       spin_unlock(&ec->lock);
+                       spin_unlock_irqrestore(&ec->lock, flags);
                } while (time_before(jiffies, delay));
                pr_debug("controller reset, restart transaction\n");
-               spin_lock(&ec->lock);
+               spin_lock_irqsave(&ec->lock, flags);
                start_transaction(ec);
-               spin_unlock(&ec->lock);
+               spin_unlock_irqrestore(&ec->lock, flags);
        }
        return -ETIME;
 }
@@ -772,10 +780,11 @@ static int ec_poll(struct acpi_ec *ec)
 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
                                        struct transaction *t)
 {
+       unsigned long tmp;
        int ret = 0;
 
        /* start transaction */
-       spin_lock(&ec->lock);
+       spin_lock_irqsave(&ec->lock, tmp);
        /* Enable GPE for command processing (IBF=0/OBF=1) */
        if (!acpi_ec_submit_flushable_request(ec)) {
                ret = -EINVAL;
@@ -786,11 +795,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
        ec->curr = t;
        ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
        start_transaction(ec);
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, tmp);
 
        ret = ec_poll(ec);
 
-       spin_lock(&ec->lock);
+       spin_lock_irqsave(&ec->lock, tmp);
        if (t->irq_count == ec_storm_threshold)
                acpi_ec_unmask_events(ec);
        ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -799,7 +808,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
        acpi_ec_complete_request(ec);
        ec_dbg_ref(ec, "Decrease command");
 unlock:
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, tmp);
        return ret;
 }
 
@@ -927,7 +936,9 @@ EXPORT_SYMBOL(ec_get_handle);
 
 static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
 {
-       spin_lock(&ec->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
        if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
                ec_dbg_drv("Starting EC");
                /* Enable GPE for event processing (SCI_EVT=1) */
@@ -937,28 +948,31 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
                }
                ec_log_drv("EC started");
        }
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
 }
 
 static bool acpi_ec_stopped(struct acpi_ec *ec)
 {
+       unsigned long flags;
        bool flushed;
 
-       spin_lock(&ec->lock);
+       spin_lock_irqsave(&ec->lock, flags);
        flushed = acpi_ec_flushed(ec);
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
        return flushed;
 }
 
 static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
 {
-       spin_lock(&ec->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
        if (acpi_ec_started(ec)) {
                ec_dbg_drv("Stopping EC");
                set_bit(EC_FLAGS_STOPPED, &ec->flags);
-               spin_unlock(&ec->lock);
+               spin_unlock_irqrestore(&ec->lock, flags);
                wait_event(ec->wait, acpi_ec_stopped(ec));
-               spin_lock(&ec->lock);
+               spin_lock_irqsave(&ec->lock, flags);
                /* Disable GPE for event processing (SCI_EVT=1) */
                if (!suspending) {
                        acpi_ec_complete_request(ec);
@@ -969,25 +983,29 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
                clear_bit(EC_FLAGS_STOPPED, &ec->flags);
                ec_log_drv("EC stopped");
        }
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
 }
 
 static void acpi_ec_enter_noirq(struct acpi_ec *ec)
 {
-       spin_lock(&ec->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
        ec->busy_polling = true;
        ec->polling_guard = 0;
        ec_log_drv("interrupt blocked");
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
 }
 
 static void acpi_ec_leave_noirq(struct acpi_ec *ec)
 {
-       spin_lock(&ec->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
        ec->busy_polling = ec_busy_polling;
        ec->polling_guard = ec_polling_guard;
        ec_log_drv("interrupt unblocked");
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
 }
 
 void acpi_ec_block_transactions(void)
@@ -1119,9 +1137,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
 
        ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
 
-       spin_lock(&ec->lock);
+       spin_lock_irq(&ec->lock);
        ec->queries_in_progress--;
-       spin_unlock(&ec->lock);
+       spin_unlock_irq(&ec->lock);
 
        acpi_ec_put_query_handler(handler);
        kfree(q);
@@ -1184,12 +1202,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
         */
        ec_dbg_evt("Query(0x%02x) scheduled", value);
 
-       spin_lock(&ec->lock);
+       spin_lock_irq(&ec->lock);
 
        ec->queries_in_progress++;
        queue_work(ec_query_wq, &q->work);
 
-       spin_unlock(&ec->lock);
+       spin_unlock_irq(&ec->lock);
 
        return 0;
 
@@ -1205,14 +1223,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
        ec_dbg_evt("Event started");
 
-       spin_lock(&ec->lock);
+       spin_lock_irq(&ec->lock);
 
        while (ec->events_to_process) {
-               spin_unlock(&ec->lock);
+               spin_unlock_irq(&ec->lock);
 
                acpi_ec_submit_query(ec);
 
-               spin_lock(&ec->lock);
+               spin_lock_irq(&ec->lock);
 
                ec->events_to_process--;
        }
@@ -1229,11 +1247,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
                ec_dbg_evt("Event stopped");
 
-               spin_unlock(&ec->lock);
+               spin_unlock_irq(&ec->lock);
 
                guard_timeout = !!ec_guard(ec);
 
-               spin_lock(&ec->lock);
+               spin_lock_irq(&ec->lock);
 
                /* Take care of SCI_EVT unless someone else is doing that. */
                if (guard_timeout && !ec->curr)
@@ -1246,7 +1264,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
        ec->events_in_progress--;
 
-       spin_unlock(&ec->lock);
+       spin_unlock_irq(&ec->lock);
 }
 
 static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1271,11 +1289,13 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt
 
 static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
 {
-       spin_lock(&ec->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
 
        clear_gpe_and_advance_transaction(ec, true);
 
-       spin_unlock(&ec->lock);
+       spin_unlock_irqrestore(&ec->lock, flags);
 }
 
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -2085,7 +2105,7 @@ bool acpi_ec_dispatch_gpe(void)
         * Dispatch the EC GPE in-band, but do not report wakeup in any case
         * to allow the caller to process events properly after that.
         */
-       spin_lock(&first_ec->lock);
+       spin_lock_irq(&first_ec->lock);
 
        if (acpi_ec_gpe_status_set(first_ec)) {
                pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2094,7 +2114,7 @@ bool acpi_ec_dispatch_gpe(void)
                work_in_progress = acpi_ec_work_in_progress(first_ec);
        }
 
-       spin_unlock(&first_ec->lock);
+       spin_unlock_irq(&first_ec->lock);
 
        if (!work_in_progress)
                return false;
@@ -2107,11 +2127,11 @@ bool acpi_ec_dispatch_gpe(void)
 
                pm_pr_dbg("ACPI EC work flushed\n");
 
-               spin_lock(&first_ec->lock);
+               spin_lock_irq(&first_ec->lock);
 
                work_in_progress = acpi_ec_work_in_progress(first_ec);
 
-               spin_unlock(&first_ec->lock);
+               spin_unlock_irq(&first_ec->lock);
        } while (work_in_progress && !pm_wakeup_pending());
 
        return false;
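All of the above restores interrupt-safe locking for ec->lock: the lock is also taken from the EC GPE handler, so process-context holders must disable local interrupts or risk a self-deadlock. The classic single-CPU failure mode, sketched:

	spin_lock(&ec->lock);		/* process context, irqs still on */
	/* -> EC GPE fires on this CPU:
	 *    acpi_ec_handle_interrupt()
	 *        spin_lock(&ec->lock);	deadlock: never released */

Hence spin_lock_irqsave() where the calling context is unknown, and spin_lock_irq() in paths known to run with interrupts enabled.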
index da2e74fce2d995a932914876b44b3fb5d4275d2e..682ff550ccfb98381515b4821594176f5561f869 100644 (file)
@@ -671,9 +671,17 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                                         struct ahci_host_priv *hpriv)
 {
-       if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
-               dev_info(&pdev->dev, "ASM1166 has only six ports\n");
-               hpriv->saved_port_map = 0x3f;
+       if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
+               switch (pdev->device) {
+               case 0x1166:
+                       dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+                       hpriv->saved_port_map = 0x3f;
+                       break;
+               case 0x1064:
+                       dev_info(&pdev->dev, "ASM1064 has only four ports\n");
+                       hpriv->saved_port_map = 0xf;
+                       break;
+               }
        }
 
        if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
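saved_port_map is a bitmask of implemented ports, so 0x3f exposes ports 0-5 and 0xf ports 0-3. Equivalently:

	hpriv->saved_port_map = GENMASK(5, 0);	/* ASM1166: six ports  */
	hpriv->saved_port_map = GENMASK(3, 0);	/* ASM1064: four ports */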
index 64f7f7d6ba84e07c2f2db2fbbdfb3d315f821ec2..11a2c199a7c24628e858f2fc8e88e69a60c8b94b 100644 (file)
@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
        u32 axicc;
        bool is_cci_enabled;
        int flags;
-       struct reset_control *rst;
 };
 
 static unsigned int ceva_ahci_read_id(struct ata_device *dev,
@@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = {
        AHCI_SHT(DRV_NAME),
 };
 
+static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+       int rc, i;
+
+       rc = ahci_platform_enable_regulators(hpriv);
+       if (rc)
+               return rc;
+
+       rc = ahci_platform_enable_clks(hpriv);
+       if (rc)
+               goto disable_regulator;
+
+       /* Assert the controller reset */
+       rc = ahci_platform_assert_rsts(hpriv);
+       if (rc)
+               goto disable_clks;
+
+       for (i = 0; i < hpriv->nports; i++) {
+               rc = phy_init(hpriv->phys[i]);
+               if (rc)
+                       goto disable_rsts;
+       }
+
+       /* De-assert the controller reset */
+       ahci_platform_deassert_rsts(hpriv);
+
+       for (i = 0; i < hpriv->nports; i++) {
+               rc = phy_power_on(hpriv->phys[i]);
+               if (rc) {
+                       phy_exit(hpriv->phys[i]);
+                       goto disable_phys;
+               }
+       }
+
+       return 0;
+
+disable_rsts:
+       ahci_platform_deassert_rsts(hpriv);
+
+disable_phys:
+       while (--i >= 0) {
+               phy_power_off(hpriv->phys[i]);
+               phy_exit(hpriv->phys[i]);
+       }
+
+disable_clks:
+       ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+       ahci_platform_disable_regulators(hpriv);
+
+       return rc;
+}
+
 static int ceva_ahci_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        cevapriv->ahci_pdev = pdev;
-
-       cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
-                                                                 NULL);
-       if (IS_ERR(cevapriv->rst))
-               dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
-                             "failed to get reset\n");
-
        hpriv = ahci_platform_get_resources(pdev, 0);
        if (IS_ERR(hpriv))
                return PTR_ERR(hpriv);
 
-       if (!cevapriv->rst) {
-               rc = ahci_platform_enable_resources(hpriv);
-               if (rc)
-                       return rc;
-       } else {
-               int i;
+       hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
+                                                               NULL);
+       if (IS_ERR(hpriv->rsts))
+               return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
+                                    "failed to get reset\n");
 
-               rc = ahci_platform_enable_clks(hpriv);
-               if (rc)
-                       return rc;
-               /* Assert the controller reset */
-               reset_control_assert(cevapriv->rst);
-
-               for (i = 0; i < hpriv->nports; i++) {
-                       rc = phy_init(hpriv->phys[i]);
-                       if (rc)
-                               return rc;
-               }
-
-               /* De-assert the controller reset */
-               reset_control_deassert(cevapriv->rst);
-
-               for (i = 0; i < hpriv->nports; i++) {
-                       rc = phy_power_on(hpriv->phys[i]);
-                       if (rc) {
-                               phy_exit(hpriv->phys[i]);
-                               return rc;
-                       }
-               }
-       }
+       rc = ceva_ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
 
        if (of_property_read_bool(np, "ceva,broken-gen2"))
                cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
        if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
                                        (u8 *)&cevapriv->pp2c[0], 4) < 0) {
                dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto disable_resources;
        }
 
        if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
                                        (u8 *)&cevapriv->pp2c[1], 4) < 0) {
                dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto disable_resources;
        }
 
        /* Read OOB timing value for COMWAKE from device-tree*/
        if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
                                        (u8 *)&cevapriv->pp3c[0], 4) < 0) {
                dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto disable_resources;
        }
 
        if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
                                        (u8 *)&cevapriv->pp3c[1], 4) < 0) {
                dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto disable_resources;
        }
 
        /* Read phy BURST timing value from device-tree */
        if (of_property_read_u8_array(np, "ceva,p0-burst-params",
                                        (u8 *)&cevapriv->pp4c[0], 4) < 0) {
                dev_warn(dev, "ceva,p0-burst-params property not defined\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto disable_resources;
        }
 
        if (of_property_read_u8_array(np, "ceva,p1-burst-params",
                                        (u8 *)&cevapriv->pp4c[1], 4) < 0) {
                dev_warn(dev, "ceva,p1-burst-params property not defined\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto disable_resources;
        }
 
        /* Read phy RETRY interval timing value from device-tree */
        if (of_property_read_u16_array(np, "ceva,p0-retry-params",
                                        (u16 *)&cevapriv->pp5c[0], 2) < 0) {
                dev_warn(dev, "ceva,p0-retry-params property not defined\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto disable_resources;
        }
 
        if (of_property_read_u16_array(np, "ceva,p1-retry-params",
                                        (u16 *)&cevapriv->pp5c[1], 2) < 0) {
                dev_warn(dev, "ceva,p1-retry-params property not defined\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto disable_resources;
        }
 
        /*
@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
        struct ahci_host_priv *hpriv = host->private_data;
        int rc;
 
-       rc = ahci_platform_enable_resources(hpriv);
+       rc = ceva_ahci_platform_enable_resources(hpriv);
        if (rc)
                return rc;
 
index 09ed67772fae492323361ab7e94f8a8d4345d2e8..be3412cdb22e78a1d663337698f07b07c66727e4 100644 (file)
@@ -2001,6 +2001,33 @@ bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
        return true;
 }
 
+static bool ata_dev_power_is_active(struct ata_device *dev)
+{
+       struct ata_taskfile tf;
+       unsigned int err_mask;
+
+       ata_tf_init(dev, &tf);
+       tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+       tf.protocol = ATA_PROT_NODATA;
+       tf.command = ATA_CMD_CHK_POWER;
+
+       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+       if (err_mask) {
+               ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
+                           err_mask);
+               /*
+                * Assume we are in standby mode so that we always force a
+                * spinup in ata_dev_power_set_active().
+                */
+               return false;
+       }
+
+       ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
+
+       /* Active or idle */
+       return tf.nsect == 0xff;
+}
+
 /**
  *     ata_dev_power_set_standby - Set a device power mode to standby
  *     @dev: target device
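CHECK POWER MODE reports the current state in the count field; only 0xff means the drive is active or idle, so any other value (or a failed command) is treated as standby and will force a spin-up later. A rough decode (values per the ATA spec; the helper name is hypothetical):

	static const char *power_mode_name(u8 mode)
	{
		switch (mode) {
		case 0x00: return "standby";
		case 0x80: return "idle";
		case 0xff: return "active or idle";
		default:   return "other (e.g. NV cache states)";
		}
	}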
@@ -2017,6 +2044,11 @@ void ata_dev_power_set_standby(struct ata_device *dev)
        struct ata_taskfile tf;
        unsigned int err_mask;
 
+       /* If the device is already sleeping or in standby, do nothing. */
+       if ((dev->flags & ATA_DFLAG_SLEEPING) ||
+           !ata_dev_power_is_active(dev))
+               return;
+
        /*
         * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
         * causing some drives to spin up and down again. For these, do nothing
@@ -2042,33 +2074,6 @@ void ata_dev_power_set_standby(struct ata_device *dev)
                            err_mask);
 }
 
-static bool ata_dev_power_is_active(struct ata_device *dev)
-{
-       struct ata_taskfile tf;
-       unsigned int err_mask;
-
-       ata_tf_init(dev, &tf);
-       tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
-       tf.protocol = ATA_PROT_NODATA;
-       tf.command = ATA_CMD_CHK_POWER;
-
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-       if (err_mask) {
-               ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
-                           err_mask);
-               /*
-                * Assume we are in standby mode so that we always force a
-                * spinup in ata_dev_power_set_active().
-                */
-               return false;
-       }
-
-       ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
-
-       /* Active or idle */
-       return tf.nsect == 0xff;
-}
-
 /**
  *     ata_dev_power_set_active -  Set a device power mode to active
  *     @dev: target device
index fdb0fae88d1c584e94bdc3b206999203779cd755..b40b32fa7f1c38c5d12931ee7b06e5b8ab144d77 100644 (file)
@@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
        bt_dev_dbg(hdev, "QCA Patch config");
 
        skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
-                               cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+                               cmd, 0, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
index a617578356953c30a4a882f7928d16d464a4a04d..9a7243d5db71ff35697cf26cf7a744910f2741fd 100644 (file)
@@ -1417,7 +1417,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
 
        bda = (struct hci_rp_read_bd_addr *)skb->data;
        if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
-               set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);
+               set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
 
        kfree_skb(skb);
        return 0;
@@ -2368,7 +2368,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
        hdev->setup = bcm4377_hci_setup;
 
-       set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
        if (bcm4377->hw->broken_mws_transport_config)
                set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
        if (bcm4377->hw->broken_ext_scan)
index 94b8c406f0c0edf0245064bd994ea6b84637b7b1..edd2a81b4d5ed7f5f9f36058ffe9131877ddde56 100644 (file)
@@ -7,6 +7,7 @@
  *
  *  Copyright (C) 2007 Texas Instruments, Inc.
  *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
+ *  Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  *  Acknowledgements:
  *  This file is based on hci_ll.c, which was...
@@ -1806,13 +1807,12 @@ static int qca_power_on(struct hci_dev *hdev)
 
 static void hci_coredump_qca(struct hci_dev *hdev)
 {
+       int err;
        static const u8 param[] = { 0x26 };
-       struct sk_buff *skb;
 
-       skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
-       if (IS_ERR(skb))
-               bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
-       kfree_skb(skb);
+       err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+       if (err < 0)
+               bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
 }
 
 static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
@@ -1904,7 +1904,17 @@ retry:
        case QCA_WCN6750:
        case QCA_WCN6855:
        case QCA_WCN7850:
-               set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+               /* Set BDA quirk bit for reading BDA value from fwnode property
+                * only if that property exists in DT.
+                */
+               if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
+                       set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+                       bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
+               } else {
+                       bt_dev_dbg(hdev, "local-bd-address is not present in the devicetree so not setting quirk bit for BDA");
+               }
+
                hci_set_aosp_capable(hdev);
 
                ret = qca_read_soc_version(hdev, &ver, soc_type);
index 6b5da73c85417644b5885e534c39917e4e5496a3..837bf9d51c6ec93888cec97ecde0eb2a792339e2 100644 (file)
@@ -120,7 +120,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
                i++;
        }
 
-       if (i == 0 || i % 4)
+       if (i == 0)
                goto err;
 
        for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
index 57186c58dc849c15db2f9c25ad8c816398f29986..1d7dd3d2c101cd4412876d62162fb733c800c02c 100644 (file)
@@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
        unsigned long line_size;
        unsigned long flags;
 
+       if (unlikely(start == end))
+               return;
+
        line_size = ax45mp_priv.ax45mp_cache_line_size;
        start = start & (~(line_size - 1));
+       end = ((end + line_size - 1) & (~(line_size - 1)));
        local_irq_save(flags);
        ax45mp_cpu_dcache_wb_range(start, end);
        local_irq_restore(flags);
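Both bounds of the write-back window are now rounded to the hardware cache-line size, and zero-length requests return early (aligning both ends of an empty range could otherwise touch a line that was never written). The arithmetic, assuming a 64-byte line:

	/* range [0x1005, 0x1043) touches lines 0x1000 and 0x1040 */
	start = 0x1005 & ~63UL;			/* -> 0x1000 */
	end   = (0x1043 + 63) & ~63UL;		/* -> 0x1080, i.e. through 0x107f */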
index 0964bb11657f100916b85b7f00074a9bdb365c62..782993951fff8f7cc209329fc84af7f825fee143 100644 (file)
@@ -2475,7 +2475,7 @@ static const struct samsung_cmu_info misc_cmu_info __initconst = {
        .nr_clk_ids             = CLKS_NR_MISC,
        .clk_regs               = misc_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(misc_clk_regs),
-       .clk_name               = "dout_cmu_misc_bus",
+       .clk_name               = "bus",
 };
 
 /* ---- platform_driver ----------------------------------------------------- */
index e4974b508328d1ae50e839602c572581f15ada56..a933ef53845a5b4ad24b3adfb1a303d44e9b2517 100644 (file)
@@ -159,6 +159,7 @@ static int __subdev_8255_init(struct comedi_device *dev,
                return -ENOMEM;
 
        spriv->context = context;
+       spriv->io      = io;
 
        s->type         = COMEDI_SUBD_DIO;
        s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
index 30ea8b53ebf8191db808b928041b0ac9a6e1512a..05ae9122823f8032bf62d8e7cd1f7570115a04f8 100644 (file)
@@ -87,6 +87,8 @@ struct waveform_private {
        struct comedi_device *dev;      /* parent comedi device */
        u64 ao_last_scan_time;          /* time of previous AO scan in usec */
        unsigned int ao_scan_period;    /* AO scan period in usec */
+       bool ai_timer_enable:1;         /* should AI timer be running? */
+       bool ao_timer_enable:1;         /* should AO timer be running? */
        unsigned short ao_loopbacks[N_CHANS];
 };
 
@@ -236,8 +238,12 @@ static void waveform_ai_timer(struct timer_list *t)
                        time_increment = devpriv->ai_convert_time - now;
                else
                        time_increment = 1;
-               mod_timer(&devpriv->ai_timer,
-                         jiffies + usecs_to_jiffies(time_increment));
+               spin_lock(&dev->spinlock);
+               if (devpriv->ai_timer_enable) {
+                       mod_timer(&devpriv->ai_timer,
+                                 jiffies + usecs_to_jiffies(time_increment));
+               }
+               spin_unlock(&dev->spinlock);
        }
 
 overrun:
@@ -393,9 +399,12 @@ static int waveform_ai_cmd(struct comedi_device *dev,
         * Seem to need an extra jiffy here, otherwise timer expires slightly
         * early!
         */
+       spin_lock_bh(&dev->spinlock);
+       devpriv->ai_timer_enable = true;
        devpriv->ai_timer.expires =
                jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
        add_timer(&devpriv->ai_timer);
+       spin_unlock_bh(&dev->spinlock);
        return 0;
 }
 
@@ -404,6 +413,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
 {
        struct waveform_private *devpriv = dev->private;
 
+       spin_lock_bh(&dev->spinlock);
+       devpriv->ai_timer_enable = false;
+       spin_unlock_bh(&dev->spinlock);
        if (in_softirq()) {
                /* Assume we were called from the timer routine itself. */
                del_timer(&devpriv->ai_timer);
@@ -495,8 +507,12 @@ static void waveform_ao_timer(struct timer_list *t)
                unsigned int time_inc = devpriv->ao_last_scan_time +
                                        devpriv->ao_scan_period - now;
 
-               mod_timer(&devpriv->ao_timer,
-                         jiffies + usecs_to_jiffies(time_inc));
+               spin_lock(&dev->spinlock);
+               if (devpriv->ao_timer_enable) {
+                       mod_timer(&devpriv->ao_timer,
+                                 jiffies + usecs_to_jiffies(time_inc));
+               }
+               spin_unlock(&dev->spinlock);
        }
 
 underrun:
@@ -517,9 +533,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
        async->inttrig = NULL;
 
        devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
+       spin_lock_bh(&dev->spinlock);
+       devpriv->ao_timer_enable = true;
        devpriv->ao_timer.expires =
                jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
        add_timer(&devpriv->ao_timer);
+       spin_unlock_bh(&dev->spinlock);
 
        return 1;
 }
@@ -604,6 +623,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
        struct waveform_private *devpriv = dev->private;
 
        s->async->inttrig = NULL;
+       spin_lock_bh(&dev->spinlock);
+       devpriv->ao_timer_enable = false;
+       spin_unlock_bh(&dev->spinlock);
        if (in_softirq()) {
                /* Assume we were called from the timer routine itself. */
                del_timer(&devpriv->ao_timer);
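The *_timer_enable flags close a race between cancel and the timer re-arming itself: cancel clears the flag under the spinlock, and the callback only calls mod_timer() while the flag is still set (the driver picks del_timer() vs del_timer_sync() by context). The pattern, distilled:

	/* cancel path */
	spin_lock_bh(&dev->spinlock);
	devpriv->ao_timer_enable = false;
	spin_unlock_bh(&dev->spinlock);
	del_timer_sync(&devpriv->ao_timer);	/* callback can no longer re-arm */

	/* timer callback */
	spin_lock(&dev->spinlock);
	if (devpriv->ao_timer_enable)
		mod_timer(&devpriv->ao_timer, jiffies + delay);
	spin_unlock(&dev->spinlock);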
index 09c77afb33ca84e79c077c87659252c64840929a..3f24481fc04a1258624020a9a919f2d10640057a 100644 (file)
@@ -31,10 +31,11 @@ struct counter_device_allochelper {
        struct counter_device counter;
 
        /*
-        * This is cache line aligned to ensure private data behaves like if it
-        * were kmalloced separately.
+        * This ensures private data behaves as if it were kmalloced
+        * separately. Also ensures the minimum alignment for safe DMA
+        * operations (which may or may not mean cache alignment).
         */
-       unsigned long privdata[] ____cacheline_aligned;
+       unsigned long privdata[] __aligned(ARCH_DMA_MINALIGN);
 };
 
 static void counter_device_release(struct device *dev)
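____cacheline_aligned is not necessarily enough for DMA: on arm64, for instance, ARCH_DMA_MINALIGN is 128 bytes while the cacheline may be 64 (values are arch-specific). Aligning privdata to ARCH_DMA_MINALIGN gives it the same DMA-safety a separately kmalloced buffer would have. The property can be checked at build time:

	/* fires if the flex array is not on a DMA-safe boundary */
	BUILD_BUG_ON(offsetof(struct counter_device_allochelper, privdata) %
		     ARCH_DMA_MINALIGN);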
index ca94e60e705a1df435b1dd75a13c0a50dc3f8c27..79619227ea511b5247ca7941400ae821b1030f73 100644 (file)
@@ -2987,6 +2987,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
        if (min_pstate < cpu->min_perf_ratio)
                min_pstate = cpu->min_perf_ratio;
 
+       if (min_pstate > cpu->max_perf_ratio)
+               min_pstate = cpu->max_perf_ratio;
+
        max_pstate = min(cap_pstate, cpu->max_perf_ratio);
        if (max_pstate < min_pstate)
                max_pstate = min_pstate;
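With the added check, min_pstate is effectively clamped into [cpu->min_perf_ratio, cpu->max_perf_ratio] before use, so a computed minimum can no longer exceed a capped maximum. Equivalent form:

	min_pstate = clamp_t(int, min_pstate,
			     cpu->min_perf_ratio, cpu->max_perf_ratio);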
index 1262a7773ef304d184799771166ca5700fb7871a..de50c00ba218fb19302438b6df29f24a38a9c591 100644 (file)
@@ -299,22 +299,6 @@ theend:
        return err;
 }
 
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
-       struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
-       struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
-       struct sun8i_ce_dev *ce = op->ce;
-       struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
-       int flow, err;
-
-       flow = rctx->flow;
-       err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
-       local_bh_disable();
-       crypto_finalize_skcipher_request(engine, breq, err);
-       local_bh_enable();
-}
-
 static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
                                      void *async_req)
 {
@@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
        dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
 }
 
+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+{
+       struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+       struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+       struct sun8i_ce_dev *ce = op->ce;
+       struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+       int flow, err;
+
+       flow = rctx->flow;
+       err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+       sun8i_ce_cipher_unprepare(engine, areq);
+       local_bh_disable();
+       crypto_finalize_skcipher_request(engine, breq, err);
+       local_bh_enable();
+}
+
 int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
 {
        int err = sun8i_ce_cipher_prepare(engine, areq);
@@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
                return err;
 
        sun8i_ce_cipher_run(engine, areq);
-       sun8i_ce_cipher_unprepare(engine, areq);
        return 0;
 }
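Moving the unprepare step (DMA unmap, key cleanup) ahead of crypto_finalize_skcipher_request() matters because finalizing may free or reuse the request, after which rctx must not be touched; the rk3288 hash change below applies the same rule. Safe ordering, in outline:

	err = sun8i_ce_run_task(ce, flow, name);
	sun8i_ce_cipher_unprepare(engine, areq);	/* last use of rctx */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);	/* may free */
	local_bh_enable();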
 
index 1b13b4aa16ecc441a37266996f1b4aca6863a436..a235e6c300f1e5419eb06945757946ced70f12e2 100644 (file)
@@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
 theend:
        pm_runtime_put_autosuspend(rkc->dev);
 
+       rk_hash_unprepare(engine, breq);
+
        local_bh_disable();
        crypto_finalize_hash_request(engine, breq, err);
        local_bh_enable();
 
-       rk_hash_unprepare(engine, breq);
-
        return 0;
 }
 
index 2621ff8a93764d4ad905bcfe7e52331f45bb2c71..de53eddf6796b6c6ac6eafdeaee9a7ee03c979d3 100644 (file)
@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
 }
 
 static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
-               struct virtio_crypto_ctrl_header *header, void *para,
+               struct virtio_crypto_ctrl_header *header,
+               struct virtio_crypto_akcipher_session_para *para,
                const uint8_t *key, unsigned int keylen)
 {
        struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
 
        ctrl = &vc_ctrl_req->ctrl;
        memcpy(&ctrl->header, header, sizeof(ctrl->header));
-       memcpy(&ctrl->u, para, sizeof(ctrl->u));
+       memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
        input = &vc_ctrl_req->input;
        input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
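Typing the para argument lets the copy be bounded by the actual session parameters instead of the whole control union; the old sizeof(ctrl->u) read past the end of the smaller source buffer. In short:

	memcpy(&ctrl->u, para, sizeof(ctrl->u));	/* old: over-reads para */
	memcpy(&ctrl->u.akcipher_create_session.para,
	       para, sizeof(*para));			/* new: bounded copy */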
 
index dcf2b39e1048822ca90324667d85f68225c05fa4..1a3e6aafbdcc33dd2aae8731be8a5ad52cc0891e 100644 (file)
@@ -316,31 +316,27 @@ static const struct cxl_root_ops acpi_root_ops = {
        .qos_class = cxl_acpi_qos_class,
 };
 
-static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
-                          const unsigned long end)
+static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
+                            struct cxl_cfmws_context *ctx)
 {
        int target_map[CXL_DECODER_MAX_INTERLEAVE];
-       struct cxl_cfmws_context *ctx = arg;
        struct cxl_port *root_port = ctx->root_port;
        struct resource *cxl_res = ctx->cxl_res;
        struct cxl_cxims_context cxims_ctx;
        struct cxl_root_decoder *cxlrd;
        struct device *dev = ctx->dev;
-       struct acpi_cedt_cfmws *cfmws;
        cxl_calc_hb_fn cxl_calc_hb;
        struct cxl_decoder *cxld;
        unsigned int ways, i, ig;
        struct resource *res;
        int rc;
 
-       cfmws = (struct acpi_cedt_cfmws *) header;
-
        rc = cxl_acpi_cfmws_verify(dev, cfmws);
        if (rc) {
                dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
                        cfmws->base_hpa,
                        cfmws->base_hpa + cfmws->window_size - 1);
-               return 0;
+               return rc;
        }
 
        rc = eiw_to_ways(cfmws->interleave_ways, &ways);
@@ -376,7 +372,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
 
        cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
        if (IS_ERR(cxlrd))
-               return 0;
+               return PTR_ERR(cxlrd);
 
        cxld = &cxlrd->cxlsd.cxld;
        cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
@@ -420,16 +416,7 @@ err_xormap:
                put_device(&cxld->dev);
        else
                rc = cxl_decoder_autoremove(dev, cxld);
-       if (rc) {
-               dev_err(dev, "Failed to add decode range: %pr", res);
-               return rc;
-       }
-       dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
-               dev_name(&cxld->dev),
-               phys_to_target_node(cxld->hpa_range.start),
-               cxld->hpa_range.start, cxld->hpa_range.end);
-
-       return 0;
+       return rc;
 
 err_insert:
        kfree(res->name);
@@ -438,6 +425,29 @@ err_name:
        return -ENOMEM;
 }
 
+static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+                          const unsigned long end)
+{
+       struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
+       struct cxl_cfmws_context *ctx = arg;
+       struct device *dev = ctx->dev;
+       int rc;
+
+       rc = __cxl_parse_cfmws(cfmws, ctx);
+       if (rc)
+               dev_err(dev,
+                       "Failed to add decode range: [%#llx - %#llx] (%d)\n",
+                       cfmws->base_hpa,
+                       cfmws->base_hpa + cfmws->window_size - 1, rc);
+       else
+               dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
+                       phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
+                       cfmws->base_hpa + cfmws->window_size - 1);
+
+       /* never fail cxl_acpi load for a single window failure */
+       return 0;
+}
+
 __mock struct acpi_device *to_cxl_host_bridge(struct device *host,
                                              struct device *dev)
 {
index 6fe11546889fabb48e997fda83e1f184a64179c6..08fd0baea7a0eb0f1c1442e9f454e3c32736d19c 100644 (file)
@@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
        return 0;
 }
 
-static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
-                          struct list_head *list)
+static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
+                             struct cxl_dpa_perf *dpa_perf)
 {
-       struct cxl_dpa_perf *dpa_perf;
-
-       dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
-       if (!dpa_perf)
-               return;
-
        dpa_perf->dpa_range = dent->dpa_range;
        dpa_perf->coord = dent->coord;
        dpa_perf->qos_class = dent->qos_class;
-       list_add_tail(&dpa_perf->list, list);
        dev_dbg(dev,
                "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
                dent->dpa_range.start, dpa_perf->qos_class,
@@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
                dent->coord.read_latency, dent->coord.write_latency);
 }
 
-static void free_perf_ents(void *data)
-{
-       struct cxl_memdev_state *mds = data;
-       struct cxl_dpa_perf *dpa_perf, *n;
-       LIST_HEAD(discard);
-
-       list_splice_tail_init(&mds->ram_perf_list, &discard);
-       list_splice_tail_init(&mds->pmem_perf_list, &discard);
-       list_for_each_entry_safe(dpa_perf, n, &discard, list) {
-               list_del(&dpa_perf->list);
-               kfree(dpa_perf);
-       }
-}
-
 static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
                                     struct xarray *dsmas_xa)
 {
@@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
        xa_for_each(dsmas_xa, index, dent) {
                if (resource_size(&cxlds->ram_res) &&
                    range_contains(&ram_range, &dent->dpa_range))
-                       add_perf_entry(dev, dent, &mds->ram_perf_list);
+                       update_perf_entry(dev, dent, &mds->ram_perf);
                else if (resource_size(&cxlds->pmem_res) &&
                         range_contains(&pmem_range, &dent->dpa_range))
-                       add_perf_entry(dev, dent, &mds->pmem_perf_list);
+                       update_perf_entry(dev, dent, &mds->pmem_perf);
                else
                        dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
                                dent->dpa_range.start);
        }
-
-       devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
 }
 
 static int match_cxlrd_qos_class(struct device *dev, void *data)
@@ -293,24 +270,24 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
        return 0;
 }
 
-static void cxl_qos_match(struct cxl_port *root_port,
-                         struct list_head *work_list,
-                         struct list_head *discard_list)
+static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
 {
-       struct cxl_dpa_perf *dpa_perf, *n;
+       *dpa_perf = (struct cxl_dpa_perf) {
+               .qos_class = CXL_QOS_CLASS_INVALID,
+       };
+}
 
-       list_for_each_entry_safe(dpa_perf, n, work_list, list) {
-               int rc;
+static bool cxl_qos_match(struct cxl_port *root_port,
+                         struct cxl_dpa_perf *dpa_perf)
+{
+       if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+               return false;
 
-               if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
-                       return;
+       if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
+                                  match_cxlrd_qos_class))
+               return false;
 
-               rc = device_for_each_child(&root_port->dev,
-                                          (void *)&dpa_perf->qos_class,
-                                          match_cxlrd_qos_class);
-               if (!rc)
-                       list_move_tail(&dpa_perf->list, discard_list);
-       }
+       return true;
 }
 
 static int match_cxlrd_hb(struct device *dev, void *data)
@@ -334,23 +311,10 @@ static int match_cxlrd_hb(struct device *dev, void *data)
        return 0;
 }
 
-static void discard_dpa_perf(struct list_head *list)
-{
-       struct cxl_dpa_perf *dpa_perf, *n;
-
-       list_for_each_entry_safe(dpa_perf, n, list, list) {
-               list_del(&dpa_perf->list);
-               kfree(dpa_perf);
-       }
-}
-DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
-
 static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
 {
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-       LIST_HEAD(__discard);
-       struct list_head *discard __free(dpa_perf) = &__discard;
        struct cxl_port *root_port;
        int rc;
 
@@ -363,16 +327,17 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
        root_port = &cxl_root->port;
 
        /* Check that the QTG IDs are all sane between end device and root decoders */
-       cxl_qos_match(root_port, &mds->ram_perf_list, discard);
-       cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
+       if (!cxl_qos_match(root_port, &mds->ram_perf))
+               reset_dpa_perf(&mds->ram_perf);
+       if (!cxl_qos_match(root_port, &mds->pmem_perf))
+               reset_dpa_perf(&mds->pmem_perf);
 
        /* Check to make sure that the device's host bridge is under a root decoder */
        rc = device_for_each_child(&root_port->dev,
-                                  (void *)cxlmd->endpoint->host_bridge,
-                                  match_cxlrd_hb);
+                                  cxlmd->endpoint->host_bridge, match_cxlrd_hb);
        if (!rc) {
-               list_splice_tail_init(&mds->ram_perf_list, discard);
-               list_splice_tail_init(&mds->pmem_perf_list, discard);
+               reset_dpa_perf(&mds->ram_perf);
+               reset_dpa_perf(&mds->pmem_perf);
        }
 
        return rc;
@@ -417,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
 
        cxl_memdev_set_qos_class(cxlds, dsmas_xa);
        cxl_qos_class_verify(cxlmd);
+       cxl_memdev_update_perf(cxlmd);
 }
 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
 
index 27166a41170579a9441a2f9bf3e2a915ed85d893..9adda4795eb786b8658b573dd1e79befbad52255 100644 (file)
@@ -1391,8 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
        mds->cxlds.reg_map.host = dev;
        mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
        mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
-       INIT_LIST_HEAD(&mds->ram_perf_list);
-       INIT_LIST_HEAD(&mds->pmem_perf_list);
+       mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
+       mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
 
        return mds;
 }
index dae8802ecdb01ee748e3891120bc0011e9e8894e..d4e259f3a7e914b9e3f17330cbc57f691d1976c2 100644 (file)
@@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
        NULL,
 };
 
+static ssize_t pmem_qos_class_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+       return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+       __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
 static struct attribute *cxl_memdev_pmem_attributes[] = {
        &dev_attr_pmem_size.attr,
+       &dev_attr_pmem_qos_class.attr,
        NULL,
 };
 
+static ssize_t ram_qos_class_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+       return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+       __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
 static struct attribute *cxl_memdev_ram_attributes[] = {
        &dev_attr_ram_size.attr,
+       &dev_attr_ram_qos_class.attr,
        NULL,
 };
 
@@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
        .is_visible = cxl_memdev_visible,
 };
 
+static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+       if (a == &dev_attr_ram_qos_class.attr)
+               if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
+                       return 0;
+
+       return a->mode;
+}
+
 static struct attribute_group cxl_memdev_ram_attribute_group = {
        .name = "ram",
        .attrs = cxl_memdev_ram_attributes,
+       .is_visible = cxl_ram_visible,
 };
 
+static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+       if (a == &dev_attr_pmem_qos_class.attr)
+               if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
+                       return 0;
+
+       return a->mode;
+}
+
 static struct attribute_group cxl_memdev_pmem_attribute_group = {
        .name = "pmem",
        .attrs = cxl_memdev_pmem_attributes,
+       .is_visible = cxl_pmem_visible,
 };
 
 static umode_t cxl_memdev_security_visible(struct kobject *kobj,
@@ -519,6 +575,13 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
        NULL,
 };
 
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
+{
+       sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
+       sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+
 static const struct device_type cxl_memdev_type = {
        .name = "cxl_memdev",
        .release = cxl_memdev_release,
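The qos_class attributes follow the standard sysfs visibility pattern: is_visible() returns 0 to hide an attribute, and because the perf data is only known after CDAT parsing (which runs after the memdev is registered), cxl_endpoint_parse_cdat() calls cxl_memdev_update_perf() to re-evaluate the groups. A minimal sketch of the pattern (names hypothetical):

	static umode_t example_visible(struct kobject *kobj,
				       struct attribute *a, int n)
	{
		return perf_data_valid(kobj) ? a->mode : 0;	/* hypothetical */
	}

	/* later, once the data is populated: */
	sysfs_update_group(&dev->kobj, &example_attribute_group);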
index 6c9c8d92f8f71401af70fec26be60e0339c18c64..e9e6c81ce034a8ffaba105132d5b9ecc59d51880 100644 (file)
@@ -477,9 +477,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
                allowed++;
        }
 
-       if (!allowed) {
-               cxl_set_mem_enable(cxlds, 0);
-               info->mem_enabled = 0;
+       if (!allowed && info->mem_enabled) {
+               dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+               return -ENXIO;
        }
 
        /*
@@ -932,11 +932,21 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
 void cxl_cor_error_detected(struct pci_dev *pdev)
 {
        struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+       struct device *dev = &cxlds->cxlmd->dev;
+
+       scoped_guard(device, dev) {
+               if (!dev->driver) {
+                       dev_warn(&pdev->dev,
+                                "%s: memdev disabled, abort error handling\n",
+                                dev_name(dev));
+                       return;
+               }
 
-       if (cxlds->rcd)
-               cxl_handle_rdport_errors(cxlds);
+               if (cxlds->rcd)
+                       cxl_handle_rdport_errors(cxlds);
 
-       cxl_handle_endpoint_cor_ras(cxlds);
+               cxl_handle_endpoint_cor_ras(cxlds);
+       }
 }
 EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
 
@@ -948,16 +958,25 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
        struct device *dev = &cxlmd->dev;
        bool ue;
 
-       if (cxlds->rcd)
-               cxl_handle_rdport_errors(cxlds);
+       scoped_guard(device, dev) {
+               if (!dev->driver) {
+                       dev_warn(&pdev->dev,
+                                "%s: memdev disabled, abort error handling\n",
+                                dev_name(dev));
+                       return PCI_ERS_RESULT_DISCONNECT;
+               }
+
+               if (cxlds->rcd)
+                       cxl_handle_rdport_errors(cxlds);
+               /*
+                * A frozen channel indicates an impending reset that is fatal to
+                * CXL.mem operation and will likely crash the system. On the off
+                * chance the situation is recoverable, dump the status of the RAS
+                * capability registers and bounce the active state of the memdev.
+                */
+               ue = cxl_handle_endpoint_ras(cxlds);
+       }
 
-       /*
-        * A frozen channel indicates an impending reset which is fatal to
-        * CXL.mem operation, and will likely crash the system. On the off
-        * chance the situation is recoverable dump the status of the RAS
-        * capability registers and bounce the active state of the memdev.
-        */
-       ue = cxl_handle_endpoint_ras(cxlds);
 
        switch (state) {
        case pci_channel_io_normal:
index ce0e2d82bb2b4cfdc61761d5e32a8c91cc121d82..4c7fd2d5cccb2965eb528cbc26bb261ef01dcdce 100644 (file)
@@ -730,12 +730,17 @@ static int match_auto_decoder(struct device *dev, void *data)
        return 0;
 }
 
-static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
-                                                  struct cxl_region *cxlr)
+static struct cxl_decoder *
+cxl_region_find_decoder(struct cxl_port *port,
+                       struct cxl_endpoint_decoder *cxled,
+                       struct cxl_region *cxlr)
 {
        struct device *dev;
        int id = 0;
 
+       if (port == cxled_to_port(cxled))
+               return &cxled->cxld;
+
        if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
                dev = device_find_child(&port->dev, &cxlr->params,
                                        match_auto_decoder);
@@ -753,8 +758,31 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
        return to_cxl_decoder(dev);
 }
 
-static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
-                                              struct cxl_region *cxlr)
+static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
+                         struct cxl_decoder *cxld)
+{
+       struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
+       struct cxl_decoder *cxld_iter = rr->decoder;
+
+       /*
+        * Allow out-of-order assembly of auto-discovered regions.
+        * Per CXL spec 3.1 section 8.2.4.20.12, software must commit
+        * decoders in HPA order. Confirm that the decoder with the
+        * lesser HPA starting address has the lesser id.
+        */
+       dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
+               dev_name(&cxld->dev), cxld->id,
+               dev_name(&cxld_iter->dev), cxld_iter->id);
+
+       if (cxld_iter->id > cxld->id)
+               return true;
+
+       return false;
+}
+
+static struct cxl_region_ref *
+alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
+                struct cxl_endpoint_decoder *cxled)
 {
        struct cxl_region_params *p = &cxlr->params;
        struct cxl_region_ref *cxl_rr, *iter;
@@ -764,16 +792,21 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
        xa_for_each(&port->regions, index, iter) {
                struct cxl_region_params *ip = &iter->region->params;
 
-               if (!ip->res)
+               if (!ip->res || ip->res->start < p->res->start)
                        continue;
 
-               if (ip->res->start > p->res->start) {
-                       dev_dbg(&cxlr->dev,
-                               "%s: HPA order violation %s:%pr vs %pr\n",
-                               dev_name(&port->dev),
-                               dev_name(&iter->region->dev), ip->res, p->res);
-                       return ERR_PTR(-EBUSY);
+               if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+                       struct cxl_decoder *cxld;
+
+                       cxld = cxl_region_find_decoder(port, cxled, cxlr);
+                       if (auto_order_ok(port, iter->region, cxld))
+                               continue;
                }
+               dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
+                       dev_name(&port->dev),
+                       dev_name(&iter->region->dev), ip->res, p->res);
+
+               return ERR_PTR(-EBUSY);
        }
 
        cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
@@ -853,10 +886,7 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
 {
        struct cxl_decoder *cxld;
 
-       if (port == cxled_to_port(cxled))
-               cxld = &cxled->cxld;
-       else
-               cxld = cxl_region_find_decoder(port, cxlr);
+       cxld = cxl_region_find_decoder(port, cxled, cxlr);
        if (!cxld) {
                dev_dbg(&cxlr->dev, "%s: no decoder available\n",
                        dev_name(&port->dev));
@@ -953,7 +983,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
                        nr_targets_inc = true;
                }
        } else {
-               cxl_rr = alloc_region_ref(port, cxlr);
+               cxl_rr = alloc_region_ref(port, cxlr, cxled);
                if (IS_ERR(cxl_rr)) {
                        dev_dbg(&cxlr->dev,
                                "%s: failed to allocate region reference\n",
index b6017c0c57b4d5e69dfe45011b7a8b3f5bf0b913..003feebab79b5f8e7563ba2e32665b4377871a55 100644 (file)
@@ -880,6 +880,8 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
                                      struct access_coordinate *coord);
 
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+
 /*
  * Unit test builds overrides this to __weak, find the 'strong' version
  * of these symbols in tools/testing/cxl/.
index 5303d6942b880af65dcf8e77b02d26626c2bb94d..20fb3b35e89e0473ee8ad42dcd17407086fb8cdb 100644 (file)
@@ -395,13 +395,11 @@ enum cxl_devtype {
 
 /**
  * struct cxl_dpa_perf - DPA performance property entry
- * @list - list entry
  * @dpa_range - range for DPA address
  * @coord - QoS performance data (i.e. latency, bandwidth)
  * @qos_class - QoS Class cookies
  */
 struct cxl_dpa_perf {
-       struct list_head list;
        struct range dpa_range;
        struct access_coordinate coord;
        int qos_class;
@@ -471,8 +469,8 @@ struct cxl_dev_state {
  * @security: security driver state info
  * @fw: firmware upload / activation state
  * @mbox_send: @dev specific transport for transmitting mailbox commands
- * @ram_perf_list: performance data entries matched to RAM
- * @pmem_perf_list: performance data entries matched to PMEM
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
  *
  * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
  * details on capacity parameters.
@@ -494,8 +492,8 @@ struct cxl_memdev_state {
        u64 next_volatile_bytes;
        u64 next_persistent_bytes;
 
-       struct list_head ram_perf_list;
-       struct list_head pmem_perf_list;
+       struct cxl_dpa_perf ram_perf;
+       struct cxl_dpa_perf pmem_perf;
 
        struct cxl_event_state event;
        struct cxl_poison_state poison;
index c5c9d8e0d88d69fcc9f031e1bd46ba7c44de4fd4..0c79d9ce877ccaef9895a9885801d4fff69c5093 100644 (file)
@@ -215,52 +215,6 @@ static ssize_t trigger_poison_list_store(struct device *dev,
 }
 static DEVICE_ATTR_WO(trigger_poison_list);
 
-static ssize_t ram_qos_class_show(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-       struct cxl_dpa_perf *dpa_perf;
-
-       if (!dev->driver)
-               return -ENOENT;
-
-       if (list_empty(&mds->ram_perf_list))
-               return -ENOENT;
-
-       dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf,
-                                   list);
-
-       return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_ram_qos_class =
-       __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
-
-static ssize_t pmem_qos_class_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-       struct cxl_dpa_perf *dpa_perf;
-
-       if (!dev->driver)
-               return -ENOENT;
-
-       if (list_empty(&mds->pmem_perf_list))
-               return -ENOENT;
-
-       dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf,
-                                   list);
-
-       return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_pmem_qos_class =
-       __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
-
 static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
 {
        struct device *dev = kobj_to_dev(kobj);
@@ -272,21 +226,11 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
                              mds->poison.enabled_cmds))
                        return 0;
 
-       if (a == &dev_attr_pmem_qos_class.attr)
-               if (list_empty(&mds->pmem_perf_list))
-                       return 0;
-
-       if (a == &dev_attr_ram_qos_class.attr)
-               if (list_empty(&mds->ram_perf_list))
-                       return 0;
-
        return a->mode;
 }
 
 static struct attribute *cxl_mem_attrs[] = {
        &dev_attr_trigger_poison_list.attr,
-       &dev_attr_ram_qos_class.attr,
-       &dev_attr_pmem_qos_class.attr,
        NULL
 };
 
index 233e7c42c161d8e0b64424776d121f5d08176010..2ff361e756d66147d8d20969c376730ae2bcc90e 100644 (file)
@@ -974,61 +974,6 @@ static struct pci_driver cxl_pci_driver = {
        },
 };
 
-#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
-static void cxl_cper_event_call(enum cxl_event_type ev_type,
-                               struct cxl_cper_event_rec *rec)
-{
-       struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
-       struct pci_dev *pdev __free(pci_dev_put) = NULL;
-       enum cxl_event_log_type log_type;
-       struct cxl_dev_state *cxlds;
-       unsigned int devfn;
-       u32 hdr_flags;
-
-       devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
-       pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
-                                          device_id->bus_num, devfn);
-       if (!pdev)
-               return;
-
-       guard(pci_dev)(pdev);
-       if (pdev->driver != &cxl_pci_driver)
-               return;
-
-       cxlds = pci_get_drvdata(pdev);
-       if (!cxlds)
-               return;
-
-       /* Fabricate a log type */
-       hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
-       log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
-
-       cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
-                              &uuid_null, &rec->event);
-}
-
-static int __init cxl_pci_driver_init(void)
-{
-       int rc;
-
-       rc = cxl_cper_register_callback(cxl_cper_event_call);
-       if (rc)
-               return rc;
-
-       rc = pci_register_driver(&cxl_pci_driver);
-       if (rc)
-               cxl_cper_unregister_callback(cxl_cper_event_call);
-
-       return rc;
-}
-
-static void __exit cxl_pci_driver_exit(void)
-{
-       pci_unregister_driver(&cxl_pci_driver);
-       cxl_cper_unregister_callback(cxl_cper_event_call);
-}
-
-module_init(cxl_pci_driver_init);
-module_exit(cxl_pci_driver_exit);
+module_pci_driver(cxl_pci_driver);
 MODULE_LICENSE("GPL v2");
 MODULE_IMPORT_NS(CXL);
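module_pci_driver() generates the init/exit boilerplate that the removed code spelled out by hand (the CPER callback wiring is removed outright in this hunk); its effect is roughly this hedged sketch:

/* Approximate expansion of module_pci_driver(cxl_pci_driver): */
static int __init cxl_pci_driver_init(void)
{
	return pci_register_driver(&cxl_pci_driver);
}
module_init(cxl_pci_driver_init);

static void __exit cxl_pci_driver_exit(void)
{
	pci_unregister_driver(&cxl_pci_driver);
}
module_exit(cxl_pci_driver_exit);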
index b38786f0ad7995d9b0d22aa18fdd6d2407320c26..b75fdaffad9a4ea6cd8d15e8f43bea550848b46c 100644 (file)
@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
        dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
 }
 
+static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+       /*
+        * In a remote eDMA engine setup, the DW PCIe RP/EP internal
+        * configuration registers and application memory are normally accessed
+        * over different buses. Ensure the LL data reaches memory before the
+        * doorbell register is toggled by issuing a dummy read from the remote
+        * LL memory, in the hope that the MRd TLP will return only after the
+        * last MWr TLP has completed.
+        */
+       if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+               readl(chunk->ll_region.vaddr.io);
+}
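The dummy read leans on PCIe ordering: a read completion may not pass previously posted writes to the same destination, so readl() returns only once the linked-list writes are visible. A generic hedged sketch of the same pattern:

#include <linux/io.h>

static void example_flush_posted_writes(void __iomem *remote_mem)
{
	/*
	 * The MRd completion is ordered behind earlier MWr TLPs, so
	 * returning from this read implies the posted writes landed.
	 * The value read is irrelevant and deliberately discarded.
	 */
	(void)readl(remote_mem);
}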
+
 static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
        struct dw_edma_chan *chan = chunk->chan;
@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
                SET_CH_32(dw, chan->dir, chan->id, llp.msb,
                          upper_32_bits(chunk->ll_region.paddr));
        }
+
+       dw_edma_v0_sync_ll_data(chunk);
+
        /* Doorbell */
        SET_RW_32(dw, chan->dir, doorbell,
                  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
index 00b735a0202ab2e8e030910db2747c02be8bf75e..10e8f0715114fb5f08f135b4f2d592ce6c53f10c 100644 (file)
@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
 
 static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
 {
-       u32 num_ch = 0;
-       int id;
-
-       for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
-               if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
-                       num_ch++;
-       }
-
-       if (num_ch > HDMA_V0_MAX_NR_CH)
-               num_ch = HDMA_V0_MAX_NR_CH;
-
-       return (u16)num_ch;
+       /*
+        * The HDMA IP has no way to report the number of hardware channels
+        * available, so return the maximum and let the platform set the
+        * right number of channels.
+        */
+       return HDMA_V0_MAX_NR_CH;
 }
 
 static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
@@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
        dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
 }
 
+static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+       /*
+        * In a remote HDMA engine setup, the DW PCIe RP/EP internal
+        * configuration registers and application memory are normally accessed
+        * over different buses. Ensure the LL data reaches memory before the
+        * doorbell register is toggled by issuing a dummy read from the remote
+        * LL memory, in the hope that the MRd TLP will return only after the
+        * last MWr TLP has completed.
+        */
+       if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+               readl(chunk->ll_region.vaddr.io);
+}
+
 static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
        struct dw_edma_chan *chan = chunk->chan;
@@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
                /* Interrupt enable&unmask - done, abort */
                tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
                      HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
-                     HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
+                     HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+               if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+                       tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
                SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
                /* Channel control */
                SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
        /* Set consumer cycle */
        SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
                  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+
+       dw_hdma_v0_sync_ll_data(chunk);
+
        /* Doorbell */
        SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
 }
index a974abdf8aaf5ecd83eadd56f191a313ec37e9ff..eab5fd7177e545cab3f2217bd1a8add0d8dbb435 100644 (file)
@@ -15,7 +15,7 @@
 #define HDMA_V0_LOCAL_ABORT_INT_EN             BIT(6)
 #define HDMA_V0_REMOTE_ABORT_INT_EN            BIT(5)
 #define HDMA_V0_LOCAL_STOP_INT_EN              BIT(4)
-#define HDMA_V0_REMOTEL_STOP_INT_EN            BIT(3)
+#define HDMA_V0_REMOTE_STOP_INT_EN             BIT(3)
 #define HDMA_V0_ABORT_INT_MASK                 BIT(2)
 #define HDMA_V0_STOP_INT_MASK                  BIT(0)
 #define HDMA_V0_LINKLIST_EN                    BIT(0)
index b53f46245c377f05520c8275c95bf10c59be34d7..793f1a7ad5e343bbfe403c9e0ad28e891bd0d556 100644 (file)
@@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
        if (fsl_chan->is_multi_fifo) {
                /* set mloff to support multiple fifo */
                burst = cfg->direction == DMA_DEV_TO_MEM ?
-                               cfg->src_addr_width : cfg->dst_addr_width;
+                               cfg->src_maxburst : cfg->dst_maxburst;
                nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
                /* enable DMLOE/SMLOE */
                if (cfg->direction == DMA_MEM_TO_DEV) {
index bb5221158a7702379322392a46a1ebfb4de0f476..f5e216b157c75ff2215d7c74cd1d9febad47031c 100644 (file)
@@ -30,8 +30,9 @@
 #define EDMA_TCD_ATTR_SSIZE(x)         (((x) & GENMASK(2, 0)) << 8)
 #define EDMA_TCD_ATTR_SMOD(x)          (((x) & GENMASK(4, 0)) << 11)
 
-#define EDMA_TCD_CITER_CITER(x)                ((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x)                ((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK             GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x)                ((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x)                ((x) & EDMA_TCD_ITER_MASK)
 
 #define EDMA_TCD_CSR_START             BIT(0)
 #define EDMA_TCD_CSR_INT_MAJOR         BIT(1)
index 45cc419b1b4acbe87c12c3daaccafce73f8de1ba..d36e28b9c767ae7ebb44bc9e87de7bbc0363f926 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
@@ -582,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
                                        DMAENGINE_ALIGN_32_BYTES;
 
        /* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
-       dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+       dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+                            FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
 
        fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
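FIELD_GET(m, m) shifts a mask down by its own lowest set bit, yielding the largest value the field can hold; for GENMASK(14, 0) that is 0x7fff, so this change also raises the old hard-coded 0x3fff to the field's full 15-bit maximum. A hedged illustration:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_ITER_MASK	GENMASK(14, 0)

static u32 example_max_iter(void)
{
	/* (mask & mask) >> lowest-set-bit == all field bits set == maximum */
	return FIELD_GET(EXAMPLE_ITER_MASK, EXAMPLE_ITER_MASK);	/* 0x7fff */
}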
 
index f405c77060ad8b3508e7c75bee09968e1ae9bc78..5005e138fc239bf23a8a888c90e5ad720f697d3d 100644 (file)
 #define FSL_QDMA_CMD_WTHROTL_OFFSET    20
 #define FSL_QDMA_CMD_DSEN_OFFSET       19
 #define FSL_QDMA_CMD_LWC_OFFSET                16
+#define FSL_QDMA_CMD_PF                        BIT(17)
 
 /* Field definition for Descriptor status */
 #define QDMA_CCDF_STATUS_RTE           BIT(5)
@@ -160,6 +161,10 @@ struct fsl_qdma_format {
                        u8 __reserved1[2];
                        u8 cfg8b_w1;
                } __packed;
+               struct {
+                       __le32 __reserved2;
+                       __le32 cmd;
+               } __packed;
                __le64 data;
        };
 } __packed;
@@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
 static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
                                      dma_addr_t dst, dma_addr_t src, u32 len)
 {
-       u32 cmd;
        struct fsl_qdma_format *sdf, *ddf;
        struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
 
@@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
        /* This entry is the last entry. */
        qdma_csgf_set_f(csgf_dest, len);
        /* Descriptor Buffer */
-       cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-                         FSL_QDMA_CMD_RWTTYPE_OFFSET);
-       sdf->data = QDMA_SDDF_CMD(cmd);
-
-       cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-                         FSL_QDMA_CMD_RWTTYPE_OFFSET);
-       cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
-       ddf->data = QDMA_SDDF_CMD(cmd);
+       sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+                              FSL_QDMA_CMD_PF);
+
+       ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+                              (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
 }
 
 /*
@@ -624,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 
 static int
 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-                                void *block,
+                                __iomem void *block,
                                 int id)
 {
        bool duplicate;
@@ -1196,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
        if (!fsl_qdma->queue)
                return -ENOMEM;
 
-       ret = fsl_qdma_irq_init(pdev, fsl_qdma);
-       if (ret)
-               return ret;
-
        fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
        if (fsl_qdma->irq_base < 0)
                return fsl_qdma->irq_base;
@@ -1238,16 +1235,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, fsl_qdma);
 
-       ret = dma_async_device_register(&fsl_qdma->dma_dev);
+       ret = fsl_qdma_reg_init(fsl_qdma);
        if (ret) {
-               dev_err(&pdev->dev,
-                       "Can't register NXP Layerscape qDMA engine.\n");
+               dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
                return ret;
        }
 
-       ret = fsl_qdma_reg_init(fsl_qdma);
+       ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+       if (ret)
+               return ret;
+
+       ret = dma_async_device_register(&fsl_qdma->dma_dev);
        if (ret) {
-               dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+               dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
                return ret;
        }
 
index 77f8885cf4075acfd3ff535b7e09519a8df41c70..e5a94a93a3cc4e6da66aca64cc2174b20d80a7bb 100644 (file)
@@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
        spin_lock(&evl->lock);
        status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
        t = status.tail;
-       h = evl->head;
+       h = status.head;
        size = evl->size;
 
        while (h != t) {
index 9cfbd9b14c4c43306326e857b8b3d982c612314f..f3f25ee676f30eb283989586d458a5c8b8c01f9f 100644 (file)
@@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
 
        spin_lock(&evl->lock);
 
-       h = evl->head;
        evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
        t = evl_status.tail;
+       h = evl_status.head;
        evl_size = evl->size;
 
        seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
index 47de3f93ff1e9a72eb718b07c05213d19ec1d23b..d0f5db6cf1eda103db09c31449cf3a58d58b7971 100644 (file)
@@ -300,7 +300,6 @@ struct idxd_evl {
        unsigned int log_size;
        /* The number of entries in the event log. */
        u16 size;
-       u16 head;
        unsigned long *bmap;
        bool batch_fail[IDXD_MAX_BATCH_IDENT];
 };
index 14df1f1347a8dd83b82263438acf3fe613513564..4954adc6bb609e508c510daf630f1077191fd2c7 100644 (file)
@@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
 static int idxd_init_evl(struct idxd_device *idxd)
 {
        struct device *dev = &idxd->pdev->dev;
+       unsigned int evl_cache_size;
        struct idxd_evl *evl;
+       const char *idxd_name;
 
        if (idxd->hw.gen_cap.evl_support == 0)
                return 0;
@@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
        spin_lock_init(&evl->lock);
        evl->size = IDXD_EVL_SIZE_MIN;
 
-       idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
-                                           sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
-                                           0, 0, NULL);
+       idxd_name = dev_name(idxd_confdev(idxd));
+       evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
+       /*
+        * Since the completion record in evl_cache will be copied to user
+        * space when handling a completion record page fault, the cache
+        * must be created with user-copy support.
+        */
+       idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
+                                                    0, 0, 0, evl_cache_size,
+                                                    NULL);
        if (!idxd->evl_cache) {
                kfree(evl);
                return -ENOMEM;
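kmem_cache_create_usercopy() marks a window of each slab object as legal for copy_to_user()/copy_from_user(); with hardened usercopy enabled, copies from an ordinary kmem_cache_create() cache would be rejected. A hedged sketch with hypothetical names:

#include <linux/slab.h>

static struct kmem_cache *example_cache;

static int example_cache_init(unsigned int object_size)
{
	/* useroffset 0 and usersize == object_size whitelist the whole object */
	example_cache = kmem_cache_create_usercopy("example_cache",
						   object_size, 0, 0,
						   0, object_size, NULL);
	return example_cache ? 0 : -ENOMEM;
}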
index c8a0aa874b1153f845278e03e9e5153cc487c0fb..348aa21389a9fceb4cd522579c8f8a9963e72ef3 100644 (file)
@@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
        /* Clear interrupt pending bit */
        iowrite32(evl_status.bits_upper32,
                  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
-       h = evl->head;
        evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
        t = evl_status.tail;
+       h = evl_status.head;
        size = idxd->evl->size;
 
        while (h != t) {
@@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
                h = (h + 1) % size;
        }
 
-       evl->head = h;
        evl_status.head = h;
        iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
        spin_unlock(&evl->lock);
index 1aa65e5de0f3ad9bc0fa0907ebda8e8c0fe6d0ab..f792407348077dd9fe481cfdec6577a701493487 100644 (file)
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
        chan->vc.desc_free = pt_do_cleanup;
        vchan_init(&chan->vc, dma_dev);
 
-       dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_reg;
index 5152bd1b0daf599869195e81805fbb2709dbe6b4..7f686d179fc93c85f684d051595a1d4c1934bdbb 100644 (file)
@@ -508,6 +508,26 @@ err_pin_prop:
        return ERR_PTR(ret);
 }
 
+static void dpll_netdev_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+       rtnl_lock();
+       rcu_assign_pointer(dev->dpll_pin, dpll_pin);
+       rtnl_unlock();
+}
+
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+       WARN_ON(!dpll_pin);
+       dpll_netdev_pin_assign(dev, dpll_pin);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_set);
+
+void dpll_netdev_pin_clear(struct net_device *dev)
+{
+       dpll_netdev_pin_assign(dev, NULL);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_clear);
+
 /**
  * dpll_pin_get - find existing or create new dpll pin
  * @clock_id: clock_id of creator
@@ -564,7 +584,7 @@ void dpll_pin_put(struct dpll_pin *pin)
                xa_destroy(&pin->parent_refs);
                xa_erase(&dpll_pin_xa, pin->id);
                dpll_pin_prop_free(&pin->prop);
-               kfree(pin);
+               kfree_rcu(pin, rcu);
        }
        mutex_unlock(&dpll_lock);
 }
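kfree_rcu() is what keeps the lockless readers (see rcu_dereference_rtnl() in dpll_netlink.c below) safe: the object embeds a struct rcu_head and the actual free is deferred past an RCU grace period. A minimal hedged sketch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	int id;
	struct rcu_head rcu;	/* storage kfree_rcu() uses for deferral */
};

static void example_obj_put(struct example_obj *obj)
{
	/*
	 * Readers that fetched the pointer under rcu_read_lock() (or under
	 * RTNL via rcu_dereference_rtnl()) finish before memory is freed.
	 */
	kfree_rcu(obj, rcu);
}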
index 717f715015c742238d5585fddc5cd267fbb0db9f..2b6d8ef1cdf36cff24328e497c49d667659dd0e6 100644 (file)
@@ -47,6 +47,7 @@ struct dpll_device {
  * @prop:              pin properties copied from the registerer
  * @rclk_dev_name:     holds name of device when pin can recover clock from it
  * @refcount:          refcount
+ * @rcu:               rcu_head for kfree_rcu()
  **/
 struct dpll_pin {
        u32 id;
@@ -57,6 +58,7 @@ struct dpll_pin {
        struct xarray parent_refs;
        struct dpll_pin_properties prop;
        refcount_t refcount;
+       struct rcu_head rcu;
 };
 
 /**
index 4ca9ad16cd957aaefaf50a74bbeb27ab3f3d1ec7..b57355e0c214bb3badca414c7d127d79a772bcdb 100644 (file)
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/netdevice.h>
 #include <net/genetlink.h>
 #include "dpll_core.h"
 #include "dpll_netlink.h"
@@ -47,18 +48,6 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
        return 0;
 }
 
-/**
- * dpll_msg_pin_handle_size - get size of pin handle attribute for given pin
- * @pin: pin pointer
- *
- * Return: byte size of pin handle attribute for given pin.
- */
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
-{
-       return pin ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
-}
-EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
-
 /**
  * dpll_msg_add_pin_handle - attach pin handle attribute to a given message
  * @msg: pointer to sk_buff message to attach a pin handle
@@ -68,7 +57,7 @@ EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
  * * 0 - success
  * * -EMSGSIZE - no space in message to attach pin handle
  */
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
 {
        if (!pin)
                return 0;
@@ -76,7 +65,28 @@ int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
                return -EMSGSIZE;
        return 0;
 }
-EXPORT_SYMBOL_GPL(dpll_msg_add_pin_handle);
+
+static struct dpll_pin *dpll_netdev_pin(const struct net_device *dev)
+{
+       return rcu_dereference_rtnl(dev->dpll_pin);
+}
+
+/**
+ * dpll_netdev_pin_handle_size - get size of pin handle attribute of a netdev
+ * @dev: netdev from which to get the pin
+ *
+ * Return: byte size of pin handle attribute, or 0 if @dev has no pin.
+ */
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+       return dpll_netdev_pin(dev) ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
+}
+
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+                              const struct net_device *dev)
+{
+       return dpll_msg_add_pin_handle(msg, dpll_netdev_pin(dev));
+}
 
 static int
 dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll,
index 8aaa7fcb2630dcf47a5325982cbed1037f685b3b..401a77e3b5fa8ed9e9b834c4a55cde98d2b2a8db 100644 (file)
@@ -500,7 +500,19 @@ static void bm_work(struct work_struct *work)
                fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
                          new_root_id, gap_count);
                fw_send_phy_config(card, new_root_id, generation, gap_count);
-               reset_bus(card, true);
+               /*
+                * Where possible, use a short bus reset to minimize
+                * disruption to isochronous transfers. But in the event
+                * of a gap count inconsistency, use a long bus reset.
+                *
+                * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+                * may set different gap counts after a bus reset. On a mixed
+                * 1394/1394a bus, a short bus reset can get doubled. Some
+                * nodes may treat the double reset as one bus reset and others
+                * may treat it as two, causing a gap count inconsistency
+                * again. Using a long bus reset prevents this.
+                */
+               reset_bus(card, card->gap_count != 0);
                /* Will allocate broadcast channel after the reset. */
                goto out;
        }
index 9db9290c326930d7ac903382f234f9435876b39b..7bc71f4be64a07510507e1c9b7d0f1a61de30e3b 100644 (file)
@@ -3773,6 +3773,7 @@ static int pci_probe(struct pci_dev *dev,
        return 0;
 
  fail_msi:
+       devm_free_irq(&dev->dev, dev->irq, ohci);
        pci_disable_msi(dev);
 
        return err;
@@ -3800,6 +3801,7 @@ static void pci_remove(struct pci_dev *dev)
 
        software_reset(ohci);
 
+       devm_free_irq(&dev->dev, dev->irq, ohci);
        pci_disable_msi(dev);
 
        dev_notice(&dev->dev, "removing fw-ohci device\n");
index 3e8d4b51a8140c16720eef8f08d311b024b1a830..97bafb5f7038924fb99eea6f5679b18b2d459e5a 100644 (file)
@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
                return -ENOMEM;
        }
 
-       cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+       cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
        if (!cap_info->phys) {
                kfree(cap_info->pages);
                kfree(cap_info);
index 81f5f62e34fce04fb6db2db11294f8281c58f5b7..fbeeaee4ac85603783412b2afddd9c5ec6fafd49 100644 (file)
@@ -167,7 +167,7 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
        u32 *response_msg;
        int ret;
 
-       response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+       response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
                                    GFP_KERNEL);
        if (!response_msg)
                return -ENOMEM;
@@ -384,7 +384,8 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
        u32 *response_msg;
        int ret;
 
-       response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+       response_msg = devm_kzalloc(priv->dev,
+                                   AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
                                    GFP_KERNEL);
        if (!response_msg)
                return -ENOMEM;
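The two mpfs hunks above fix the classic sizeof(pointer) vs sizeof(*pointer) bug — sizing by the pointer yields the pointer width (8 bytes on 64-bit), not the element size — and the earlier EFI capsule fix is a close cousin, sizing by void * instead of phys_addr_t. The conventional idiom ties the size to the object, as in this hedged sketch:

#include <linux/device.h>
#include <linux/slab.h>

static u32 *example_alloc_responses(struct device *dev, size_t count)
{
	u32 *buf;

	/* sizeof(*buf) tracks the element type; sizeof(buf) would not */
	buf = devm_kcalloc(dev, count, sizeof(*buf), GFP_KERNEL);
	return buf;
}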
index e00c333105170f5a2a702593feab340ddc4a7d8e..753e7be039e4d9cd830190d75d8b62ca1219ec96 100644 (file)
@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
        if (IS_ERR(chip->gpiod_oe))
                return PTR_ERR(chip->gpiod_oe);
 
-       gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
        spi_set_drvdata(spi, chip);
 
        chip->gpio_chip.label = spi->modalias;
@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
                goto exit_destroy;
        }
 
+       gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
        ret = gpiochip_add_data(&chip->gpio_chip, chip);
        if (!ret)
                return 0;
index 8b3a0f45b57456b13a04ecf4bf706b8cbf27b8dd..75be4a3ca7f8443f55a68aff5185044bbbdaa367 100644 (file)
@@ -968,11 +968,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 
        ret = gpiochip_irqchip_init_valid_mask(gc);
        if (ret)
-               goto err_remove_acpi_chip;
+               goto err_free_hogs;
 
        ret = gpiochip_irqchip_init_hw(gc);
        if (ret)
-               goto err_remove_acpi_chip;
+               goto err_remove_irqchip_mask;
 
        ret = gpiochip_add_irqchip(gc, lock_key, request_key);
        if (ret)
@@ -997,13 +997,13 @@ err_remove_irqchip:
        gpiochip_irqchip_remove(gc);
 err_remove_irqchip_mask:
        gpiochip_irqchip_free_valid_mask(gc);
-err_remove_acpi_chip:
+err_free_hogs:
+       gpiochip_free_hogs(gc);
        acpi_gpiochip_remove(gc);
+       gpiochip_remove_pin_ranges(gc);
 err_remove_of_chip:
-       gpiochip_free_hogs(gc);
        of_gpiochip_remove(gc);
 err_free_gpiochip_mask:
-       gpiochip_remove_pin_ranges(gc);
        gpiochip_free_valid_mask(gc);
 err_remove_from_list:
        spin_lock_irqsave(&gpio_lock, flags);
@@ -2042,6 +2042,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
 int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
                            unsigned long config)
 {
+#ifdef CONFIG_PINCTRL
+       if (list_empty(&gc->gpiodev->pin_ranges))
+               return -ENOTSUPP;
+#endif
+
        return pinctrl_gpio_set_config(gc, offset, config);
 }
 EXPORT_SYMBOL_GPL(gpiochip_generic_config);
index 2520db0b776e1bccf213fd541baf6275dbb192eb..c7edba18a6f09c4d3c75af737d94737a0e6f2890 100644 (file)
@@ -199,7 +199,7 @@ config DRM_TTM
 config DRM_TTM_KUNIT_TEST
         tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
         default n
-        depends on DRM && KUNIT && MMU
+        depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
         select DRM_TTM
         select DRM_EXPORT_FOR_TESTS if m
         select DRM_KUNIT_TEST_HELPERS
@@ -207,7 +207,8 @@ config DRM_TTM_KUNIT_TEST
         help
           Enables unit tests for TTM, a GPU memory manager subsystem used
           to manage memory buffers. This option is mostly useful for kernel
-          developers.
+          developers. It depends on (UML || COMPILE_TEST) since no other driver
+          that uses TTM can be loaded while the tests run.
 
           If in doubt, say "N".
 
index cc21ed67a33075d4f197f2217b883faacc777f3d..7099ff9cf8c50d7b7ea96149bcef235368fae165 100644 (file)
@@ -1528,6 +1528,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
  */
 void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
 {
+       if (adev->in_runpm)
+               return;
+
        if (amdgpu_acpi_is_s0ix_active(adev))
                adev->in_s0ix = true;
        else if (amdgpu_acpi_is_s3_active(adev))
index c64c01e2944a2e4c1f4177355771a1b47cfcc666..1c614451deadd10d5dfb29a591fbeb394505ac91 100644 (file)
@@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
                return AMD_RESET_METHOD_MODE1;
 }
 
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+       u32 sol_reg;
+
+       sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+       /* Will reset for the following suspend-abort cases.
+        * 1) For now the reset is limited to the APU side; the dGPU case
+        *    has not been checked yet.
+        * 2) S3 suspend aborted while the TOS was already launched.
+        */
+       if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+                       !adev->suspend_complete &&
+                       sol_reg)
+               return true;
+
+       return false;
+}
+
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
        /* original raven doesn't have full asic reset */
-       if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
-           (adev->apu_flags & AMD_APU_IS_RAVEN2))
+       /* On the latest Raven, the GPU reset can be performed
+        * successfully, so temporarily enable it for the
+        * S3 suspend-abort case.
+        */
+       if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+           (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
+               !soc15_need_reset_on_resume(adev))
                return 0;
 
        switch (soc15_asic_reset_method(adev)) {
@@ -1298,24 +1321,6 @@ static int soc15_common_suspend(void *handle)
        return soc15_common_hw_fini(adev);
 }
 
-static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
-{
-       u32 sol_reg;
-
-       sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-
-       /* Will reset for the following suspend abort cases.
-        * 1) Only reset limit on APU side, dGPU hasn't checked yet.
-        * 2) S3 suspend abort and TOS already launched.
-        */
-       if (adev->flags & AMD_IS_APU && adev->in_s3 &&
-                       !adev->suspend_complete &&
-                       sol_reg)
-               return true;
-
-       return false;
-}
-
 static int soc15_common_resume(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
index cf875751971feea51531ffd8580ca54fdca5287d..1a9bbb04bd5e2c7fb9d29b5c7f2e1d0cd92d978c 100644 (file)
@@ -1843,21 +1843,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                        DRM_ERROR("amdgpu: fail to register dmub aux callback");
                        goto error;
                }
-               if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
-                       DRM_ERROR("amdgpu: fail to register dmub hpd callback");
-                       goto error;
-               }
-               if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
-                       DRM_ERROR("amdgpu: fail to register dmub hpd callback");
-                       goto error;
-               }
-       }
-
-       /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
-        * It is expected that DMUB will resend any pending notifications at this point, for
-        * example HPD from DPIA.
-        */
-       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+               /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+                * It is expected that DMUB will resend any pending notifications at this point. Note
+                * that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to
+                * align with the legacy interface initialization sequence. Connection status will be
+                * proactively detected once in amdgpu_dm_initialize_drm_device().
+                */
                dc_enable_dmub_outbox(adev->dm.dc);
 
                /* DPIA trace goes to dmesg logs only if outbox is enabled */
@@ -2287,6 +2278,7 @@ static int dm_sw_fini(void *handle)
 
        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
+               kfree(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }
 
@@ -3536,6 +3528,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 
+       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+               if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+                       DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+
+               if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+                       DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+       }
+
        list_for_each_entry(connector,
                        &dev->mode_config.connector_list, head) {
 
@@ -3564,10 +3564,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                                        handle_hpd_rx_irq,
                                        (void *) aconnector);
                }
-
-               if (adev->dm.hpd_rx_offload_wq)
-                       adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
-                               aconnector;
        }
 }
 
@@ -4561,6 +4557,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        goto fail;
                }
 
+               if (dm->hpd_rx_offload_wq)
+                       dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+                               aconnector;
+
                if (!dc_link_detect_connection_type(link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");
 
@@ -6534,10 +6534,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-       struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
        struct dc_link *dc_link = aconnector->dc_link;
        struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
        struct edid *edid;
+       struct i2c_adapter *ddc;
+
+       if (dc_link && dc_link->aux_mode)
+               ddc = &aconnector->dm_dp_aux.aux.ddc;
+       else
+               ddc = &aconnector->i2c->base;
 
        /*
         * Note: drm_get_edid gets edid in the following order:
@@ -6545,7 +6550,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
         * 2) firmware EDID if set via edid_firmware module parameter
         * 3) regular DDC read.
         */
-       edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+       edid = drm_get_edid(connector, ddc);
        if (!edid) {
                DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
                return;
@@ -6586,12 +6591,18 @@ static int get_modes(struct drm_connector *connector)
 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 {
        struct drm_connector *connector = &aconnector->base;
-       struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
+       struct dc_link *dc_link = aconnector->dc_link;
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;
+       struct i2c_adapter *ddc;
+
+       if (dc_link->aux_mode)
+               ddc = &aconnector->dm_dp_aux.aux.ddc;
+       else
+               ddc = &aconnector->i2c->base;
 
        /*
         * Note: drm_get_edid gets edid in the following order:
@@ -6599,7 +6610,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
         * 2) firmware EDID if set via edid_firmware module parameter
         * 3) regular DDC read.
         */
-       edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+       edid = drm_get_edid(connector, ddc);
        if (!edid) {
                DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
                return;
@@ -11158,14 +11169,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
                                if (range->flags != 1)
                                        continue;
 
-                               amdgpu_dm_connector->min_vfreq = range->min_vfreq;
-                               amdgpu_dm_connector->max_vfreq = range->max_vfreq;
-                               amdgpu_dm_connector->pixel_clock_mhz =
-                                       range->pixel_clock_mhz * 10;
-
                                connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
                                connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
 
+                               if (edid->revision >= 4) {
+                                       if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+                                               connector->display_info.monitor_range.min_vfreq += 255;
+                                       if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+                                               connector->display_info.monitor_range.max_vfreq += 255;
+                               }
+
+                               amdgpu_dm_connector->min_vfreq =
+                                       connector->display_info.monitor_range.min_vfreq;
+                               amdgpu_dm_connector->max_vfreq =
+                                       connector->display_info.monitor_range.max_vfreq;
+                               amdgpu_dm_connector->pixel_clock_mhz =
+                                       range->pixel_clock_mhz * 10;
+
                                break;
                        }
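EDID 1.4 extends the one-byte vertical-rate range limits with offset flags: when a flag is set, the stored byte is biased by 255, so a stored max of 45 with the max-offset flag decodes to 300 Hz. A hedged helper mirroring the logic above (the function name is hypothetical):

#include <drm/drm_edid.h>

static unsigned int example_decode_max_vfreq(u8 stored, u8 flags, u8 edid_rev)
{
	unsigned int vfreq = stored;		/* e.g. 45 */

	if (edid_rev >= 4 && (flags & DRM_EDID_RANGE_OFFSET_MAX_VFREQ))
		vfreq += 255;			/* 45 -> 300 Hz */
	return vfreq;
}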
 
index 85b7f58a7f35a478f551ec097b1613b504ced535..c27063305a1341c677c95e91dd49eb4fca1ea94a 100644 (file)
@@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
        /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
        case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
        case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+       case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
+       case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
                DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
                edid_caps->panel_patch.remove_sink_ext_caps = true;
                break;
@@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 
        edid_caps->edid_hdmi = connector->display_info.is_hdmi;
 
+       apply_edid_quirks(edid_buf, edid_caps);
+
        sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
        if (sad_count <= 0)
                return result;
@@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
        else
                edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
 
-       apply_edid_quirks(edid_buf, edid_caps);
-
        kfree(sads);
        kfree(sadb);
 
index 2b79a0e5638e1b757ea3d3527add517db139552e..363d522603a21744c02e3e3497a2907862b02fd1 100644 (file)
@@ -125,7 +125,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
                unsigned int count,
                union dmub_rb_cmd *cmd_list)
 {
-       struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+       struct dc_context *dc_ctx;
        struct dmub_srv *dmub;
        enum dmub_status status;
        int i;
@@ -133,6 +133,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;
 
+       dc_ctx = dc_dmub_srv->ctx;
        dmub = dc_dmub_srv->dmub;
 
        for (i = 0 ; i < count; i++) {
@@ -1161,7 +1162,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
 
 bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
 {
-       struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+       struct dc_context *dc_ctx;
        enum dmub_status status;
 
        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
@@ -1170,6 +1171,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
        if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
                return true;
 
+       dc_ctx = dc_dmub_srv->ctx;
+
        if (wait) {
                if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
                        do {
index e8570060d007ba5bab0db3b3395aca2b9c487573..5bca67407c5b16b682ed669ef2b6382be7965b1b 100644 (file)
@@ -290,4 +290,5 @@ void dce_panel_cntl_construct(
        dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
        dce_panel_cntl->base.ctx = init_data->ctx;
        dce_panel_cntl->base.inst = init_data->inst;
+       dce_panel_cntl->base.pwrseq_inst = 0;
 }
index ad0df1a72a90ab4ff13b267f1c69392e68703884..9e96a3ace2077cb53bff30f5984a5391a017d239 100644 (file)
@@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct(
        dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs;
        dcn301_panel_cntl->base.ctx = init_data->ctx;
        dcn301_panel_cntl->base.inst = init_data->inst;
+       dcn301_panel_cntl->base.pwrseq_inst = 0;
 }
index 03248422d6ffde2d6923fb33185bf8dd12607787..281be20b1a1071576a4ca9037ee105333268801e 100644 (file)
@@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct(
        struct dcn31_panel_cntl *dcn31_panel_cntl,
        const struct panel_cntl_init_data *init_data)
 {
+       uint8_t pwrseq_inst = 0xF;
+
        dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
        dcn31_panel_cntl->base.ctx = init_data->ctx;
        dcn31_panel_cntl->base.inst = init_data->inst;
-       dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
+
+       switch (init_data->eng_id) {
+       case ENGINE_ID_DIGA:
+               pwrseq_inst = 0;
+               break;
+       case ENGINE_ID_DIGB:
+               pwrseq_inst = 1;
+               break;
+       default:
+               DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
+               ASSERT(false);
+               break;
+       }
+
+       dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
 }
index 23a608274096f89002e7e5438be18c85d023e442..1ba6933d2b3617aa6d275647d17320dd0755ae69 100644 (file)
@@ -398,7 +398,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
        /* Copy clocks tables entries, if available */
        if (dml2->config.bbox_overrides.clks_table.num_states) {
                p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
-
                for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
                        p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
                }
@@ -437,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
        }
 
        dml2_policy_build_synthetic_soc_states(s, p);
+       if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+               dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+               // Override last out_state with data from last in_state
+               // This will ensure that out_state contains max fclk
+               memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
+                               &p->in_states->state_array[p->in_states->num_states - 1],
+                               sizeof(struct soc_state_bounding_box_st));
+       }
 }
 
 void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out)
index 26307e599614c6e1212c53184ba02849ae6e1dbb..2a58a7687bdb5779db6c639d3cbf2277aaf231ae 100644 (file)
@@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2,
                        in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
                }
                for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
+                       if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
+                               dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
+                                         __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+                               break;
+                       }
                        dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
                        dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
                        dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
index 5dcbaa2db964aee7de17c2e9306606cac1817b08..e97d964a1791cefb2eb47c91780a41e3682baed0 100644 (file)
@@ -57,7 +57,7 @@ struct panel_cntl_funcs {
 struct panel_cntl_init_data {
        struct dc_context *ctx;
        uint32_t inst;
-       uint32_t pwrseq_inst;
+       uint32_t eng_id;
 };
 
 struct panel_cntl {
index 37d3027c32dcb1007dbb90e209f7f459be81617e..cf22b8f28ba6c65394a536465143d1c2f81bd2b6 100644 (file)
@@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter(
        }
 }
 
-static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
-{
-       uint8_t pwrseq_inst = 0xF;
-       struct dc_context *dc_ctx = link->dc->ctx;
-
-       DC_LOGGER_INIT(dc_ctx->logger);
-
-       switch (link->eng_id) {
-       case ENGINE_ID_DIGA:
-               pwrseq_inst = 0;
-               break;
-       case ENGINE_ID_DIGB:
-               pwrseq_inst = 1;
-               break;
-       default:
-               DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
-               ASSERT(false);
-               break;
-       }
-
-       return pwrseq_inst;
-}
-
-
 static void link_destruct(struct dc_link *link)
 {
        int i;
@@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link,
                        link->link_id.id == CONNECTOR_ID_LVDS)) {
                panel_cntl_init_data.ctx = dc_ctx;
                panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
-               panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
+               panel_cntl_init_data.eng_id = link->eng_id;
                link->panel_cntl =
                        link->dc->res_pool->funcs->panel_cntl_create(
                                                                &panel_cntl_init_data);
index 087d57850304c45193a7f5de336953c1dec9cbba..39c5e1dfa275a64f32fa358d875efc6d0bd99682 100644 (file)
@@ -2558,6 +2558,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err, ret;
+       u32 pwm_mode;
        int value;
 
        if (amdgpu_in_reset(adev))
@@ -2569,13 +2570,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
        if (err)
                return err;
 
+       if (value == 0)
+               pwm_mode = AMD_FAN_CTRL_NONE;
+       else if (value == 1)
+               pwm_mode = AMD_FAN_CTRL_MANUAL;
+       else if (value == 2)
+               pwm_mode = AMD_FAN_CTRL_AUTO;
+       else
+               return -EINVAL;
+
        ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return ret;
        }
 
-       ret = amdgpu_dpm_set_fan_control_mode(adev, value);
+       ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
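
With the translation in place, only the three documented hwmon values are accepted, and anything else now fails with -EINVAL instead of being forwarded raw to the DPM layer. A quick userspace check, assuming a typical hwmon sysfs path (the hwmon index varies per system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* 0 = no control, 1 = manual, 2 = automatic fan control */
            int fd = open("/sys/class/hwmon/hwmon0/pwm1_enable", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "3", 1) < 0)      /* out of range: now fails with EINVAL */
                    perror("write");
            close(fd);
            return 0;
    }
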
index df4f20293c16a368748cd4138c0912906f80acc7..eb4da3666e05d6d145a927258d7ea247425dad93 100644 (file)
@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
        return 0;
 }
 
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+       int ret;
+
+       ret = si_thermal_enable_alert(adev, false);
+       if (ret)
+               return ret;
+       ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = si_thermal_enable_alert(adev, true);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
 static void si_dpm_disable(struct amdgpu_device *adev)
 {
        struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
 
 static int si_dpm_late_init(void *handle)
 {
+       int ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->pm.dpm_enabled)
+               return 0;
+
+       ret = si_set_temperature_range(adev);
+       if (ret)
+               return ret;
+#if 0 //TODO ?
+       si_dpm_powergate_uvd(adev, true);
+#endif
        return 0;
 }
 
index 4cd43bbec910e351eb27a79b4c39308d6462d196..bcad42534da46d780423d636953c40993e7001ac 100644 (file)
@@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
        if (default_power_limit)
                *default_power_limit = power_limit;
 
-       if (smu->od_enabled) {
+       if (smu->od_enabled)
                od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-               od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-       } else {
+       else
                od_percent_upper = 0;
-               od_percent_lower = 100;
-       }
+
+       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                                        od_percent_upper, od_percent_lower, power_limit);
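
The same change repeats in the navi10, sienna_cichlid, smu_v13_0_0 and smu_v13_0_7 hunks below: the lower overdrive percentage is now always read from the powerplay table, so the reported minimum power limit stays table-driven even with overdrive disabled (previously the else branch forced od_percent_lower = 100, which drives the minimum to zero). A simplified sketch, assuming the usual (100 ± percent)/100 scaling these drivers apply to the default limit:

    /* Sketch: derive min/max power limits from the default limit.
     * od_percent_upper widens the maximum, od_percent_lower lowers the
     * minimum; with the fix, "lower" comes from the table in all cases.
     */
    static unsigned int scale_limit(unsigned int limit, int delta_percent)
    {
            return limit * (100 + delta_percent) / 100;
    }

    /* max_power_limit = scale_limit(power_limit,  od_percent_upper);
     * min_power_limit = scale_limit(power_limit, -od_percent_lower);
     */
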
index 8d1d29ffb0f1c54a781c2508447454f9bb7aa5ee..ed189a3878ebe7199833e495f45417461897a93a 100644 (file)
@@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu,
                *default_power_limit = power_limit;
 
        if (smu->od_enabled &&
-                   navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+                   navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
                od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-               od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-       } else {
+       else
                od_percent_upper = 0;
-               od_percent_lower = 100;
-       }
+
+       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                        od_percent_upper, od_percent_lower, power_limit);
index 21fc033528fa9d1a57ea2699a2780501e2902b3c..e2ad2b972ab0b3550d7aceb66e632eb372a0ffc5 100644 (file)
@@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
        if (default_power_limit)
                *default_power_limit = power_limit;
 
-       if (smu->od_enabled) {
+       if (smu->od_enabled)
                od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
-               od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
-       } else {
+       else
                od_percent_upper = 0;
-               od_percent_lower = 100;
-       }
+
+       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                        od_percent_upper, od_percent_lower, power_limit);
index a9954ffc02c562b91bf1166bb4bf87208152b36a..9b80f18ea6c359f279f050ee9f645b92dd43d057 100644 (file)
@@ -2369,13 +2369,12 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
        if (default_power_limit)
                *default_power_limit = power_limit;
 
-       if (smu->od_enabled) {
+       if (smu->od_enabled)
                od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
-               od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
-       } else {
+       else
                od_percent_upper = 0;
-               od_percent_lower = 100;
-       }
+
+       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                        od_percent_upper, od_percent_lower, power_limit);
index 0ffdb58af74e654af7ca73acf078415663f41dfe..3dc7b60cb0754d0f62fd3cead74f1553071b8597 100644 (file)
@@ -2333,13 +2333,12 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
        if (default_power_limit)
                *default_power_limit = power_limit;
 
-       if (smu->od_enabled) {
+       if (smu->od_enabled)
                od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
-               od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
-       } else {
+       else
                od_percent_upper = 0;
-               od_percent_lower = 100;
-       }
+
+       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                        od_percent_upper, od_percent_lower, power_limit);
index 4894f7ee737b41dd0e81503b5cb7f3fc1182a6e6..6dae5ad74ff081c4616304ada3a6af302cc21f87 100644 (file)
@@ -229,8 +229,6 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
                smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
                break;
        case IP_VERSION(14, 0, 0):
-               if ((smu->smc_fw_version < 0x5d3a00))
-                       dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version);
                smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
                break;
        default:
index 47fdbae4adfc0207f628c8e2e156703b960e5829..9310c4758e38ce9791ba8d61ce61a2face051fe8 100644 (file)
@@ -261,7 +261,10 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
                *value = metrics->MpipuclkFrequency;
                break;
        case METRICS_AVERAGE_GFXACTIVITY:
-               *value = metrics->GfxActivity / 100;
+               if (smu->smc_fw_version > 0x5d4600)
+                       *value = metrics->GfxActivity;
+               else
+                       *value = metrics->GfxActivity / 100;
                break;
        case METRICS_AVERAGE_VCNACTIVITY:
                *value = metrics->VcnActivity / 100;
index bb55f697a1819264e1320f6118d28dd776236f4f..6886db2d9e00c4544ee3d81e29e779f806c8a9b7 100644 (file)
@@ -25,20 +25,18 @@ static void drm_aux_hpd_bridge_release(struct device *dev)
        ida_free(&drm_aux_hpd_bridge_ida, adev->id);
 
        of_node_put(adev->dev.platform_data);
+       of_node_put(adev->dev.of_node);
 
        kfree(adev);
 }
 
-static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+static void drm_aux_hpd_bridge_free_adev(void *_adev)
 {
-       struct auxiliary_device *adev = _adev;
-
-       auxiliary_device_delete(adev);
-       auxiliary_device_uninit(adev);
+       auxiliary_device_uninit(_adev);
 }
 
 /**
- * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * devm_drm_dp_hpd_bridge_alloc - allocate a HPD DisplayPort bridge
  * @parent: device instance providing this bridge
  * @np: device node pointer corresponding to this bridge instance
  *
@@ -46,11 +44,9 @@ static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
  * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
  * able to send the HPD events.
  *
- * Return: device instance that will handle created bridge or an error code
- * encoded into the pointer.
+ * Return: bridge auxiliary device pointer or an error pointer
  */
-struct device *drm_dp_hpd_bridge_register(struct device *parent,
-                                         struct device_node *np)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np)
 {
        struct auxiliary_device *adev;
        int ret;
@@ -74,18 +70,62 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent,
 
        ret = auxiliary_device_init(adev);
        if (ret) {
+               of_node_put(adev->dev.platform_data);
+               of_node_put(adev->dev.of_node);
                ida_free(&drm_aux_hpd_bridge_ida, adev->id);
                kfree(adev);
                return ERR_PTR(ret);
        }
 
-       ret = auxiliary_device_add(adev);
-       if (ret) {
-               auxiliary_device_uninit(adev);
+       ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev);
+       if (ret)
                return ERR_PTR(ret);
-       }
 
-       ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+       return adev;
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc);
+
+static void drm_aux_hpd_bridge_del_adev(void *_adev)
+{
+       auxiliary_device_delete(_adev);
+}
+
+/**
+ * devm_drm_dp_hpd_bridge_add - register a HPD DisplayPort bridge
+ * @dev: struct device to tie registration lifetime to
+ * @adev: bridge auxiliary device to be registered
+ *
+ * Return: zero on success or a negative errno
+ */
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+       int ret;
+
+       ret = auxiliary_device_add(adev);
+       if (ret)
+               return ret;
+
+       return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev);
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add);
+
+/**
+ * drm_dp_hpd_bridge_register - allocate and register a HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Return: device instance that will handle created bridge or an error pointer
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np)
+{
+       struct auxiliary_device *adev;
+       int ret;
+
+       adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
+       if (IS_ERR(adev))
+               return ERR_CAST(adev);
+
+       ret = devm_drm_dp_hpd_bridge_add(parent, adev);
        if (ret)
                return ERR_PTR(ret);
 
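
Splitting the old one-shot registration into an alloc and an add step lets a caller get at the auxiliary device (e.g. to stash driver data) before the bridge goes live. A minimal consumer sketch under that assumption (probe path and names are illustrative, include path per the aux-bridge header):

    #include <linux/err.h>
    #include <drm/bridge/aux-bridge.h>

    static int example_probe(struct device *dev, struct device_node *np)
    {
            struct auxiliary_device *adev;

            adev = devm_drm_dp_hpd_bridge_alloc(dev, np);
            if (IS_ERR(adev))
                    return PTR_ERR(adev);

            /* set up anything that must exist before HPD events can fire */

            return devm_drm_dp_hpd_bridge_add(dev, adev);
    }
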
index c1a99bf4dffd1d221d9ca9e57d303d415acef00f..5ebdd6f8f36e6bc8d67e99a54bac3856d45ac9eb 100644 (file)
@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
                 u64 start, u64 end,
                 unsigned int order)
 {
+       u64 req_size = mm->chunk_size << order;
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
                if (drm_buddy_block_is_allocated(block))
                        continue;
 
+               if (block_start < start || block_end > end) {
+                       u64 adjusted_start = max(block_start, start);
+                       u64 adjusted_end = min(block_end, end);
+
+                       if (round_down(adjusted_end + 1, req_size) <=
+                           round_up(adjusted_start, req_size))
+                               continue;
+               }
+
                if (contains(start, end, block_start, block_end) &&
                    order == drm_buddy_block_order(block)) {
                        /*
@@ -538,13 +548,13 @@ static int __alloc_range(struct drm_buddy *mm,
                list_add(&block->left->tmp_link, dfs);
        } while (1);
 
-       list_splice_tail(&allocated, blocks);
-
        if (total_allocated < size) {
                err = -ENOSPC;
                goto err_free;
        }
 
+       list_splice_tail(&allocated, blocks);
+
        return 0;
 
 err_undo:
@@ -761,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
                return -EINVAL;
 
        /* Actual range allocation */
-       if (start + size == end)
+       if (start + size == end) {
+               if (!IS_ALIGNED(start | end, min_block_size))
+                       return -EINVAL;
+
                return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
+       }
 
        original_size = size;
        original_min_size = min_block_size;
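
The early continue above rejects a candidate block whose overlap with [start, end] cannot hold even one properly aligned chunk of the requested size; together with the new IS_ALIGNED() check, an explicit range allocation now fails up front instead of handing back blocks that violate the bias. The feasibility test in isolation (a stand-alone sketch, with generic round helpers in place of the kernel macros):

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t rnd_up(uint64_t x, uint64_t y)   { return (x + y - 1) / y * y; }
    static uint64_t rnd_down(uint64_t x, uint64_t y) { return x / y * y; }

    /* true if [astart, aend] contains one aligned chunk of req_size */
    static bool overlap_fits(uint64_t astart, uint64_t aend, uint64_t req_size)
    {
            return rnd_down(aend + 1, req_size) > rnd_up(astart, req_size);
    }
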
index 3f479483d7d80f21febcda087570bcc6af2fd34c..23b4e9a3361d82e0d5bc6a1daf121a0645011d96 100644 (file)
@@ -760,9 +760,11 @@ static void output_poll_execute(struct work_struct *work)
        changed = dev->mode_config.delayed_event;
        dev->mode_config.delayed_event = false;
 
-       if (!drm_kms_helper_poll && dev->mode_config.poll_running) {
-               drm_kms_helper_disable_hpd(dev);
-               dev->mode_config.poll_running = false;
+       if (!drm_kms_helper_poll) {
+               if (dev->mode_config.poll_running) {
+                       drm_kms_helper_disable_hpd(dev);
+                       dev->mode_config.poll_running = false;
+               }
                goto out;
        }
 
index 84101baeecc6e67d562e15e7d5ded57df39ffdc1..a6c19de462928ed70da033a60b10f08061bd1dc8 100644 (file)
@@ -1040,7 +1040,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
        uint64_t *points;
        uint32_t signaled_count, i;
 
-       if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+       if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+                    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
                lockdep_assert_none_held_once();
 
        points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@@ -1109,7 +1110,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
         * fall through and try a 0 timeout wait!
         */
 
-       if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+       if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+                    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
                for (i = 0; i < count; ++i)
                        drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
        }
@@ -1416,10 +1418,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
 
        /* This happens inside the syncobj lock */
        fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+       if (!fence)
+               return;
+
        ret = dma_fence_chain_find_seqno(&fence, entry->point);
-       if (ret != 0 || !fence) {
+       if (ret != 0) {
+               /* The given seqno has not been submitted yet. */
                dma_fence_put(fence);
                return;
+       } else if (!fence) {
+               /* If dma_fence_chain_find_seqno returns 0 but sets the fence
+                * to NULL, it implies that the given seqno is signaled and a
+                * later seqno has already been submitted. Assign a stub fence
+                * so that the eventfd still gets signaled below.
+                */
+               fence = dma_fence_get_stub();
        }
 
        list_del_init(&entry->node);
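
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE waits for a fence to be *submitted* rather than signaled, so it needs the same wait-entry registration (and lockdep annotation) as WAIT_FOR_SUBMIT. From userspace the flag is exercised through libdrm, roughly like this (a sketch, error handling elided):

    #include <stdint.h>
    #include <xf86drm.h>

    /* Block until a fence has been attached to the syncobj, without
     * waiting for it to actually signal.
     */
    static int wait_for_submit(int fd, uint32_t handle, int64_t timeout_ns)
    {
            return drmSyncobjWait(fd, &handle, 1, timeout_ns,
                                  DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, NULL);
    }
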
index 47cd6bb04366f34a798d76df35b3bba3be2cd67e..06900ff307b23a411114394e8d5d0f5e7ba4ad89 100644 (file)
@@ -246,7 +246,14 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
        enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
        struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
 
-       return intel_port_to_phy(i915, dig_port->base.port);
+       /*
+        * FIXME should we care about the (VBT defined) dig_port->aux_ch
+        * relationship or should this be purely defined by the hardware layout?
+        * Currently if the port doesn't appear in the VBT, or if it's declared
+        * as HDMI-only and routed to a combo PHY, the encoder either won't be
+        * present at all or it will not have an aux_ch assigned.
+        */
+       return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
 }
 
 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -414,7 +421,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 
        intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
 
-       if (DISPLAY_VER(dev_priv) < 12)
+       /* FIXME this is a mess */
+       if (phy != PHY_NONE)
                intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
                             0, ICL_LANE_ENABLE_AUX);
 
@@ -437,7 +445,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 
        drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 
-       intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
+       /* FIXME this is a mess */
+       if (phy != PHY_NONE)
+               intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+                            ICL_LANE_ENABLE_AUX, 0);
 
        intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
 
index 3fdd8a5179831288f1e10bc8d9161d8d23a7ba6a..ac7fe6281afe3f52b37c0e75f422045982bb076a 100644 (file)
@@ -609,6 +609,13 @@ struct intel_connector {
         * and active (i.e. dpms ON state). */
        bool (*get_hw_state)(struct intel_connector *);
 
+       /*
+        * Optional hook called during init/resume to sync any state
+        * stored in the connector (e.g. DSC state) wrt. the HW state.
+        */
+       void (*sync_state)(struct intel_connector *connector,
+                          const struct intel_crtc_state *crtc_state);
+
        /* Panel info for eDP and LVDS */
        struct intel_panel panel;
 
index ae647d03af25cd48a3151b362c5ed1e752bdba60..94d2a15d8444ad6a9d88029091cf4aafda60f497 100644 (file)
@@ -5699,6 +5699,9 @@ intel_dp_detect(struct drm_connector *connector,
                goto out;
        }
 
+       if (!intel_dp_is_edp(intel_dp))
+               intel_psr_init_dpcd(intel_dp);
+
        intel_dp_detect_dsc_caps(intel_dp, intel_connector);
 
        intel_dp_configure_mst(intel_dp);
@@ -5859,6 +5862,19 @@ intel_dp_connector_unregister(struct drm_connector *connector)
        intel_connector_unregister(connector);
 }
 
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+                                  const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+       if (crtc_state && crtc_state->dsc.compression_enable) {
+               drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+               connector->dp.dsc_decompression_enabled = true;
+       } else {
+               connector->dp.dsc_decompression_enabled = false;
+       }
+}
+
 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 {
        struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
index 05db46b111f216e150760e0dff76581cc18bbcca..375d0677cd8c516c56ca2cf9ba592b4746677304 100644 (file)
@@ -45,6 +45,8 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
 int intel_dp_min_bpp(enum intel_output_format output_format);
 bool intel_dp_init_connector(struct intel_digital_port *dig_port,
                             struct intel_connector *intel_connector);
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+                                  const struct intel_crtc_state *crtc_state);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
                              int link_rate, int lane_count);
 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
index 3a595cd433d4952078ed20227a1d47f4fe11d458..8538d1ce2fcb854bdf5fcc60086992039312702e 100644 (file)
@@ -330,23 +330,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
          0, 0 },
 };
 
-static struct drm_dp_aux *
-intel_dp_hdcp_get_aux(struct intel_connector *connector)
-{
-       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
-
-       if (intel_encoder_is_mst(connector->encoder))
-               return &connector->port->aux;
-       else
-               return &dig_port->dp.aux;
-}
-
 static int
 intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
                              u8 *rx_status)
 {
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
-       struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
        ssize_t ret;
 
        ret = drm_dp_dpcd_read(aux,
@@ -399,7 +389,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
                            const struct hdcp2_dp_msg_data *hdcp2_msg_data)
 {
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct intel_dp *dp = &dig_port->dp;
+       struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
        u8 msg_id = hdcp2_msg_data->msg_id;
        int ret, timeout;
        bool msg_ready = false;
@@ -454,8 +446,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
        unsigned int offset;
        u8 *byte = buf;
        ssize_t ret, bytes_to_write, len;
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
        const struct hdcp2_dp_msg_data *hdcp2_msg_data;
-       struct drm_dp_aux *aux;
 
        hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
        if (!hdcp2_msg_data)
@@ -463,8 +456,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
 
        offset = hdcp2_msg_data->offset;
 
-       aux = intel_dp_hdcp_get_aux(connector);
-
        /* No msg_id in DP HDCP2.2 msgs */
        bytes_to_write = size - 1;
        byte++;
@@ -490,7 +481,8 @@ static
 ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector,
                                     u32 *dev_cnt, u8 *byte)
 {
-       struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
        ssize_t ret;
        u8 *rx_info = byte;
 
@@ -515,8 +507,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
 {
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct drm_dp_aux *aux;
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
+       struct intel_dp *dp = &dig_port->dp;
+       struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
        unsigned int offset;
        u8 *byte = buf;
        ssize_t ret, bytes_to_recv, len;
@@ -530,8 +523,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
                return -EINVAL;
        offset = hdcp2_msg_data->offset;
 
-       aux = intel_dp_hdcp_get_aux(connector);
-
        ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data);
        if (ret < 0)
                return ret;
@@ -561,13 +552,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
 
                /* Entire msg read timeout since initiate of msg read */
                if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) {
-                       if (intel_encoder_is_mst(connector->encoder))
-                               msg_end = ktime_add_ms(ktime_get_raw(),
-                                                      hdcp2_msg_data->msg_read_timeout *
-                                                      connector->port->parent->num_ports);
-                       else
-                               msg_end = ktime_add_ms(ktime_get_raw(),
-                                                      hdcp2_msg_data->msg_read_timeout);
+                       msg_end = ktime_add_ms(ktime_get_raw(),
+                                              hdcp2_msg_data->msg_read_timeout);
                }
 
                ret = drm_dp_dpcd_read(aux, offset,
@@ -651,12 +637,11 @@ static
 int intel_dp_hdcp2_capable(struct intel_connector *connector,
                           bool *capable)
 {
-       struct drm_dp_aux *aux;
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
        u8 rx_caps[3];
        int ret;
 
-       aux = intel_dp_hdcp_get_aux(connector);
-
        *capable = false;
        ret = drm_dp_dpcd_read(aux,
                               DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
index 8a9432335030346ecf3b7501a4cfb19cd59d5259..a01a59f57ae5525acb65e2725b3b37a9e31a065c 100644 (file)
@@ -1534,6 +1534,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
                return NULL;
 
        intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+       intel_connector->sync_state = intel_dp_connector_sync_state;
        intel_connector->mst_port = intel_dp;
        intel_connector->port = port;
        drm_dp_mst_get_port_malloc(port);
index 94eece7f63be3341fc92807345c0f7b01f862275..caeca3a8442c5d76008525ff56c65356eb6171be 100644 (file)
@@ -318,12 +318,6 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
                        const struct intel_crtc_state *crtc_state =
                                to_intel_crtc_state(crtc->base.state);
 
-                       if (crtc_state->dsc.compression_enable) {
-                               drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
-                               connector->dp.dsc_decompression_enabled = true;
-                       } else {
-                               connector->dp.dsc_decompression_enabled = false;
-                       }
                        conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
                }
        }
@@ -775,8 +769,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
 
        drm_connector_list_iter_begin(&i915->drm, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
+               struct intel_crtc_state *crtc_state = NULL;
+
                if (connector->get_hw_state(connector)) {
-                       struct intel_crtc_state *crtc_state;
                        struct intel_crtc *crtc;
 
                        connector->base.dpms = DRM_MODE_DPMS_ON;
@@ -802,6 +797,10 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
+
+               if (connector->sync_state)
+                       connector->sync_state(connector, crtc_state);
+
                drm_dbg_kms(&i915->drm,
                            "[CONNECTOR:%d:%s] hw state readout: %s\n",
                            connector->base.base.id, connector->base.name,
index 57bbf3e3af92fbb0325d0c41765f7a0f0d0ac806..4faaf4b3fc53baf048cad365636955c2fce0e921 100644 (file)
@@ -2776,9 +2776,6 @@ void intel_psr_init(struct intel_dp *intel_dp)
        if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
                return;
 
-       if (!intel_dp_is_edp(intel_dp))
-               intel_psr_init_dpcd(intel_dp);
-
        /*
         * HSW spec explicitly says PSR is tied to port A.
         * BDW+ platforms have an instance of PSR registers per transcoder but
index acc6b6804105102389dc26c3fefce80444d0adad..2915d7afe5ccc2facdaeaee164e7b9c60796f361 100644 (file)
@@ -1209,7 +1209,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
        struct intel_sdvo_tv_format format;
        u32 format_map;
 
-       format_map = 1 << conn_state->tv.mode;
+       format_map = 1 << conn_state->tv.legacy_mode;
        memset(&format, 0, sizeof(format));
        memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
 
@@ -2298,7 +2298,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
         * Read the list of supported input resolutions for the selected TV
         * format.
         */
-       format_map = 1 << conn_state->tv.mode;
+       format_map = 1 << conn_state->tv.legacy_mode;
        memcpy(&tv_res, &format_map,
               min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
 
@@ -2363,7 +2363,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
                int i;
 
                for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
-                       if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
+                       if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
                                *val = i;
 
                                return 0;
@@ -2419,7 +2419,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
        struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
 
        if (property == intel_sdvo_connector->tv_format) {
-               state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
+               state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
 
                if (state->crtc) {
                        struct drm_crtc_state *crtc_state =
@@ -3076,7 +3076,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                drm_property_add_enum(intel_sdvo_connector->tv_format, i,
                                      tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
-       intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
+       intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
        drm_object_attach_property(&intel_sdvo_connector->base.base.base,
                                   intel_sdvo_connector->tv_format, 0);
        return true;
index d4386cb3569e0991bc3c0c78a4415d77a7bc1998..992a725de751a2d1925c23da8763e5ea7dce4714 100644 (file)
@@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
 
 static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
 {
-       int format = conn_state->tv.mode;
+       int format = conn_state->tv.legacy_mode;
 
        return &tv_modes[format];
 }
@@ -1704,7 +1704,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
                        break;
        }
 
-       connector->state->tv.mode = i;
+       connector->state->tv.legacy_mode = i;
 }
 
 static int
@@ -1859,7 +1859,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
        old_state = drm_atomic_get_old_connector_state(state, connector);
        new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
 
-       if (old_state->tv.mode != new_state->tv.mode ||
+       if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
            old_state->tv.margins.left != new_state->tv.margins.left ||
            old_state->tv.margins.right != new_state->tv.margins.right ||
            old_state->tv.margins.top != new_state->tv.margins.top ||
@@ -1896,7 +1896,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
        conn_state->tv.margins.right = 46;
        conn_state->tv.margins.bottom = 37;
 
-       conn_state->tv.mode = 0;
+       conn_state->tv.legacy_mode = 0;
 
        /* Create TV properties then attach current values */
        for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
@@ -1910,7 +1910,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
 
        drm_object_attach_property(&connector->base,
                                   i915->drm.mode_config.legacy_tv_mode_property,
-                                  conn_state->tv.mode);
+                                  conn_state->tv.legacy_mode);
        drm_object_attach_property(&connector->base,
                                   i915->drm.mode_config.tv_left_margin_property,
                                   conn_state->tv.margins.left);
index 1d3ebdf4069b5d0fea98aefdb2b1609f82b9650e..c08b67593565c5827d4555e70b88b083e97172d9 100644 (file)
@@ -379,6 +379,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
        GEM_WARN_ON(obj->userptr.page_ref);
 
+       if (!obj->userptr.notifier.mm)
+               return;
+
        mmu_interval_notifier_remove(&obj->userptr.notifier);
        obj->userptr.notifier.mm = NULL;
 }
index 2990dd4d4a0d8a84ad5794815dbb4661610314a2..e14ac0ab1314d1032a707762a1be444a14b3ca39 100644 (file)
@@ -3,6 +3,8 @@
  * Copyright © 2021 Intel Corporation
  */
 
+#include <linux/jiffies.h>
+
 //#include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
@@ -12,7 +14,7 @@
 
 #define REDUCED_TIMESLICE      5
 #define REDUCED_PREEMPT                10
-#define WAIT_FOR_RESET_TIME    10000
+#define WAIT_FOR_RESET_TIME_MS 10000
 
 struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
 {
@@ -91,7 +93,7 @@ int intel_selftest_wait_for_rq(struct i915_request *rq)
 {
        long ret;
 
-       ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME);
+       ret = i915_request_wait(rq, 0, msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS));
        if (ret < 0)
                return ret;
 
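
i915_request_wait() takes its timeout in jiffies, so the bare 10000 meant anywhere from 10 s (HZ=1000) to 100 s (HZ=100) depending on kernel configuration; converting at the call site pins it to the intended 10 seconds:

    /* HZ-independent: always ~10 s regardless of CONFIG_HZ */
    long timeout = msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS);
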
index 3f73b211fa8e3e3bc4812180883c9685f8377f19..3407450435e2057dd3973441ba6e31485e69ee6d 100644 (file)
@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
        if (priv->encoders[MESON_ENC_CVBS]) {
                meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
                drm_bridge_remove(&meson_encoder_cvbs->bridge);
-               drm_bridge_remove(meson_encoder_cvbs->next_bridge);
        }
 }
index 3f93c70488cad1829bbe488d8bf8f7b3833859f1..311b91630fbe536cf724223a1fa71e565ba2c778 100644 (file)
@@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
        if (priv->encoders[MESON_ENC_DSI]) {
                meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
                drm_bridge_remove(&meson_encoder_dsi->bridge);
-               drm_bridge_remove(meson_encoder_dsi->next_bridge);
        }
 }
index 25ea765586908f14d08715f45ca9def85a6a07f3..c4686568c9ca5d81b4066315681263e0fbd848a2 100644 (file)
@@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
        if (priv->encoders[MESON_ENC_HDMI]) {
                meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
                drm_bridge_remove(&meson_encoder_hdmi->bridge);
-               drm_bridge_remove(meson_encoder_hdmi->next_bridge);
        }
 }
index d37d599aec273b41b7ec54eb04b55ee86770d1a5..4c72124ffb5d495bdd24eefaf086f4d9401663ce 100644 (file)
@@ -329,10 +329,26 @@ static const struct component_ops dp_display_comp_ops = {
        .unbind = dp_display_unbind,
 };
 
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+       struct drm_connector *connector;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       connector = dp->dp_display.connector;
+       drm_helper_hpd_irq_event(connector->dev);
+}
+
 static int dp_display_send_hpd_notification(struct dp_display_private *dp,
                                            bool hpd)
 {
-       struct drm_bridge *bridge = dp->dp_display.bridge;
+       if ((hpd && dp->dp_display.link_ready) ||
+                       (!hpd && !dp->dp_display.link_ready)) {
+               drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
+                               (hpd ? "on" : "off"));
+               return 0;
+       }
 
        /* reset video pattern flag on disconnect */
        if (!hpd) {
@@ -348,7 +364,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
 
        drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
                        dp->dp_display.connector_type, hpd);
-       drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready);
+       dp_display_send_hpd_event(&dp->dp_display);
 
        return 0;
 }
index 1e6aaf95ff7c79483f7d8bba1ddce897bb7affcf..ceef470c9fbfcfb08be6abd69627b7e7bc66366d 100644 (file)
@@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM
        help
          Say Y here if you want to enable experimental support for
          Shared Virtual Memory (SVM).
+
+config DRM_NOUVEAU_GSP_DEFAULT
+       bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
+       depends on DRM_NOUVEAU
+       default n
+       help
+         Say Y here if you want to use the GSP codepaths by default on
+         Turing and Ampere GPUs.
index 0d9fc741a719328722f2c1873bb07bf4b120e890..932c9fd0b2d89ce8c3ec04165bbefa0cec8b25ce 100644 (file)
@@ -11,6 +11,7 @@ struct nvkm_client {
        u32 debug;
 
        struct rb_root objroot;
+       spinlock_t obj_lock;
 
        void *data;
        int (*event)(u64 token, void *argv, u32 argc);
index d1bb8151a1df565c44db55432222f09641d7b109..80f74ee0fc78677f8f890e8cc5daf8f363817b34 100644 (file)
@@ -199,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvif_device *device = &drm->client.device;
+       struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
        struct nvkm_gr *gr = nvxx_gr(device);
        struct drm_nouveau_getparam *getparam = data;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -263,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
                getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
                break;
        }
+       case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
+               getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+               break;
+       case NOUVEAU_GETPARAM_VRAM_USED: {
+               struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+               getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
+               break;
+       }
        default:
                NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
                return -EINVAL;
index 49c2bcbef1299de1f556353423300b345e5cc538..5a887d67dc0e8c71cf1987acb68f48f4bbf05d70 100644 (file)
@@ -764,7 +764,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        if (unlikely(nouveau_cli_uvmm(cli)))
-               return -ENOSYS;
+               return nouveau_abi16_put(abi16, -ENOSYS);
 
        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->chid == req->channel) {
index ebdeb8eb9e774186707d17508975311c14f3fabf..c55662937ab22caa54cd0d8f3df44b0d3e170197 100644 (file)
@@ -180,6 +180,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
        client->device = device;
        client->debug = nvkm_dbgopt(dbg, "CLIENT");
        client->objroot = RB_ROOT;
+       spin_lock_init(&client->obj_lock);
        client->event = event;
        INIT_LIST_HEAD(&client->umem);
        spin_lock_init(&client->lock);
index 7c554c14e8841da1bb0374f25d9a47512c6f3765..aea3ba72027abfbdf0456f065bce416eac26b348 100644 (file)
@@ -30,8 +30,10 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
                   const struct nvkm_object_func *func)
 {
        struct nvkm_object *object;
+       unsigned long flags;
 
        if (handle) {
+               spin_lock_irqsave(&client->obj_lock, flags);
                struct rb_node *node = client->objroot.rb_node;
                while (node) {
                        object = rb_entry(node, typeof(*object), node);
@@ -40,9 +42,12 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
                        else
                        if (handle > object->object)
                                node = node->rb_right;
-                       else
+                       else {
+                               spin_unlock_irqrestore(&client->obj_lock, flags);
                                goto done;
+                       }
                }
+               spin_unlock_irqrestore(&client->obj_lock, flags);
                return ERR_PTR(-ENOENT);
        } else {
                object = &client->object;
@@ -57,30 +62,39 @@ done:
 void
 nvkm_object_remove(struct nvkm_object *object)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&object->client->obj_lock, flags);
        if (!RB_EMPTY_NODE(&object->node))
                rb_erase(&object->node, &object->client->objroot);
+       spin_unlock_irqrestore(&object->client->obj_lock, flags);
 }
 
 bool
 nvkm_object_insert(struct nvkm_object *object)
 {
-       struct rb_node **ptr = &object->client->objroot.rb_node;
+       struct rb_node **ptr;
        struct rb_node *parent = NULL;
+       unsigned long flags;
 
+       spin_lock_irqsave(&object->client->obj_lock, flags);
+       ptr = &object->client->objroot.rb_node;
        while (*ptr) {
                struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
                parent = *ptr;
-               if (object->object < this->object)
+               if (object->object < this->object) {
                        ptr = &parent->rb_left;
-               else
-               if (object->object > this->object)
+               } else if (object->object > this->object) {
                        ptr = &parent->rb_right;
-               else
+               } else {
+                       spin_unlock_irqrestore(&object->client->obj_lock, flags);
                        return false;
+               }
        }
 
        rb_link_node(&object->node, parent, ptr);
        rb_insert_color(&object->node, &object->client->objroot);
+       spin_unlock_irqrestore(&object->client->obj_lock, flags);
        return true;
 }
 
index 4135690326f44789535e8cb375ccfe1ee5fa68c3..3a30bea30e366f47ecda0bbabac5441aed285565 100644 (file)
@@ -168,12 +168,11 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
        rm->flush = r535_bar_flush;
 
        ret = gf100_bar_new_(rm, device, type, inst, &bar);
-       *pbar = bar;
        if (ret) {
-               if (!bar)
-                       kfree(rm);
+               kfree(rm);
                return ret;
        }
+       *pbar = bar;
 
        bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
        if (!bar->flushBAR2PhysMode)
index 19188683c8fca90a7656b53ab15a8ee58d8575e0..8c2bf1c16f2a9568a8d434838d0c7691d9d70ff7 100644 (file)
@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
        return (void *)fw;
 }
 
+static void
+shadow_fw_release(void *fw)
+{
+       release_firmware(fw);
+}
+
 static const struct nvbios_source
 shadow_fw = {
        .name = "firmware",
        .init = shadow_fw_init,
-       .fini = (void(*)(void *))release_firmware,
+       .fini = shadow_fw_release,
        .read = shadow_fw_read,
        .rw = false,
 };
index a41735ab60683f02fde33f0107a2edae89155a6e..a73a5b58979045b07468c1443940f87e1b151f67 100644 (file)
@@ -1054,8 +1054,6 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
        /* Release the DMA buffers that were needed only for boot and init */
        nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
        nvkm_gsp_mem_dtor(gsp, &gsp->libos);
-       nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
-       nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
 
        return ret;
 }
@@ -2163,6 +2161,8 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
 
        r535_gsp_dtor_fws(gsp);
 
+       nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
+       nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
        nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
        nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
        nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
@@ -2312,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
 {
        struct nvkm_subdev *subdev = &gsp->subdev;
        int ret;
+       bool enable_gsp = fwif->enable;
 
-       if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+       enable_gsp = true;
+#endif
+       if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
                return -EINVAL;
 
        if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
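
With the new Kconfig option compiled in, enable_gsp defaults to true; the existing NvGspRm config option still overrides the default in either direction. Without the option, users can opt in per boot (a usage note, assuming the standard nouveau config= module parameter that feeds cfgopt):

    nouveau.config=NvGspRm=1
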
index c4c0f08e92026d80824a6932a696144da65e0311..4945a1e787eb3efc8bb9617bef1a75fba13bb656 100644 (file)
@@ -1768,11 +1768,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
 };
 
 static const struct drm_display_mode starry_himax83102_j02_default_mode = {
-       .clock = 162850,
+       .clock = 162680,
        .hdisplay = 1200,
-       .hsync_start = 1200 + 50,
-       .hsync_end = 1200 + 50 + 20,
-       .htotal = 1200 + 50 + 20 + 50,
+       .hsync_start = 1200 + 60,
+       .hsync_end = 1200 + 60 + 20,
+       .htotal = 1200 + 60 + 20 + 40,
        .vdisplay = 1920,
        .vsync_start = 1920 + 116,
        .vsync_end = 1920 + 116 + 8,
index a73cff7a307082d97ce78e43deaff5547cf55964..03d1c76aec2d3f7aca6a52acbb1a42455b37faa8 100644 (file)
@@ -1243,9 +1243,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
 
        drm_mode_config_reset(drm);
 
-       err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
-       if (err < 0)
-               goto hub;
+       /*
+        * Only take over from a potential firmware framebuffer if any CRTCs
+        * have been registered. This must not be a fatal error because there
+        * are other accelerators that are exposed via this driver.
+        *
+        * Another case where this happens is on Tegra234 where the display
+        * hardware is no longer part of the host1x complex, so this driver
+        * will not expose any modesetting features.
+        */
+       if (drm->mode_config.num_crtc > 0) {
+               err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+               if (err < 0)
+                       goto hub;
+       } else {
+               /*
+                * Indicate to userspace that this doesn't expose any display
+                * capabilities.
+                */
+               drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+       }
 
        err = drm_dev_register(drm, 0);
        if (err < 0)
index fee6bec757d1ac2e37918b5b16e97c4331cc0401..484360c7e1f65ceb72ce3938dd57f365ca84b35b 100644 (file)
 
 #include "../lib/drm_random.h"
 
+static unsigned int random_seed;
+
 static inline u64 get_size(int order, u64 chunk_size)
 {
        return (1 << order) * chunk_size;
 }
 
+static void drm_test_buddy_alloc_range_bias(struct kunit *test)
+{
+       u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+       DRM_RND_STATE(prng, random_seed);
+       unsigned int i, count, *order;
+       struct drm_buddy mm;
+       LIST_HEAD(allocated);
+
+       bias_size = SZ_1M;
+       ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
+       ps = max(SZ_4K, ps);
+       mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */
+
+       kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);
+
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+                              "buddy_init failed\n");
+
+       count = mm_size / bias_size;
+       order = drm_random_order(count, &prng);
+       KUNIT_EXPECT_TRUE(test, order);
+
+       /*
+        * Idea is to split the address space into uniform bias ranges, and then
+        * in some random order allocate within each bias, using various
+        * patterns within. This should detect if allocations leak out from a
+        * given bias, for example.
+        */
+
+       for (i = 0; i < count; i++) {
+               LIST_HEAD(tmp);
+               u32 size;
+
+               bias_start = order[i] * bias_size;
+               bias_end = bias_start + bias_size;
+               bias_rem = bias_size;
+
+               /* internal round_up too big */
+               KUNIT_ASSERT_TRUE_MSG(test,
+                                     drm_buddy_alloc_blocks(&mm, bias_start,
+                                                            bias_end, bias_size + ps, bias_size,
+                                                            &allocated,
+                                                            DRM_BUDDY_RANGE_ALLOCATION),
+                                     "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+                                     bias_start, bias_end, bias_size, bias_size);
+
+               /* size too big */
+               KUNIT_ASSERT_TRUE_MSG(test,
+                                     drm_buddy_alloc_blocks(&mm, bias_start,
+                                                            bias_end, bias_size + ps, ps,
+                                                            &allocated,
+                                                            DRM_BUDDY_RANGE_ALLOCATION),
+                                     "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+                                     bias_start, bias_end, bias_size + ps, ps);
+
+               /* bias range too small for size */
+               KUNIT_ASSERT_TRUE_MSG(test,
+                                     drm_buddy_alloc_blocks(&mm, bias_start + ps,
+                                                            bias_end, bias_size, ps,
+                                                            &allocated,
+                                                            DRM_BUDDY_RANGE_ALLOCATION),
+                                     "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+                                     bias_start + ps, bias_end, bias_size, ps);
+
+               /* bias misaligned */
+               KUNIT_ASSERT_TRUE_MSG(test,
+                                     drm_buddy_alloc_blocks(&mm, bias_start + ps,
+                                                            bias_end - ps,
+                                                            bias_size >> 1, bias_size >> 1,
+                                                            &allocated,
+                                                            DRM_BUDDY_RANGE_ALLOCATION),
+                                     "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+                                     bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);
+
+               /* single big page */
+               KUNIT_ASSERT_FALSE_MSG(test,
+                                      drm_buddy_alloc_blocks(&mm, bias_start,
+                                                             bias_end, bias_size, bias_size,
+                                                             &tmp,
+                                                             DRM_BUDDY_RANGE_ALLOCATION),
+                                      "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
+                                      bias_start, bias_end, bias_size, bias_size);
+               drm_buddy_free_list(&mm, &tmp);
+
+               /* single page with internal round_up */
+               KUNIT_ASSERT_FALSE_MSG(test,
+                                      drm_buddy_alloc_blocks(&mm, bias_start,
+                                                             bias_end, ps, bias_size,
+                                                             &tmp,
+                                                             DRM_BUDDY_RANGE_ALLOCATION),
+                                      "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+                                      bias_start, bias_end, ps, bias_size);
+               drm_buddy_free_list(&mm, &tmp);
+
+               /* random size within */
+               size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+               if (size)
+                       KUNIT_ASSERT_FALSE_MSG(test,
+                                              drm_buddy_alloc_blocks(&mm, bias_start,
+                                                                     bias_end, size, ps,
+                                                                     &tmp,
+                                                                     DRM_BUDDY_RANGE_ALLOCATION),
+                                              "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+                                              bias_start, bias_end, size, ps);
+
+               bias_rem -= size;
+               /* too big for current avail */
+               KUNIT_ASSERT_TRUE_MSG(test,
+                                     drm_buddy_alloc_blocks(&mm, bias_start,
+                                                            bias_end, bias_rem + ps, ps,
+                                                            &allocated,
+                                                            DRM_BUDDY_RANGE_ALLOCATION),
+                                     "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+                                     bias_start, bias_end, bias_rem + ps, ps);
+
+               if (bias_rem) {
+                       /* random fill of the remainder */
+                       size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+                       size = max(size, ps);
+
+                       KUNIT_ASSERT_FALSE_MSG(test,
+                                              drm_buddy_alloc_blocks(&mm, bias_start,
+                                                                     bias_end, size, ps,
+                                                                     &allocated,
+                                                                     DRM_BUDDY_RANGE_ALLOCATION),
+                                              "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+                                              bias_start, bias_end, size, ps);
+                       /*
+                        * Intentionally allow some space to be left
+                        * unallocated, and ideally not always on the bias
+                        * boundaries.
+                        */
+                       drm_buddy_free_list(&mm, &tmp);
+               } else {
+                       list_splice_tail(&tmp, &allocated);
+               }
+       }
+
+       kfree(order);
+       drm_buddy_free_list(&mm, &allocated);
+       drm_buddy_fini(&mm);
+
+       /*
+        * Something more free-form. Idea is to pick a random starting bias
+        * range within the address space and then start filling it up. Also
+        * randomly grow the bias range in both directions as we go along. This
+        * should give us bias start/end which is not always uniform like above,
+        * and in some cases will require the allocator to jump over already
+        * allocated nodes in the middle of the address space.
+        */
+
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+                              "buddy_init failed\n");
+
+       bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+       bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+       bias_end = max(bias_end, bias_start + ps);
+       bias_rem = bias_end - bias_start;
+
+       do {
+               u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+               KUNIT_ASSERT_FALSE_MSG(test,
+                                      drm_buddy_alloc_blocks(&mm, bias_start,
+                                                             bias_end, size, ps,
+                                                             &allocated,
+                                                             DRM_BUDDY_RANGE_ALLOCATION),
+                                      "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+                                      bias_start, bias_end, size, ps);
+               bias_rem -= size;
+
+               /*
+                * Try to randomly grow the bias range in both directions, or
+                * only one, or perhaps don't grow at all.
+                */
+               do {
+                       u32 old_bias_start = bias_start;
+                       u32 old_bias_end = bias_end;
+
+                       if (bias_start)
+                               bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
+                       if (bias_end != mm_size)
+                               bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);
+
+                       bias_rem += old_bias_start - bias_start;
+                       bias_rem += bias_end - old_bias_end;
+               } while (!bias_rem && (bias_start || bias_end != mm_size));
+       } while (bias_rem);
+
+       KUNIT_ASSERT_EQ(test, bias_start, 0);
+       KUNIT_ASSERT_EQ(test, bias_end, mm_size);
+       KUNIT_ASSERT_TRUE_MSG(test,
+                             drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
+                                                    ps, ps,
+                                                    &allocated,
+                                                    DRM_BUDDY_RANGE_ALLOCATION),
+                             "buddy_alloc passed with bias(%x-%x), size=%u\n",
+                             bias_start, bias_end, ps);
+
+       drm_buddy_free_list(&mm, &allocated);
+       drm_buddy_fini(&mm);
+}
+
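The new test exercises DRM_BUDDY_RANGE_ALLOCATION, which restricts where drm_buddy_alloc_blocks() may place blocks. For context, a minimal sketch of that interface in isolation (assuming the drm_buddy API as declared in include/drm/drm_buddy.h; not part of the patch):

#include <linux/sizes.h>
#include <drm/drm_buddy.h>

/* Allocate 64K worth of 4K pages, restricted to the [1M, 2M) window. */
static int example_range_alloc(void)
{
        struct drm_buddy mm;
        LIST_HEAD(blocks);
        int err;

        err = drm_buddy_init(&mm, SZ_8M, SZ_4K);
        if (err)
                return err;

        err = drm_buddy_alloc_blocks(&mm, SZ_1M, SZ_2M, SZ_64K, SZ_4K,
                                     &blocks, DRM_BUDDY_RANGE_ALLOCATION);
        if (!err)
                drm_buddy_free_list(&mm, &blocks);      /* hand the blocks back */

        drm_buddy_fini(&mm);
        return err;
}

Without the flag, the same call may satisfy the request from anywhere in the address space; with it, failure means the window itself cannot hold the request, which is exactly what the asserts above rely on.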
 static void drm_test_buddy_alloc_contiguous(struct kunit *test)
 {
-       u64 mm_size, ps = SZ_4K, i, n_pages, total;
+       const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
+       unsigned long i, n_pages, total;
        struct drm_buddy_block *block;
        struct drm_buddy mm;
        LIST_HEAD(left);
@@ -29,8 +235,6 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
        LIST_HEAD(right);
        LIST_HEAD(allocated);
 
-       mm_size = 16 * 3 * SZ_4K;
-
        KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
 
        /*
@@ -56,30 +260,30 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
                KUNIT_ASSERT_FALSE_MSG(test,
                                       drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                              ps, ps, list, 0),
-                                      "buddy_alloc hit an error size=%d\n",
+                                      "buddy_alloc hit an error size=%u\n",
                                       ps);
        } while (++i < n_pages);
 
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                           3 * ps, ps, &allocated,
                                                           DRM_BUDDY_CONTIGUOUS_ALLOCATION),
-                              "buddy_alloc didn't error size=%d\n", 3 * ps);
+                              "buddy_alloc didn't error size=%u\n", 3 * ps);
 
        drm_buddy_free_list(&mm, &middle);
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                           3 * ps, ps, &allocated,
                                                           DRM_BUDDY_CONTIGUOUS_ALLOCATION),
-                              "buddy_alloc didn't error size=%llu\n", 3 * ps);
+                              "buddy_alloc didn't error size=%u\n", 3 * ps);
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                           2 * ps, ps, &allocated,
                                                           DRM_BUDDY_CONTIGUOUS_ALLOCATION),
-                              "buddy_alloc didn't error size=%llu\n", 2 * ps);
+                              "buddy_alloc didn't error size=%u\n", 2 * ps);
 
        drm_buddy_free_list(&mm, &right);
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                           3 * ps, ps, &allocated,
                                                           DRM_BUDDY_CONTIGUOUS_ALLOCATION),
-                              "buddy_alloc didn't error size=%llu\n", 3 * ps);
+                              "buddy_alloc didn't error size=%u\n", 3 * ps);
        /*
         * At this point we should have enough contiguous space for 2 blocks,
         * however they are never buddies (since we freed middle and right) so
@@ -88,13 +292,13 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                            2 * ps, ps, &allocated,
                                                            DRM_BUDDY_CONTIGUOUS_ALLOCATION),
-                              "buddy_alloc hit an error size=%d\n", 2 * ps);
+                              "buddy_alloc hit an error size=%u\n", 2 * ps);
 
        drm_buddy_free_list(&mm, &left);
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
                                                            3 * ps, ps, &allocated,
                                                            DRM_BUDDY_CONTIGUOUS_ALLOCATION),
-                              "buddy_alloc hit an error size=%d\n", 3 * ps);
+                              "buddy_alloc hit an error size=%u\n", 3 * ps);
 
        total = 0;
        list_for_each_entry(block, &allocated, link)
@@ -363,17 +567,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
        drm_buddy_fini(&mm);
 }
 
+static int drm_buddy_suite_init(struct kunit_suite *suite)
+{
+       while (!random_seed)
+               random_seed = get_random_u32();
+
+       kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
+                  random_seed);
+
+       return 0;
+}
+
 static struct kunit_case drm_buddy_tests[] = {
        KUNIT_CASE(drm_test_buddy_alloc_limit),
        KUNIT_CASE(drm_test_buddy_alloc_optimistic),
        KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
        KUNIT_CASE(drm_test_buddy_alloc_pathological),
        KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+       KUNIT_CASE(drm_test_buddy_alloc_range_bias),
        {}
 };
 
 static struct kunit_suite drm_buddy_test_suite = {
        .name = "drm_buddy",
+       .suite_init = drm_buddy_suite_init,
        .test_cases = drm_buddy_tests,
 };
 
index b62f420a9f969d61e09e1cde82f2e49b9415d8aa..112438d965ffbefd4fa2cce5f246cc03a63759f9 100644 (file)
@@ -387,7 +387,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
                                enum ttm_caching caching,
                                pgoff_t start_page, pgoff_t end_page)
 {
-       struct page **pages = tt->pages;
+       struct page **pages = &tt->pages[start_page];
        unsigned int order;
        pgoff_t i, nr;
 
index ef56bd517b28c2604b1ed8c5f494a839217d25da..421b819fd4ba9a182d1dcbb7b364ad9a144477cb 100644 (file)
@@ -21,4 +21,5 @@ kunit_test_suite(xe_mocs_test_suite);
 
 MODULE_AUTHOR("Intel Corporation");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_mocs kunit test");
 MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
index 0b0e262e2166d69da1063915fa4c6eeedfd38bd6..4d3b80ec906d0a6f44793df496ef776a90d84596 100644 (file)
 #include "xe_ttm_stolen_mgr.h"
 #include "xe_vm.h"
 
+const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES]  = {
+       [XE_PL_SYSTEM] = "system",
+       [XE_PL_TT] = "gtt",
+       [XE_PL_VRAM0] = "vram0",
+       [XE_PL_VRAM1] = "vram1",
+       [XE_PL_STOLEN] = "stolen"
+};
+
 static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
@@ -713,8 +721,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
                migrate = xe->tiles[0].migrate;
 
        xe_assert(xe, migrate);
-
-       trace_xe_bo_move(bo);
+       trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
        xe_device_mem_access_get(xe);
 
        if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
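xe_mem_type_to_name is a sparse designated-initializer table: slots that are never named stay NULL, which is why the show_meminfo() hunk further down keeps its NULL check even after the explicit [4 ... 6] = NULL line is dropped. A standalone sketch of the idiom (hypothetical names):

#include <linux/kernel.h>

static const char *const example_type_names[8] = {
        [0] = "system",
        [1] = "gtt",
        [7] = "stolen",         /* slots 2..6 implicitly remain NULL */
};

static void example_print_names(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(example_type_names); i++) {
                if (!example_type_names[i])
                        continue;       /* skip the holes */
                pr_info("%u -> %s\n", i, example_type_names[i]);
        }
}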
index 9b1279aca1272cd69eab6d1121ed651b83210166..8be42ac6cd07023c520988cfff2cf3599de4859f 100644 (file)
@@ -243,6 +243,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo);
 int xe_bo_restore_pinned(struct xe_bo *bo);
 
 extern struct ttm_device_funcs xe_ttm_funcs;
+extern const char *const xe_mem_type_to_name[];
 
 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
index 1f0b4b9ce84f585ea599ccaf7f4641c3d139121f..5176c27e4b6a4c59739f5e456f79ca7d8a77ce94 100644 (file)
@@ -83,9 +83,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 }
 
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
-                                              struct xe_file *xef);
-
 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 {
        struct xe_device *xe = to_xe_device(dev);
@@ -102,8 +99,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
        mutex_unlock(&xef->exec_queue.lock);
        xa_destroy(&xef->exec_queue.xa);
        mutex_destroy(&xef->exec_queue.lock);
-       device_kill_persistent_exec_queues(xe, xef);
-
        mutex_lock(&xef->vm.lock);
        xa_for_each(&xef->vm.xa, idx, vm)
                xe_vm_close_and_put(vm);
@@ -255,9 +250,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
                        xa_erase(&xe->usm.asid_to_vm, asid);
        }
 
-       drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
-       INIT_LIST_HEAD(&xe->persistent_engines.list);
-
        spin_lock_init(&xe->pinned.lock);
        INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
        INIT_LIST_HEAD(&xe->pinned.external_vram);
@@ -570,37 +562,6 @@ void xe_device_shutdown(struct xe_device *xe)
 {
 }
 
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
-{
-       mutex_lock(&xe->persistent_engines.lock);
-       list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
-       mutex_unlock(&xe->persistent_engines.lock);
-}
-
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
-                                            struct xe_exec_queue *q)
-{
-       mutex_lock(&xe->persistent_engines.lock);
-       if (!list_empty(&q->persistent.link))
-               list_del(&q->persistent.link);
-       mutex_unlock(&xe->persistent_engines.lock);
-}
-
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
-                                              struct xe_file *xef)
-{
-       struct xe_exec_queue *q, *next;
-
-       mutex_lock(&xe->persistent_engines.lock);
-       list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
-                                persistent.link)
-               if (q->persistent.xef == xef) {
-                       xe_exec_queue_kill(q);
-                       list_del_init(&q->persistent.link);
-               }
-       mutex_unlock(&xe->persistent_engines.lock);
-}
-
 void xe_device_wmb(struct xe_device *xe)
 {
        struct xe_gt *gt = xe_root_mmio_gt(xe);
index 3da83b2332063882afcaffb3f204410fa848de9d..08d8b72c77319a74bc34562c92ec0aab0195be42 100644 (file)
@@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
 void xe_device_remove(struct xe_device *xe);
 void xe_device_shutdown(struct xe_device *xe);
 
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
-                                            struct xe_exec_queue *q);
-
 void xe_device_wmb(struct xe_device *xe);
 
 static inline struct xe_file *to_xe_file(const struct drm_file *file)
index 5dc9127a20293e1ebb56c3684e2fdb7e6f425b43..e8491979a6f21810cf4c480af08e9b2b6abfd4ee 100644 (file)
@@ -341,14 +341,6 @@ struct xe_device {
                struct mutex lock;
        } usm;
 
-       /** @persistent_engines: engines that are closed but still running */
-       struct {
-               /** @lock: protects persistent engines */
-               struct mutex lock;
-               /** @list: list of persistent engines */
-               struct list_head list;
-       } persistent_engines;
-
        /** @pinned: pinned BO state */
        struct {
                /** @lock: protected pinned BO list state */
index 82d1305e831f298f013338e4f7ee9e6e2ea67168..6040e4d22b2809c10385fadfbd6f4d8b6fdd0b28 100644 (file)
@@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo,
 
 static void show_meminfo(struct drm_printer *p, struct drm_file *file)
 {
-       static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES]  = {
-               [XE_PL_SYSTEM] = "system",
-               [XE_PL_TT] = "gtt",
-               [XE_PL_VRAM0] = "vram0",
-               [XE_PL_VRAM1] = "vram1",
-               [4 ... 6] = NULL,
-               [XE_PL_STOLEN] = "stolen"
-       };
        struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
        struct xe_file *xef = file->driver_priv;
        struct ttm_device *bdev = &xef->xe->ttm;
@@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
        spin_unlock(&client->bos_lock);
 
        for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
-               if (!mem_type_to_name[mem_type])
+               if (!xe_mem_type_to_name[mem_type])
                        continue;
 
                man = ttm_manager_type(bdev, mem_type);
@@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
                                               DRM_GEM_OBJECT_RESIDENT |
                                               (mem_type != XE_PL_SYSTEM ? 0 :
                                               DRM_GEM_OBJECT_PURGEABLE),
-                                              mem_type_to_name[mem_type]);
+                                              xe_mem_type_to_name[mem_type]);
                }
        }
 }
index 254b1d3af4cb56888700f82b2a6b8fa3436e1a2a..49223026c89fd5e3626be84a9687774d29b6bcb2 100644 (file)
@@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
        q->fence_irq = &gt->fence_irq[hwe->class];
        q->ring_ops = gt->ring_ops[hwe->class];
        q->ops = gt->exec_queue_ops;
-       INIT_LIST_HEAD(&q->persistent.link);
        INIT_LIST_HEAD(&q->compute.link);
        INIT_LIST_HEAD(&q->multi_gt_link);
 
@@ -310,102 +309,6 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
        return q->ops->set_timeslice(q, value);
 }
 
-static int exec_queue_set_preemption_timeout(struct xe_device *xe,
-                                            struct xe_exec_queue *q, u64 value,
-                                            bool create)
-{
-       u32 min = 0, max = 0;
-
-       xe_exec_queue_get_prop_minmax(q->hwe->eclass,
-                                     XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);
-
-       if (xe_exec_queue_enforce_schedule_limit() &&
-           !xe_hw_engine_timeout_in_range(value, min, max))
-               return -EINVAL;
-
-       return q->ops->set_preempt_timeout(q, value);
-}
-
-static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
-                                     u64 value, bool create)
-{
-       if (XE_IOCTL_DBG(xe, !create))
-               return -EINVAL;
-
-       if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
-               return -EINVAL;
-
-       if (value)
-               q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
-       else
-               q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
-
-       return 0;
-}
-
-static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
-                                     u64 value, bool create)
-{
-       u32 min = 0, max = 0;
-
-       if (XE_IOCTL_DBG(xe, !create))
-               return -EINVAL;
-
-       xe_exec_queue_get_prop_minmax(q->hwe->eclass,
-                                     XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);
-
-       if (xe_exec_queue_enforce_schedule_limit() &&
-           !xe_hw_engine_timeout_in_range(value, min, max))
-               return -EINVAL;
-
-       return q->ops->set_job_timeout(q, value);
-}
-
-static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
-                                     u64 value, bool create)
-{
-       if (XE_IOCTL_DBG(xe, !create))
-               return -EINVAL;
-
-       if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
-               return -EINVAL;
-
-       q->usm.acc_trigger = value;
-
-       return 0;
-}
-
-static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
-                                    u64 value, bool create)
-{
-       if (XE_IOCTL_DBG(xe, !create))
-               return -EINVAL;
-
-       if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
-               return -EINVAL;
-
-       q->usm.acc_notify = value;
-
-       return 0;
-}
-
-static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
-                                         u64 value, bool create)
-{
-       if (XE_IOCTL_DBG(xe, !create))
-               return -EINVAL;
-
-       if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
-               return -EINVAL;
-
-       if (value > DRM_XE_ACC_GRANULARITY_64M)
-               return -EINVAL;
-
-       q->usm.acc_granularity = value;
-
-       return 0;
-}
-
 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
                                             struct xe_exec_queue *q,
                                             u64 value, bool create);
@@ -413,12 +316,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
 static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
        [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
        [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
-       [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
-       [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
-       [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
-       [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
-       [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
-       [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
 };
 
 static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -437,10 +334,15 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
 
        if (XE_IOCTL_DBG(xe, ext.property >=
                         ARRAY_SIZE(exec_queue_set_property_funcs)) ||
-           XE_IOCTL_DBG(xe, ext.pad))
+           XE_IOCTL_DBG(xe, ext.pad) ||
+           XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
+                        ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
                return -EINVAL;
 
        idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
+       if (!exec_queue_set_property_funcs[idx])
+               return -EINVAL;
+
        return exec_queue_set_property_funcs[idx](xe, q, ext.value,  create);
 }
 
@@ -704,9 +606,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                }
 
                q = xe_exec_queue_create(xe, vm, logical_mask,
-                                        args->width, hwe,
-                                        xe_vm_in_lr_mode(vm) ? 0 :
-                                        EXEC_QUEUE_FLAG_PERSISTENT);
+                                        args->width, hwe, 0);
                up_read(&vm->lock);
                xe_vm_put(vm);
                if (IS_ERR(q))
@@ -728,8 +628,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                        goto kill_exec_queue;
        }
 
-       q->persistent.xef = xef;
-
        mutex_lock(&xef->exec_queue.lock);
        err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
        mutex_unlock(&xef->exec_queue.lock);
@@ -872,10 +770,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
        if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;
 
-       if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
-               xe_exec_queue_kill(q);
-       else
-               xe_device_add_persistent_exec_queues(xe, q);
+       xe_exec_queue_kill(q);
 
        trace_xe_exec_queue_close(q);
        xe_exec_queue_put(q);
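The set-property hardening combines three layers: a bounds check, array_index_nospec() to clamp the index even under speculative execution, and a NULL check for the table holes left behind by the removed properties. The generic shape, with hypothetical names:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/nospec.h>
#include <linux/types.h>

typedef int (*example_set_fn)(u64 value);

static int example_set_priority(u64 value) { return 0; }        /* stand-in */

static const example_set_fn example_funcs[] = {
        [0] = example_set_priority,
        /* entries for removed properties are simply absent -> NULL */
};

static int example_dispatch(u32 prop, u64 value)
{
        if (prop >= ARRAY_SIZE(example_funcs))
                return -EINVAL;

        /* Clamp the index under speculation before the table load. */
        prop = array_index_nospec(prop, ARRAY_SIZE(example_funcs));

        if (!example_funcs[prop])       /* hole: property no longer exists */
                return -EINVAL;

        return example_funcs[prop](value);
}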
index 8d4b7feb8c306b8a406a46f74c5cad2a430bdef3..36f4901d8d7ee917215d745da900ea49b7616a78 100644 (file)
@@ -105,16 +105,6 @@ struct xe_exec_queue {
                struct xe_guc_exec_queue *guc;
        };
 
-       /**
-        * @persistent: persistent exec queue state
-        */
-       struct {
-               /** @xef: file which this exec queue belongs to */
-               struct xe_file *xef;
-               /** @link: link in list of persistent exec queues */
-               struct list_head link;
-       } persistent;
-
        union {
                /**
                 * @parallel: parallel submission state
@@ -160,16 +150,6 @@ struct xe_exec_queue {
                spinlock_t lock;
        } compute;
 
-       /** @usm: unified shared memory state */
-       struct {
-               /** @acc_trigger: access counter trigger */
-               u32 acc_trigger;
-               /** @acc_notify: access counter notify */
-               u32 acc_notify;
-               /** @acc_granularity: access counter granularity */
-               u32 acc_granularity;
-       } usm;
-
        /** @ops: submission backend exec queue operations */
        const struct xe_exec_queue_ops *ops;
 
index 96b5224eb4787d4c7abd2b65b56d0559724bd2c8..acb4d9f38fd738dd5a0e66607cb1bbdbe91311c2 100644 (file)
@@ -212,7 +212,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
 static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
 {
        struct xe_execlist_port *port = exl->port;
-       enum xe_exec_queue_priority priority = exl->active_priority;
+       enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
 
        XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
        XE_WARN_ON(priority < 0);
@@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
                list_del(&exl->active_link);
        spin_unlock_irqrestore(&exl->port->lock, flags);
 
-       if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
-               xe_device_remove_persistent_exec_queues(xe, q);
        drm_sched_entity_fini(&exl->entity);
        drm_sched_fini(&exl->sched);
        kfree(exl);
index 9358f733688969391e68f22a2658b08c993d296a..9fcae65b64699eadb80a82b06386588a8af07f86 100644 (file)
@@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
        }
 
        if (xe_gt_is_media_type(gt)) {
-               sprintf(gtidle->name, "gt%d-mc\n", gt->info.id);
+               sprintf(gtidle->name, "gt%d-mc", gt->info.id);
                gtidle->idle_residency = xe_guc_pc_mc6_residency;
        } else {
-               sprintf(gtidle->name, "gt%d-rc\n", gt->info.id);
+               sprintf(gtidle->name, "gt%d-rc", gt->info.id);
                gtidle->idle_residency = xe_guc_pc_rc6_residency;
        }
 
index 7eef23a00d77ee679b011d8e4a0dc2b3ed1bb360..f4c485289dbe4d606e9022c5b58eec8e8123fdca 100644 (file)
@@ -247,6 +247,14 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
 
        xe_gt_assert(gt, vma);
 
+       /* Execlists not supported */
+       if (gt_to_xe(gt)->info.force_execlist) {
+               if (fence)
+                       __invalidation_fence_signal(fence);
+
+               return 0;
+       }
+
        action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
        action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
        if (!xe->info.has_range_tlb_invalidation) {
@@ -317,6 +325,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
        struct drm_printer p = drm_err_printer(__func__);
        int ret;
 
+       /* Execlists not supported */
+       if (gt_to_xe(gt)->info.force_execlist)
+               return 0;
+
        /*
         * XXX: See above, this algorithm only works if seqno are always in
         * order
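Both early returns follow the same guard pattern: when the backend cannot service an invalidation at all, the fence is completed immediately so waiters never block on it. A generic sketch using the plain dma_fence API (the driver itself uses its own invalidation-fence type and signalling helper):

#include <linux/dma-fence.h>

static int example_send_invalidation(bool backend_supported,
                                     struct dma_fence *fence)
{
        if (!backend_supported) {
                if (fence)
                        dma_fence_signal(fence);        /* unblock waiters */
                return 0;
        }

        /* ...queue the real invalidation; signal the fence on completion. */
        return 0;
}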
index 54ffcfcdd41f9ce3c590f5814fcbe3d3535946ac..f22ae717b0b2d3d8ff938d83f9ea954b4d5746e4 100644 (file)
@@ -1028,8 +1028,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
 
        if (xe_exec_queue_is_lr(q))
                cancel_work_sync(&ge->lr_tdr);
-       if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
-               xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
        release_guc_id(guc, q);
        xe_sched_entity_fini(&ge->entity);
        xe_sched_fini(&ge->sched);
index 0ec5ad2539f1be6098aa248876a2816f65b91f38..b38319d2801e008f14fa4b5089cd1b9dc204f547 100644 (file)
@@ -682,8 +682,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
 
 #define PVC_CTX_ASID           (0x2e + 1)
 #define PVC_CTX_ACC_CTR_THOLD  (0x2a + 1)
-#define ACC_GRANULARITY_S       20
-#define ACC_NOTIFY_S            16
 
 int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
                struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
@@ -754,13 +752,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
        xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
                             RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
        if (xe->info.has_asid && vm)
-               xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
-                                    (q->usm.acc_granularity <<
-                                     ACC_GRANULARITY_S) | vm->usm.asid);
-       if (xe->info.has_usm && vm)
-               xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
-                                    (q->usm.acc_notify << ACC_NOTIFY_S) |
-                                    q->usm.acc_trigger);
+               xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
 
        lrc->desc = LRC_VALID;
        lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
index 5f6b53ea5528b2c904ce0c4ee30e39c4a16139b7..02f7808f28cabd5533e634b41d1780769bdcbb10 100644 (file)
@@ -105,7 +105,7 @@ static void xe_resize_vram_bar(struct xe_device *xe)
 
        pci_bus_for_each_resource(root, root_res, i) {
                if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
-                   root_res->start > 0x100000000ull)
+                   (u64)root_res->start > 0x100000000ul)
                        break;
        }
 
index ac19bfa3f798c08e48511af0f18e093f29f88eab..6653c045f3c927f21e9d73dacb591ad363e01c47 100644 (file)
@@ -499,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
                 * this device *requires* 64K PTE size for VRAM, fail.
                 */
                if (level == 0 && !xe_parent->is_compact) {
-                       if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
+                       if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+                               xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
                                pte |= XE_PTE_PS64;
-                       else if (XE_WARN_ON(xe_walk->needs_64K))
+                       } else if (XE_WARN_ON(xe_walk->needs_64K)) {
                                return -EINVAL;
+                       }
                }
 
                ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
@@ -545,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
                *child = &xe_child->base;
 
                /*
-                * Prefer the compact pagetable layout for L0 if possible.
+                * Prefer the compact pagetable layout for L0 if possible. This
+                * is only possible if the VMA covers the entire 2MB region, as
+                * compact 64k and 4k pages cannot be mixed within a 2MB region.
                 * TODO: Suballocate the pt bo to avoid wasting a lot of
                 * memory.
                 */
                if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
                    covers && xe_pt_scan_64K(addr, next, xe_walk)) {
                        walk->shifts = xe_compact_pt_shifts;
+                       xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
                        flags |= XE_PDE_64K;
                        xe_child->is_compact = true;
                }
index aab92bee1d7cf2ff52ec07befe0dcc220325a649..02c9577fe418516bcb891174b9599b6c0b2903bf 100644 (file)
@@ -19,7 +19,7 @@
 #include "xe_macros.h"
 #include "xe_sched_job_types.h"
 
-struct user_fence {
+struct xe_user_fence {
        struct xe_device *xe;
        struct kref refcount;
        struct dma_fence_cb cb;
@@ -27,31 +27,32 @@ struct user_fence {
        struct mm_struct *mm;
        u64 __user *addr;
        u64 value;
+       int signalled;
 };
 
 static void user_fence_destroy(struct kref *kref)
 {
-       struct user_fence *ufence = container_of(kref, struct user_fence,
+       struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
                                                 refcount);
 
        mmdrop(ufence->mm);
        kfree(ufence);
 }
 
-static void user_fence_get(struct user_fence *ufence)
+static void user_fence_get(struct xe_user_fence *ufence)
 {
        kref_get(&ufence->refcount);
 }
 
-static void user_fence_put(struct user_fence *ufence)
+static void user_fence_put(struct xe_user_fence *ufence)
 {
        kref_put(&ufence->refcount, user_fence_destroy);
 }
 
-static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
-                                           u64 value)
+static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+                                              u64 value)
 {
-       struct user_fence *ufence;
+       struct xe_user_fence *ufence;
 
        ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
        if (!ufence)
@@ -69,7 +70,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 
 static void user_fence_worker(struct work_struct *w)
 {
-       struct user_fence *ufence = container_of(w, struct user_fence, worker);
+       struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
 
        if (mmget_not_zero(ufence->mm)) {
                kthread_use_mm(ufence->mm);
@@ -80,10 +81,11 @@ static void user_fence_worker(struct work_struct *w)
        }
 
        wake_up_all(&ufence->xe->ufence_wq);
+       WRITE_ONCE(ufence->signalled, 1);
        user_fence_put(ufence);
 }
 
-static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
+static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
 {
        INIT_WORK(&ufence->worker, user_fence_worker);
        queue_work(ufence->xe->ordered_wq, &ufence->worker);
@@ -92,7 +94,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
 
 static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-       struct user_fence *ufence = container_of(cb, struct user_fence, cb);
+       struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
 
        kick_ufence(ufence, fence);
 }
@@ -340,3 +342,39 @@ err_out:
 
        return ERR_PTR(-ENOMEM);
 }
+
+/**
+ * xe_sync_ufence_get() - Get user fence from sync
+ * @sync: input sync
+ *
+ * Get a user fence reference from sync.
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
+{
+       user_fence_get(sync->ufence);
+
+       return sync->ufence;
+}
+
+/**
+ * xe_sync_ufence_put() - Put user fence reference
+ * @ufence: user fence reference
+ *
+ */
+void xe_sync_ufence_put(struct xe_user_fence *ufence)
+{
+       user_fence_put(ufence);
+}
+
+/**
+ * xe_sync_ufence_get_status() - Get user fence status
+ * @ufence: user fence
+ *
+ * Return: 1 if signalled, 0 if not signalled, <0 on error
+ */
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
+{
+       return READ_ONCE(ufence->signalled);
+}
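Taken together, these helpers encode the user-fence lifetime rules: the object is kref-counted, and signalled is a one-way flag written once by the worker and polled locklessly elsewhere (hence the WRITE_ONCE/READ_ONCE pairing). A self-contained sketch of that shape, with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_ufence {
        struct kref refcount;
        int signalled;
};

static void example_release(struct kref *kref)
{
        kfree(container_of(kref, struct example_ufence, refcount));
}

/* Worker side: flip the one-way flag, then drop the worker's reference. */
static void example_signal(struct example_ufence *f)
{
        WRITE_ONCE(f->signalled, 1);    /* pairs with READ_ONCE() below */
        kref_put(&f->refcount, example_release);
}

/* Lockless poll from a thread that holds its own reference. */
static bool example_is_signalled(struct example_ufence *f)
{
        return READ_ONCE(f->signalled);
}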
index f43cdcaca6c5794ec8b42ab3bc77e1942004d046..0fd0d51208e627c9be72eef661c160458db6f5a4 100644 (file)
@@ -38,4 +38,8 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
        return !!sync->ufence;
 }
 
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
+void xe_sync_ufence_put(struct xe_user_fence *ufence);
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
+
 #endif
index 852db5e7884fcde668f6f85b6e4049fa5290f8a9..30ac3f51993b944e3dd86ccb059c75441f87f5e1 100644 (file)
@@ -18,7 +18,7 @@ struct xe_sync_entry {
        struct drm_syncobj *syncobj;
        struct dma_fence *fence;
        struct dma_fence_chain *chain_fence;
-       struct user_fence *ufence;
+       struct xe_user_fence *ufence;
        u64 addr;
        u64 timeline_value;
        u32 type;
index 044c20881de7ef0ede17f4dcfcdf34863817d8de..0650b2fa75efba85aea8d2a98e7d076ebabd607a 100644 (file)
@@ -167,9 +167,10 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
                goto err_mem_access;
 
        tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
-       if (IS_ERR(tile->mem.kernel_bb_pool))
+       if (IS_ERR(tile->mem.kernel_bb_pool)) {
                err = PTR_ERR(tile->mem.kernel_bb_pool);
-
+               goto err_mem_access;
+       }
        xe_wa_apply_tile_workarounds(tile);
 
        xe_tile_sysfs_init(tile);
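The xe_tile fix is the classic missing-goto bug: the error code was computed but execution fell through into the success path. The reduced shape of the corrected unwind, with hypothetical helpers:

#include <linux/err.h>

struct example_tile;
int example_take_resource(struct example_tile *t);
void *example_make_pool(struct example_tile *t);
void example_release_resource(struct example_tile *t);

static int example_init(struct example_tile *t)
{
        void *pool;
        int err;

        err = example_take_resource(t);
        if (err)
                return err;

        pool = example_make_pool(t);
        if (IS_ERR(pool)) {
                err = PTR_ERR(pool);
                goto err_release;       /* this jump was the missing piece */
        }

        return 0;

err_release:
        example_release_resource(t);
        return err;
}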
index 95163c303f3e11694bdc1bafd18eb6386740eb01..4ddc55527f9ab3e632635c5f920d4f4420df1255 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/tracepoint.h>
 #include <linux/types.h>
 
+#include "xe_bo.h"
 #include "xe_bo_types.h"
 #include "xe_exec_queue_types.h"
 #include "xe_gpu_scheduler_types.h"
@@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
                    TP_ARGS(fence),
 
                    TP_STRUCT__entry(
-                            __field(u64, fence)
+                            __field(struct xe_gt_tlb_invalidation_fence *, fence)
                             __field(int, seqno)
                             ),
 
                    TP_fast_assign(
-                          __entry->fence = (u64)fence;
+                          __entry->fence = fence;
                           __entry->seqno = fence->seqno;
                           ),
 
-                   TP_printk("fence=0x%016llx, seqno=%d",
+                   TP_printk("fence=%p, seqno=%d",
                              __entry->fence, __entry->seqno)
 );
 
@@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
                    TP_STRUCT__entry(
                             __field(size_t, size)
                             __field(u32, flags)
-                            __field(u64, vm)
+                            __field(struct xe_vm *, vm)
                             ),
 
                    TP_fast_assign(
                           __entry->size = bo->size;
                           __entry->flags = bo->flags;
-                          __entry->vm = (unsigned long)bo->vm;
+                          __entry->vm = bo->vm;
                           ),
 
-                   TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
+                   TP_printk("size=%zu, flags=0x%02x, vm=%p",
                              __entry->size, __entry->flags, __entry->vm)
 );
 
@@ -100,9 +101,31 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
             TP_ARGS(bo)
 );
 
-DEFINE_EVENT(xe_bo, xe_bo_move,
-            TP_PROTO(struct xe_bo *bo),
-            TP_ARGS(bo)
+TRACE_EVENT(xe_bo_move,
+           TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
+                    bool move_lacks_source),
+           TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
+           TP_STRUCT__entry(
+                    __field(struct xe_bo *, bo)
+                    __field(size_t, size)
+                    __field(u32, new_placement)
+                    __field(u32, old_placement)
+                    __array(char, device_id, 12)
+                    __field(bool, move_lacks_source)
+                       ),
+
+           TP_fast_assign(
+                  __entry->bo      = bo;
+                  __entry->size = bo->size;
+                  __entry->new_placement = new_placement;
+                  __entry->old_placement = old_placement;
+                  strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
+                  __entry->move_lacks_source = move_lacks_source;
+                  ),
+           TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+                     __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+                     xe_mem_type_to_name[__entry->old_placement],
+                     xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
 );
 
 DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -327,16 +350,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
                    TP_STRUCT__entry(
                             __field(u64, ctx)
                             __field(u32, seqno)
-                            __field(u64, fence)
+                            __field(struct xe_hw_fence *, fence)
                             ),
 
                    TP_fast_assign(
                           __entry->ctx = fence->dma.context;
                           __entry->seqno = fence->dma.seqno;
-                          __entry->fence = (unsigned long)fence;
+                          __entry->fence = fence;
                           ),
 
-                   TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
+                   TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
                              __entry->ctx, __entry->fence, __entry->seqno)
 );
 
@@ -365,7 +388,7 @@ DECLARE_EVENT_CLASS(xe_vma,
                    TP_ARGS(vma),
 
                    TP_STRUCT__entry(
-                            __field(u64, vma)
+                            __field(struct xe_vma *, vma)
                             __field(u32, asid)
                             __field(u64, start)
                             __field(u64, end)
@@ -373,14 +396,14 @@ DECLARE_EVENT_CLASS(xe_vma,
                             ),
 
                    TP_fast_assign(
-                          __entry->vma = (unsigned long)vma;
+                          __entry->vma = vma;
                           __entry->asid = xe_vma_vm(vma)->usm.asid;
                           __entry->start = xe_vma_start(vma);
                           __entry->end = xe_vma_end(vma) - 1;
                           __entry->ptr = xe_vma_userptr(vma);
                           ),
 
-                   TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
+                   TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
                              __entry->vma, __entry->asid, __entry->start,
                              __entry->end, __entry->ptr)
 )
@@ -465,16 +488,16 @@ DECLARE_EVENT_CLASS(xe_vm,
                    TP_ARGS(vm),
 
                    TP_STRUCT__entry(
-                            __field(u64, vm)
+                            __field(struct xe_vm *, vm)
                             __field(u32, asid)
                             ),
 
                    TP_fast_assign(
-                          __entry->vm = (unsigned long)vm;
+                          __entry->vm = vm;
                           __entry->asid = vm->usm.asid;
                           ),
 
-                   TP_printk("vm=0x%016llx, asid=0x%05x",  __entry->vm,
+                   TP_printk("vm=%p, asid=0x%05x",  __entry->vm,
                              __entry->asid)
 );
 
index 7b00faa672879c3b568c3ee3f3d9c20c54f527c9..3b21afe5b4883fa64aeb92c6d2174b014be96c59 100644 (file)
@@ -897,6 +897,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
        struct xe_device *xe = vm->xe;
        bool read_only = xe_vma_read_only(vma);
 
+       if (vma->ufence) {
+               xe_sync_ufence_put(vma->ufence);
+               vma->ufence = NULL;
+       }
+
        if (xe_vma_is_userptr(vma)) {
                struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
 
@@ -1608,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 
        trace_xe_vma_unbind(vma);
 
+       if (vma->ufence) {
+               struct xe_user_fence * const f = vma->ufence;
+
+               if (!xe_sync_ufence_get_status(f))
+                       return ERR_PTR(-EBUSY);
+
+               vma->ufence = NULL;
+               xe_sync_ufence_put(f);
+       }
+
        if (number_tiles > 1) {
                fences = kmalloc_array(number_tiles, sizeof(*fences),
                                       GFP_KERNEL);
@@ -1741,6 +1756,21 @@ err_fences:
        return ERR_PTR(err);
 }
 
+static struct xe_user_fence *
+find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
+{
+       unsigned int i;
+
+       for (i = 0; i < num_syncs; i++) {
+               struct xe_sync_entry *e = &syncs[i];
+
+               if (xe_sync_is_ufence(e))
+                       return xe_sync_ufence_get(e);
+       }
+
+       return NULL;
+}
+
 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
                        struct xe_exec_queue *q, struct xe_sync_entry *syncs,
                        u32 num_syncs, bool immediate, bool first_op,
@@ -1748,9 +1778,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 {
        struct dma_fence *fence;
        struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+       struct xe_user_fence *ufence;
 
        xe_vm_assert_held(vm);
 
+       ufence = find_ufence_get(syncs, num_syncs);
+       if (vma->ufence && ufence)
+               xe_sync_ufence_put(vma->ufence);
+
+       vma->ufence = ufence ?: vma->ufence;
+
        if (immediate) {
                fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
                                       last_op);
@@ -2117,10 +2154,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
                struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 
                if (__op->op == DRM_GPUVA_OP_MAP) {
-                       op->map.immediate =
-                               flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
-                       op->map.read_only =
-                               flags & DRM_XE_VM_BIND_FLAG_READONLY;
                        op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
                        op->map.pat_index = pat_index;
                } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
@@ -2190,15 +2223,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
 {
        if (vma->gpuva.flags & XE_VMA_PTE_1G)
                return SZ_1G;
-       else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+       else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
                return SZ_2M;
+       else if (vma->gpuva.flags & XE_VMA_PTE_64K)
+               return SZ_64K;
        else if (vma->gpuva.flags & XE_VMA_PTE_4K)
                return SZ_4K;
 
        return SZ_1G;   /* Uninitialized, used max size */
 }
 
-static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
+static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
 {
        switch (size) {
        case SZ_1G:
@@ -2207,9 +2242,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
        case SZ_2M:
                vma->gpuva.flags |= XE_VMA_PTE_2M;
                break;
+       case SZ_64K:
+               vma->gpuva.flags |= XE_VMA_PTE_64K;
+               break;
+       case SZ_4K:
+               vma->gpuva.flags |= XE_VMA_PTE_4K;
+               break;
        }
-
-       return SZ_4K;
 }
 
 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
@@ -2307,8 +2346,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                switch (op->base.op) {
                case DRM_GPUVA_OP_MAP:
                {
-                       flags |= op->map.read_only ?
-                               VMA_CREATE_FLAG_READ_ONLY : 0;
                        flags |= op->map.is_null ?
                                VMA_CREATE_FLAG_IS_NULL : 0;
 
@@ -2439,7 +2476,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
        case DRM_GPUVA_OP_MAP:
                err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
                                 op->syncs, op->num_syncs,
-                                op->map.immediate || !xe_vm_in_fault_mode(vm),
+                                !xe_vm_in_fault_mode(vm),
                                 op->flags & XE_VMA_OP_FIRST,
                                 op->flags & XE_VMA_OP_LAST);
                break;
@@ -2714,14 +2751,11 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
        return 0;
 }
 
-#define SUPPORTED_FLAGS        \
-       (DRM_XE_VM_BIND_FLAG_READONLY | \
-        DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL)
+#define SUPPORTED_FLAGS        (DRM_XE_VM_BIND_FLAG_NULL | \
+        DRM_XE_VM_BIND_FLAG_DUMPABLE)
 #define XE_64K_PAGE_MASK 0xffffull
 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
 
-#define MAX_BINDS      512     /* FIXME: Picking random upper limit */
-
 static int vm_bind_ioctl_check_args(struct xe_device *xe,
                                    struct drm_xe_vm_bind *args,
                                    struct drm_xe_vm_bind_op **bind_ops)
@@ -2733,16 +2767,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, args->extensions) ||
-           XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
+       if (XE_IOCTL_DBG(xe, args->extensions))
                return -EINVAL;
 
        if (args->num_binds > 1) {
                u64 __user *bind_user =
                        u64_to_user_ptr(args->vector_of_binds);
 
-               *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
-                                   args->num_binds, GFP_KERNEL);
+               *bind_ops = kvmalloc_array(args->num_binds,
+                                          sizeof(struct drm_xe_vm_bind_op),
+                                          GFP_KERNEL | __GFP_ACCOUNT);
                if (!*bind_ops)
                        return -ENOMEM;
 
@@ -2832,7 +2866,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 
 free_bind_ops:
        if (args->num_binds > 1)
-               kfree(*bind_ops);
+               kvfree(*bind_ops);
        return err;
 }
 
@@ -2920,13 +2954,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        }
 
        if (args->num_binds) {
-               bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
+               bos = kvcalloc(args->num_binds, sizeof(*bos),
+                              GFP_KERNEL | __GFP_ACCOUNT);
                if (!bos) {
                        err = -ENOMEM;
                        goto release_vm_lock;
                }
 
-               ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
+               ops = kvcalloc(args->num_binds, sizeof(*ops),
+                              GFP_KERNEL | __GFP_ACCOUNT);
                if (!ops) {
                        err = -ENOMEM;
                        goto release_vm_lock;
@@ -3067,10 +3103,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        for (i = 0; bos && i < args->num_binds; ++i)
                xe_bo_put(bos[i]);
 
-       kfree(bos);
-       kfree(ops);
+       kvfree(bos);
+       kvfree(ops);
        if (args->num_binds > 1)
-               kfree(bind_ops);
+               kvfree(bind_ops);
 
        return err;
 
@@ -3094,10 +3130,10 @@ put_exec_queue:
        if (q)
                xe_exec_queue_put(q);
 free_objs:
-       kfree(bos);
-       kfree(ops);
+       kvfree(bos);
+       kvfree(ops);
        if (args->num_binds > 1)
-               kfree(bind_ops);
+               kvfree(bind_ops);
        return err;
 }
 
index 5ac9c5bebabc3cf3ecf528f51f8aa92cebc410ef..7300eea5394ba8c1ece10dba63314bf733ee5157 100644 (file)
@@ -19,6 +19,7 @@
 
 struct xe_bo;
 struct xe_sync_entry;
+struct xe_user_fence;
 struct xe_vm;
 
 #define XE_VMA_READ_ONLY       DRM_GPUVA_USERBITS
@@ -29,6 +30,8 @@ struct xe_vm;
 #define XE_VMA_PTE_4K          (DRM_GPUVA_USERBITS << 5)
 #define XE_VMA_PTE_2M          (DRM_GPUVA_USERBITS << 6)
 #define XE_VMA_PTE_1G          (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_PTE_64K         (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_PTE_COMPACT     (DRM_GPUVA_USERBITS << 9)
 
 /** struct xe_userptr - User pointer */
 struct xe_userptr {
@@ -102,6 +105,12 @@ struct xe_vma {
         * @pat_index: The pat index to use when encoding the PTEs for this vma.
         */
        u16 pat_index;
+
+       /**
+        * @ufence: The user fence that was provided with MAP.
+        * Needs to be signalled before UNMAP can be processed.
+        */
+       struct xe_user_fence *ufence;
 };
 
 /**
@@ -286,10 +295,6 @@ struct xe_vm {
 struct xe_vma_op_map {
        /** @vma: VMA to map */
        struct xe_vma *vma;
-       /** @immediate: Immediate bind */
-       bool immediate;
-       /** @read_only: Read only */
-       bool read_only;
        /** @is_null: is NULL binding */
        bool is_null;
        /** @pat_index: The pat index to use for this operation. */
index 42fd504abbcda248e67fd84a64e2f96a2609b4cb..89983d7d73ca1539c19ff4a511c0c179bd07ed91 100644 (file)
@@ -169,6 +169,7 @@ static const struct host1x_info host1x06_info = {
        .num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
        .sid_table = tegra186_sid_table,
        .reserve_vblank_syncpts = false,
+       .skip_reset_assert = true,
 };
 
 static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -680,13 +681,15 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev)
        host1x_intr_stop(host);
        host1x_syncpt_save(host);
 
-       err = reset_control_bulk_assert(host->nresets, host->resets);
-       if (err) {
-               dev_err(dev, "failed to assert reset: %d\n", err);
-               goto resume_host1x;
-       }
+       if (!host->info->skip_reset_assert) {
+               err = reset_control_bulk_assert(host->nresets, host->resets);
+               if (err) {
+                       dev_err(dev, "failed to assert reset: %d\n", err);
+                       goto resume_host1x;
+               }
 
-       usleep_range(1000, 2000);
+               usleep_range(1000, 2000);
+       }
 
        clk_disable_unprepare(host->clk);
        reset_control_bulk_release(host->nresets, host->resets);
index c8e302de76257008aa3fb172da0c3acd4412c572..925a118db23f5751cbbe50db317e98fd4c543414 100644 (file)
@@ -116,6 +116,12 @@ struct host1x_info {
         * the display driver disables VBLANK increments.
         */
        bool reserve_vblank_syncpts;
+       /*
+        * On Tegra186, secure world applications may require access to
+        * host1x during suspend/resume. To allow this, we need to leave
+        * host1x not in reset.
+        */
+       bool skip_reset_assert;
 };
 
 struct host1x {
index 56f7e06c673e4236ba8d1e01957723a800af63a0..adbf674355b2b8a472c03bd60092960cb0c742cf 100644 (file)
@@ -322,125 +322,89 @@ static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
 
        pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
 
-       /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                  sizeof(struct vmbus_channel_gpadl_header) -
                  sizeof(struct gpa_range);
+       pfncount = umin(pagecount, pfnsize / sizeof(u64));
+
+       msgsize = sizeof(struct vmbus_channel_msginfo) +
+                 sizeof(struct vmbus_channel_gpadl_header) +
+                 sizeof(struct gpa_range) + pfncount * sizeof(u64);
+       msgheader =  kzalloc(msgsize, GFP_KERNEL);
+       if (!msgheader)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&msgheader->submsglist);
+       msgheader->msgsize = msgsize;
+
+       gpadl_header = (struct vmbus_channel_gpadl_header *)
+               msgheader->msg;
+       gpadl_header->rangecount = 1;
+       gpadl_header->range_buflen = sizeof(struct gpa_range) +
+                                pagecount * sizeof(u64);
+       gpadl_header->range[0].byte_offset = 0;
+       gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+       for (i = 0; i < pfncount; i++)
+               gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+                       type, kbuffer, size, send_offset, i);
+       *msginfo = msgheader;
+
+       pfnsum = pfncount;
+       pfnleft = pagecount - pfncount;
+
+       /* how many pfns can we fit in a body message */
+       pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+                 sizeof(struct vmbus_channel_gpadl_body);
        pfncount = pfnsize / sizeof(u64);
 
-       if (pagecount > pfncount) {
-               /* we need a gpadl body */
-               /* fill in the header */
+       /*
+        * If pfnleft is zero, everything fits in the header and no body
+        * messages are needed
+        */
+       while (pfnleft) {
+               pfncurr = umin(pfncount, pfnleft);
                msgsize = sizeof(struct vmbus_channel_msginfo) +
-                         sizeof(struct vmbus_channel_gpadl_header) +
-                         sizeof(struct gpa_range) + pfncount * sizeof(u64);
-               msgheader =  kzalloc(msgsize, GFP_KERNEL);
-               if (!msgheader)
-                       goto nomem;
-
-               INIT_LIST_HEAD(&msgheader->submsglist);
-               msgheader->msgsize = msgsize;
-
-               gpadl_header = (struct vmbus_channel_gpadl_header *)
-                       msgheader->msg;
-               gpadl_header->rangecount = 1;
-               gpadl_header->range_buflen = sizeof(struct gpa_range) +
-                                        pagecount * sizeof(u64);
-               gpadl_header->range[0].byte_offset = 0;
-               gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
-               for (i = 0; i < pfncount; i++)
-                       gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
-                               type, kbuffer, size, send_offset, i);
-               *msginfo = msgheader;
-
-               pfnsum = pfncount;
-               pfnleft = pagecount - pfncount;
-
-               /* how many pfns can we fit */
-               pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
-                         sizeof(struct vmbus_channel_gpadl_body);
-               pfncount = pfnsize / sizeof(u64);
-
-               /* fill in the body */
-               while (pfnleft) {
-                       if (pfnleft > pfncount)
-                               pfncurr = pfncount;
-                       else
-                               pfncurr = pfnleft;
-
-                       msgsize = sizeof(struct vmbus_channel_msginfo) +
-                                 sizeof(struct vmbus_channel_gpadl_body) +
-                                 pfncurr * sizeof(u64);
-                       msgbody = kzalloc(msgsize, GFP_KERNEL);
-
-                       if (!msgbody) {
-                               struct vmbus_channel_msginfo *pos = NULL;
-                               struct vmbus_channel_msginfo *tmp = NULL;
-                               /*
-                                * Free up all the allocated messages.
-                                */
-                               list_for_each_entry_safe(pos, tmp,
-                                       &msgheader->submsglist,
-                                       msglistentry) {
-
-                                       list_del(&pos->msglistentry);
-                                       kfree(pos);
-                               }
-
-                               goto nomem;
-                       }
-
-                       msgbody->msgsize = msgsize;
-                       gpadl_body =
-                               (struct vmbus_channel_gpadl_body *)msgbody->msg;
+                         sizeof(struct vmbus_channel_gpadl_body) +
+                         pfncurr * sizeof(u64);
+               msgbody = kzalloc(msgsize, GFP_KERNEL);
 
+               if (!msgbody) {
+                       struct vmbus_channel_msginfo *pos = NULL;
+                       struct vmbus_channel_msginfo *tmp = NULL;
                        /*
-                        * Gpadl is u32 and we are using a pointer which could
-                        * be 64-bit
-                        * This is governed by the guest/host protocol and
-                        * so the hypervisor guarantees that this is ok.
+                        * Free up all the allocated messages.
                         */
-                       for (i = 0; i < pfncurr; i++)
-                               gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
-                                       kbuffer, size, send_offset, pfnsum + i);
-
-                       /* add to msg header */
-                       list_add_tail(&msgbody->msglistentry,
-                                     &msgheader->submsglist);
-                       pfnsum += pfncurr;
-                       pfnleft -= pfncurr;
+                       list_for_each_entry_safe(pos, tmp,
+                               &msgheader->submsglist,
+                               msglistentry) {
+
+                               list_del(&pos->msglistentry);
+                               kfree(pos);
+                       }
+                       kfree(msgheader);
+                       return -ENOMEM;
                }
-       } else {
-               /* everything fits in a header */
-               msgsize = sizeof(struct vmbus_channel_msginfo) +
-                         sizeof(struct vmbus_channel_gpadl_header) +
-                         sizeof(struct gpa_range) + pagecount * sizeof(u64);
-               msgheader = kzalloc(msgsize, GFP_KERNEL);
-               if (msgheader == NULL)
-                       goto nomem;
-
-               INIT_LIST_HEAD(&msgheader->submsglist);
-               msgheader->msgsize = msgsize;
-
-               gpadl_header = (struct vmbus_channel_gpadl_header *)
-                       msgheader->msg;
-               gpadl_header->rangecount = 1;
-               gpadl_header->range_buflen = sizeof(struct gpa_range) +
-                                        pagecount * sizeof(u64);
-               gpadl_header->range[0].byte_offset = 0;
-               gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
-               for (i = 0; i < pagecount; i++)
-                       gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
-                               type, kbuffer, size, send_offset, i);
-
-               *msginfo = msgheader;
+
+               msgbody->msgsize = msgsize;
+               gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+       /*
+        * Gpadl is u32 and we are using a pointer which could
+        * be 64-bit. This is governed by the guest/host protocol,
+        * so the hypervisor guarantees that this is OK.
+        */
+               for (i = 0; i < pfncurr; i++)
+                       gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+                               kbuffer, size, send_offset, pfnsum + i);
+
+               /* add to msg header */
+               list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
+               pfnsum += pfncurr;
+               pfnleft -= pfncurr;
        }
 
        return 0;
-nomem:
-       kfree(msgheader);
-       kfree(msgbody);
-       return -ENOMEM;
 }
 
 /*
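
For illustration, here is a standalone sketch (not part of the patch) of the
arithmetic the rewritten code now performs unconditionally: fill the header to
capacity, then emit zero or more body messages. The size constants are
stand-ins, not the real VMBus limits.

#include <stdio.h>

#define MAX_SIZE_CHANNEL_MESSAGE 240    /* assumed payload limit */
#define HEADER_OVERHEAD           48    /* assumed header + gpa_range size */
#define BODY_OVERHEAD              8    /* assumed gpadl_body size */

static unsigned long umin(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long pagecount = 100;
        unsigned long hdr_pfns  = (MAX_SIZE_CHANNEL_MESSAGE - HEADER_OVERHEAD) / 8;
        unsigned long body_pfns = (MAX_SIZE_CHANNEL_MESSAGE - BODY_OVERHEAD) / 8;
        unsigned long pfnleft   = pagecount - umin(hdr_pfns, pagecount);
        int bodies = 0;

        /* Zero iterations when everything fit in the header, which is
         * what lets the patch drop the old if/else duplication. */
        while (pfnleft) {
                unsigned long pfncurr = umin(body_pfns, pfnleft);

                pfnleft -= pfncurr;
                bodies++;
        }
        printf("header carries %lu PFNs, %d body message(s)\n",
               umin(hdr_pfns, pagecount), bodies);
        return 0;
}
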
index 42aec2c5606af756cc89c88cd50d158a733d784e..9c97c4065fe736e7e076894999447a2def819c24 100644 (file)
@@ -296,6 +296,11 @@ static struct {
        spinlock_t                      lock;
 } host_ts;
 
+static bool timesync_implicit;
+
+module_param(timesync_implicit, bool, 0644);
+MODULE_PARM_DESC(timesync_implicit, "If set, treat SAMPLE as SYNC when the clock is behind");
+
 static inline u64 reftime_to_ns(u64 reftime)
 {
        return (reftime - WLTIMEDELTA) * 100;
@@ -344,6 +349,29 @@ static void hv_set_host_time(struct work_struct *work)
                do_settimeofday64(&ts);
 }
 
+/*
+ * Due to a bug on Hyper-V hosts, the sync flag may not always be sent on resume.
+ * Force a sync if the guest is behind.
+ */
+static inline bool hv_implicit_sync(u64 host_time)
+{
+       struct timespec64 new_ts;
+       struct timespec64 threshold_ts;
+
+       new_ts = ns_to_timespec64(reftime_to_ns(host_time));
+       ktime_get_real_ts64(&threshold_ts);
+
+       threshold_ts.tv_sec += 5;
+
+       /*
+        * True if the guest is behind the host by 5 or more seconds.
+        */
+       if (timespec64_compare(&new_ts, &threshold_ts) >= 0)
+               return true;
+
+       return false;
+}
+
 /*
  * Synchronize time with host after reboot, restore, etc.
  *
@@ -384,7 +412,8 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
        spin_unlock_irqrestore(&host_ts.lock, flags);
 
        /* Schedule work to do do_settimeofday64() */
-       if (adj_flags & ICTIMESYNCFLAG_SYNC)
+       if ((adj_flags & ICTIMESYNCFLAG_SYNC) ||
+           (timesync_implicit && hv_implicit_sync(host_ts.host_time)))
                schedule_work(&adj_time_work);
 }
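
Since the parameter is registered with mode 0644, it should also be writable
at runtime through the module's parameters directory in sysfs. A minimal
userspace analogue of the hv_implicit_sync() check, using plain POSIX
timespec instead of timespec64; the 5-second guard matches the patch,
everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Report "sync needed" when the reference time is at least 5 seconds
 * ahead of the local clock, mirroring timespec64_compare() >= 0. */
static bool needs_implicit_sync(struct timespec host)
{
        struct timespec threshold;

        clock_gettime(CLOCK_REALTIME, &threshold);
        threshold.tv_sec += 5;

        if (host.tv_sec != threshold.tv_sec)
                return host.tv_sec > threshold.tv_sec;
        return host.tv_nsec >= threshold.tv_nsec;
}

int main(void)
{
        struct timespec host;

        clock_gettime(CLOCK_REALTIME, &host);
        host.tv_sec += 10;      /* pretend the host is 10 s ahead */
        printf("implicit sync: %s\n",
               needs_implicit_sync(host) ? "yes" : "no");
        return 0;
}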
 
index b33d5abd9beb234f98fdcb9d50636a3affc93c35..7f7965f3d187884d87a2a822c3479485e17cec62 100644 (file)
@@ -988,7 +988,7 @@ static const struct dev_pm_ops vmbus_pm = {
 };
 
 /* The one and only one */
-static struct bus_type  hv_bus = {
+static const struct bus_type  hv_bus = {
        .name =         "vmbus",
        .match =                vmbus_match,
        .shutdown =             vmbus_shutdown,
index 8d2ef3145bca3c71b0aee2d8a1fb466dd3f9cb3e..9fbab8f023340da24cf9623e8da882849358ccea 100644 (file)
@@ -3512,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
        const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
        const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
        int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
+       int num_reg_temp_config;
        struct device *hwmon_dev;
        struct sensor_template_group tsi_temp_tg;
 
@@ -3594,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                reg_temp_over = NCT6106_REG_TEMP_OVER;
                reg_temp_hyst = NCT6106_REG_TEMP_HYST;
                reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+               num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
                reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
                reg_temp_crit = NCT6106_REG_TEMP_CRIT;
                reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3669,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                reg_temp_over = NCT6106_REG_TEMP_OVER;
                reg_temp_hyst = NCT6106_REG_TEMP_HYST;
                reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+               num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
                reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
                reg_temp_crit = NCT6106_REG_TEMP_CRIT;
                reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3746,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                reg_temp_over = NCT6775_REG_TEMP_OVER;
                reg_temp_hyst = NCT6775_REG_TEMP_HYST;
                reg_temp_config = NCT6775_REG_TEMP_CONFIG;
+               num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG);
                reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE;
                reg_temp_crit = NCT6775_REG_TEMP_CRIT;
 
@@ -3821,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                reg_temp_over = NCT6775_REG_TEMP_OVER;
                reg_temp_hyst = NCT6775_REG_TEMP_HYST;
                reg_temp_config = NCT6776_REG_TEMP_CONFIG;
+               num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG);
                reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE;
                reg_temp_crit = NCT6776_REG_TEMP_CRIT;
 
@@ -3900,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                reg_temp_over = NCT6779_REG_TEMP_OVER;
                reg_temp_hyst = NCT6779_REG_TEMP_HYST;
                reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+               num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
                reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
                reg_temp_crit = NCT6779_REG_TEMP_CRIT;
 
@@ -4034,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                reg_temp_over = NCT6779_REG_TEMP_OVER;
                reg_temp_hyst = NCT6779_REG_TEMP_HYST;
                reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+               num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
                reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
                reg_temp_crit = NCT6779_REG_TEMP_CRIT;
 
@@ -4123,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                reg_temp_over = NCT6798_REG_TEMP_OVER;
                reg_temp_hyst = NCT6798_REG_TEMP_HYST;
                reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+               num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
                reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
                reg_temp_crit = NCT6798_REG_TEMP_CRIT;
 
@@ -4204,7 +4212,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                                  = reg_temp_crit[src - 1];
                        if (reg_temp_crit_l && reg_temp_crit_l[i])
                                data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
-                       data->reg_temp_config[src - 1] = reg_temp_config[i];
+                       if (i < num_reg_temp_config)
+                               data->reg_temp_config[src - 1] = reg_temp_config[i];
                        data->temp_src[src - 1] = src;
                        continue;
                }
@@ -4217,7 +4226,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
                data->reg_temp[0][s] = reg_temp[i];
                data->reg_temp[1][s] = reg_temp_over[i];
                data->reg_temp[2][s] = reg_temp_hyst[i];
-               data->reg_temp_config[s] = reg_temp_config[i];
+               if (i < num_reg_temp_config)
+                       data->reg_temp_config[s] = reg_temp_config[i];
                if (reg_temp_crit_h && reg_temp_crit_h[i])
                        data->reg_temp[3][s] = reg_temp_crit_h[i];
                else if (reg_temp_crit[src - 1])
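
The guard works because some chips reuse a TEMP_CONFIG register table that is
shorter than the loop bound shared with the other temperature tables;
recording each table's length with ARRAY_SIZE() and testing it before
indexing avoids the out-of-bounds read. A runnable sketch of the pattern,
with made-up register values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned short reg_temp[]        = { 0x10, 0x11, 0x12, 0x13 };
static const unsigned short reg_temp_config[] = { 0x20, 0x21 }; /* shorter */

int main(void)
{
        size_t num_reg_temp_config = ARRAY_SIZE(reg_temp_config);

        for (size_t i = 0; i < ARRAY_SIZE(reg_temp); i++) {
                printf("temp reg 0x%02x", reg_temp[i]);
                if (i < num_reg_temp_config) /* the added guard */
                        printf(", config reg 0x%02x", reg_temp_config[i]);
                printf("\n");
        }
        return 0;
}
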
index 5511fd46a65eae66b46f3e3385fe15f0c8970a2b..ce8c4846b7fae4548e36ccd78ce59ce1c90532f3 100644 (file)
@@ -445,6 +445,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
                        irq_status);
                irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
                if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
+                       irq_handled = irq_status;
                        bus->cmd_err = ret;
                        bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
                        goto out_complete;
index 2c36b36d7d516c851c8e9e44c8f90ce11fac0f13..274e987e4cfa0f9b90a576b83d2a96368b7f50a3 100644 (file)
@@ -1416,7 +1416,6 @@ static void i801_add_mux(struct i801_priv *priv)
                lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip,
                                               mux_config->gpios[i], "mux", 0);
        gpiod_add_lookup_table(lookup);
-       priv->lookup = lookup;
 
        /*
         * Register the mux device, we use PLATFORM_DEVID_NONE here
@@ -1430,7 +1429,10 @@ static void i801_add_mux(struct i801_priv *priv)
                                sizeof(struct i2c_mux_gpio_platform_data));
        if (IS_ERR(priv->mux_pdev)) {
                gpiod_remove_lookup_table(lookup);
+               devm_kfree(dev, lookup);
                dev_err(dev, "Failed to register i2c-mux-gpio device\n");
+       } else {
+               priv->lookup = lookup;
        }
 }
 
@@ -1742,9 +1744,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
        i801_enable_host_notify(&priv->adapter);
 
-       i801_probe_optional_slaves(priv);
        /* We ignore errors - multiplexing is optional */
        i801_add_mux(priv);
+       i801_probe_optional_slaves(priv);
 
        pci_set_drvdata(dev, priv);
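
Both halves of the i801 change follow the same rule: publish a pointer into
driver state only after the registration it supports has succeeded, so the
teardown path never unwinds a half-constructed object. A self-contained
sketch of that pattern (all names hypothetical):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct table { int dummy; };
struct state { struct table *lookup; };      /* consumed by teardown */

static struct table *table_alloc(void) { return calloc(1, sizeof(struct table)); }
static void table_free(struct table *t) { free(t); }
static bool register_mux(struct table *t) { (void)t; return false; /* simulate failure */ }

static int setup_mux(struct state *st)
{
        struct table *lookup = table_alloc();

        if (!lookup)
                return -ENOMEM;

        if (!register_mux(lookup)) {
                table_free(lookup);     /* teardown never sees it */
                return -ENODEV;
        }

        st->lookup = lookup;            /* publish only on success */
        return 0;
}

int main(void)
{
        struct state st = { 0 };

        printf("setup_mux: %d, st.lookup = %p\n",
               setup_mux(&st), (void *)st.lookup);
        return 0;
}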
 
index 88a053987403cc6f59c3def73fd52cd11e2b1359..60e813137f8442895b19c6e9d871252cc32c7f24 100644 (file)
@@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
                ctl &= ~I2CR_MTX;
                imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
                imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+
+               /* flag the last byte as processed */
+               i2c_imx_slave_event(i2c_imx,
+                                   I2C_SLAVE_READ_PROCESSED, &value);
+
                i2c_imx_slave_finish_op(i2c_imx);
                return IRQ_HANDLED;
        }
index ec2a8da134e56d01be06588551db26bca47caef4..198afee5233c3d65df7552eb3343fe4d4e5d7488 100644 (file)
@@ -378,11 +378,15 @@ static int wmt_i2c_probe(struct platform_device *pdev)
 
        err = i2c_add_adapter(adap);
        if (err)
-               return err;
+               goto err_disable_clk;
 
        platform_set_drvdata(pdev, i2c_dev);
 
        return 0;
+
+err_disable_clk:
+       clk_disable_unprepare(i2c_dev->clk);
+       return err;
 }
 
 static void wmt_i2c_remove(struct platform_device *pdev)
index 90b7ae6d42b7700c9cb0a328b093352a7cec419e..484fe2e9fb1742b9adbde28d737127a4fe1d6413 100644 (file)
@@ -1429,9 +1429,11 @@ static int adxl367_verify_devid(struct adxl367_state *st)
        unsigned int val;
        int ret;
 
-       ret = regmap_read_poll_timeout(st->regmap, ADXL367_REG_DEVID, val,
-                                      val == ADXL367_DEVID_AD, 1000, 10000);
+       ret = regmap_read(st->regmap, ADXL367_REG_DEVID, &val);
        if (ret)
+               return dev_err_probe(st->dev, ret, "Failed to read dev id\n");
+
+       if (val != ADXL367_DEVID_AD)
                return dev_err_probe(st->dev, -ENODEV,
                                     "Invalid dev id 0x%02X, expected 0x%02X\n",
                                     val, ADXL367_DEVID_AD);
@@ -1510,6 +1512,8 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
        if (ret)
                return ret;
 
+       fsleep(15000);
+
        ret = adxl367_verify_devid(st);
        if (ret)
                return ret;
index b595fe94f3a321b2d8fc986d64d29dbc4f024ccc..62c74bdc0d77bff87b822d1d6ed2502ffbed6687 100644 (file)
@@ -11,7 +11,7 @@
 
 #include "adxl367.h"
 
-#define ADXL367_I2C_FIFO_DATA  0x42
+#define ADXL367_I2C_FIFO_DATA  0x18
 
 struct adxl367_i2c_state {
        struct regmap *regmap;
index 66d4ba088e70ff8c0df12685af77e4372e87985d..d4f9b5d8d28d6d7850f8e5dbf2a4f3c5d9b32d50 100644 (file)
@@ -109,6 +109,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
        /* compute and process only all complete datum */
        nb = fifo_count / bytes_per_datum;
        fifo_count = nb * bytes_per_datum;
+       if (nb == 0)
+               goto end_session;
        /* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
        fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
        inv_sensors_timestamp_interrupt(&st->timestamp, fifo_period, nb, nb, pf->timestamp);
index 676704f9151fcb4eb111cdd89d486a48fab91f28..e6e6e94452a32801ff7427112b33f0a4bb923d2f 100644 (file)
@@ -111,6 +111,7 @@ int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable)
        if (enable) {
                /* reset timestamping */
                inv_sensors_timestamp_reset(&st->timestamp);
+               inv_sensors_timestamp_apply_odr(&st->timestamp, 0, 0, 0);
                /* reset FIFO */
                d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
                ret = regmap_write(st->map, st->reg->user_ctrl, d);
@@ -184,6 +185,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
                if (result)
                        goto error_power_off;
        } else {
+               st->chip_config.gyro_fifo_enable = 0;
+               st->chip_config.accl_fifo_enable = 0;
+               st->chip_config.temp_fifo_enable = 0;
+               st->chip_config.magn_fifo_enable = 0;
                result = inv_mpu6050_prepare_fifo(st, false);
                if (result)
                        goto error_power_off;
index e8a5fed07e88835a019924a0b60bdb16233a95a3..a444d4b2978b581ed8f4cd63b6821e23a45a0560 100644 (file)
@@ -4,6 +4,7 @@
  *
  * Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
  */
+#include <linux/bits.h>
 #include <linux/module.h>
 #include <linux/spi/spi.h>
 #include <linux/err.h>
@@ -35,6 +36,34 @@ static int bmp280_regmap_spi_read(void *context, const void *reg,
        return spi_write_then_read(spi, reg, reg_size, val, val_size);
 }
 
+static int bmp380_regmap_spi_read(void *context, const void *reg,
+                                 size_t reg_size, void *val, size_t val_size)
+{
+       struct spi_device *spi = to_spi_device(context);
+       u8 rx_buf[4];
+       ssize_t status;
+
+       /*
+        * Maximum number of consecutive bytes read for a temperature or
+        * pressure measurement is 3.
+        */
+       if (val_size > 3)
+               return -EINVAL;
+
+       /*
+        * According to the BMP3xx datasheets, for a basic SPI read operation,
+        * the first byte needs to be dropped and the rest are the requested
+        * data.
+        */
+       status = spi_write_then_read(spi, reg, 1, rx_buf, val_size + 1);
+       if (status)
+               return status;
+
+       memcpy(val, rx_buf + 1, val_size);
+
+       return 0;
+}
+
 static struct regmap_bus bmp280_regmap_bus = {
        .write = bmp280_regmap_spi_write,
        .read = bmp280_regmap_spi_read,
@@ -42,10 +71,19 @@ static struct regmap_bus bmp280_regmap_bus = {
        .val_format_endian_default = REGMAP_ENDIAN_BIG,
 };
 
+static struct regmap_bus bmp380_regmap_bus = {
+       .write = bmp280_regmap_spi_write,
+       .read = bmp380_regmap_spi_read,
+       .read_flag_mask = BIT(7),
+       .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+       .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
 static int bmp280_spi_probe(struct spi_device *spi)
 {
        const struct spi_device_id *id = spi_get_device_id(spi);
        const struct bmp280_chip_info *chip_info;
+       struct regmap_bus *bmp_regmap_bus;
        struct regmap *regmap;
        int ret;
 
@@ -58,8 +96,18 @@ static int bmp280_spi_probe(struct spi_device *spi)
 
        chip_info = spi_get_device_match_data(spi);
 
+       switch (chip_info->chip_id[0]) {
+       case BMP380_CHIP_ID:
+       case BMP390_CHIP_ID:
+               bmp_regmap_bus = &bmp380_regmap_bus;
+               break;
+       default:
+               bmp_regmap_bus = &bmp280_regmap_bus;
+               break;
+       }
+
        regmap = devm_regmap_init(&spi->dev,
-                                 &bmp280_regmap_bus,
+                                 bmp_regmap_bus,
                                  &spi->dev,
                                  chip_info->regmap_config);
        if (IS_ERR(regmap)) {
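
BMP3xx parts clock out one dummy byte before the payload on a basic SPI read,
which is why the new .read callback fetches val_size + 1 bytes and copies
from rx_buf + 1. A standalone sketch of that framing against a fake bus:

#include <stdio.h>
#include <string.h>

static void fake_spi_read(unsigned char reg, unsigned char *rx, size_t len)
{
        rx[0] = 0xff;                   /* dummy byte from the device */
        for (size_t i = 1; i < len; i++)
                rx[i] = reg + i;        /* pretend register contents */
}

static int bmp380_style_read(unsigned char reg, void *val, size_t val_size)
{
        unsigned char rx_buf[4];

        if (val_size > 3)               /* 3 bytes covers temp/pressure */
                return -1;

        fake_spi_read(reg, rx_buf, val_size + 1);
        memcpy(val, rx_buf + 1, val_size);      /* drop the dummy byte */
        return 0;
}

int main(void)
{
        unsigned char data[3];

        if (bmp380_style_read(0x04, data, sizeof(data)) == 0)
                printf("payload: %02x %02x %02x\n", data[0], data[1], data[2]);
        return 0;
}
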
index 28c8269ba65d31f1547fc0e35c0fc9465c4b9740..0bba4c5a8d4059ba24eebbc1acb5732ebd1efc31 100644 (file)
@@ -250,18 +250,17 @@ static irqreturn_t dlh_trigger_handler(int irq, void *private)
        struct dlh_state *st = iio_priv(indio_dev);
        int ret;
        unsigned int chn, i = 0;
-       __be32 tmp_buf[2];
+       __be32 tmp_buf[2] = { };
 
        ret = dlh_start_capture_and_read(st);
        if (ret)
                goto out;
 
        for_each_set_bit(chn, indio_dev->active_scan_mask,
-               indio_dev->masklength) {
-               memcpy(tmp_buf + i,
+                        indio_dev->masklength) {
+               memcpy(&tmp_buf[i++],
                        &st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
                        DLH_NUM_DATA_BYTES);
-               i++;
        }
 
        iio_push_to_buffers(indio_dev, tmp_buf);
index 824349659d69dc8e9ea9c1b5254d469628a5f933..ce9c5bae83bf1b934338d465ce25c5fba4e6ab2c 100644 (file)
@@ -401,6 +401,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct ib_mr *ib_mr = &fence->mr->ib_mr;
        struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+       struct bnxt_re_dev *rdev = pd->rdev;
+
+       if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+               return;
 
        memset(wqe, 0, sizeof(*wqe));
        wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
@@ -455,6 +459,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
        struct device *dev = &rdev->en_dev->pdev->dev;
        struct bnxt_re_mr *mr = fence->mr;
 
+       if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+               return;
+
        if (fence->mw) {
                bnxt_re_dealloc_mw(fence->mw);
                fence->mw = NULL;
@@ -486,6 +493,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
        struct ib_mw *mw;
        int rc;
 
+       if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+               return 0;
+
        dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
                                  DMA_BIDIRECTIONAL);
        rc = dma_mapping_error(dev, dma_addr);
@@ -1817,7 +1827,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
        switch (srq_attr_mask) {
        case IB_SRQ_MAX_WR:
                /* SRQ resize is not supported */
-               break;
+               return -EINVAL;
        case IB_SRQ_LIMIT:
                /* Change the SRQ threshold */
                if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
@@ -1832,13 +1842,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
                /* On success, update the shadow */
                srq->srq_limit = srq_attr->srq_limit;
                /* No need to Build and send response back to udata */
-               break;
+               return 0;
        default:
                ibdev_err(&rdev->ibdev,
                          "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
                return -EINVAL;
        }
-       return 0;
 }
 
 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
@@ -2556,11 +2565,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
        wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
        wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
 
-       /* Need unconditional fence for local invalidate
-        * opcode to work as expected.
-        */
-       wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
        if (wr->send_flags & IB_SEND_SIGNALED)
                wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
        if (wr->send_flags & IB_SEND_SOLICITED)
@@ -2583,12 +2587,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
        wqe->frmr.levels = qplib_frpl->hwq.level;
        wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
 
-       /* Need unconditional fence for reg_mr
-        * opcode to function as expected.
-        */
-
-       wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
        if (wr->wr.send_flags & IB_SEND_SIGNALED)
                wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 
@@ -2719,6 +2717,18 @@ bad:
        return rc;
 }
 
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+       /* Need an unconditional fence for non-wire memory opcodes
+        * to work as expected.
+        */
+       if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+           wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+           wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+           wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+               wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
                      const struct ib_send_wr **bad_wr)
 {
@@ -2798,8 +2808,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
                        rc = -EINVAL;
                        goto bad;
                }
-               if (!rc)
+               if (!rc) {
+                       if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+                               bnxt_re_legacy_set_uc_fence(&wqe);
                        rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+               }
 bad:
                if (rc) {
                        ibdev_err(&qp->rdev->ibdev,
index f022c922fae5183cb6860092e5bd0662d22f1764..54b4d2f3a5d885d1f17643a2416420cb6b805b8a 100644 (file)
@@ -280,9 +280,6 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
 
 static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
 {
-
-       if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
-               return;
        rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
        if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
                bnxt_re_set_resource_limits(rdev);
index c98e04fe2ddd477dd8457c09bef64c9339b992f4..439d0c7c5d0cab91e028b380435aaf898f9856c3 100644 (file)
@@ -744,7 +744,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
                                sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
-       srq->threshold = le16_to_cpu(sb->srq_limit);
+       if (!rc)
+               srq->threshold = le16_to_cpu(sb->srq_limit);
        dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
                          sbuf.sb, sbuf.dma_addr);
 
index 68c621ff59d03fea9340eb50a56363d24c9bc105..5a91cbda4aee6f769385d6a4eab9aa191d0e44d4 100644 (file)
@@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd)
                                   "Unable to allocate credit return DMA range for NUMA %d\n",
                                   i);
                        ret = -ENOMEM;
-                       goto done;
+                       goto free_cr_base;
                }
        }
        set_dev_node(&dd->pcidev->dev, dd->node);
@@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd)
        ret = 0;
 done:
        return ret;
+
+free_cr_base:
+       free_credit_return(dd);
+       goto done;
 }
 
 void free_credit_return(struct hfi1_devdata *dd)
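
The fix replaces a leak on mid-loop allocation failure with a full unwind of
the buffers already allocated for earlier NUMA nodes. A runnable sketch of
the allocate-all-or-free-all shape, with malloc() standing in for the DMA
allocator:

#include <stdio.h>
#include <stdlib.h>

#define NODES 4

static void *cr_base[NODES];

static void free_all(void)
{
        for (int i = 0; i < NODES; i++) {
                free(cr_base[i]);
                cr_base[i] = NULL;
        }
}

static int init_all(void)
{
        for (int i = 0; i < NODES; i++) {
                cr_base[i] = malloc(64);
                if (!cr_base[i]) {
                        free_all();     /* the added free_cr_base step */
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        printf("init_all: %d\n", init_all());
        free_all();
        return 0;
}
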
index 6e5ac2023328a7d59d42f6532113dd9a95641b31..b67d23b1f28625c5ed7a4f15f8a07a32d074199b 100644 (file)
@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 {
        int rval = 0;
 
-       if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+       if (unlikely(tx->num_desc == tx->desc_limit)) {
                rval = _extend_sdma_tx_descs(dd, tx);
                if (rval) {
                        __sdma_txclean(dd, tx);
index 8fb752f2eda2999aed4f61bffcb53e105adde9a5..2cb4b96db7212163f1e207bb87dd0b1326ad26e1 100644 (file)
@@ -346,6 +346,7 @@ enum irdma_cqp_op_type {
 #define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES                                0x050b
 #define IRDMA_AE_LLP_DOUBT_REACHABILITY                                        0x050c
 #define IRDMA_AE_LLP_CONNECTION_ESTABLISHED                            0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS                                     0x050f
 #define IRDMA_AE_RESOURCE_EXHAUSTION                                   0x0520
 #define IRDMA_AE_RESET_SENT                                            0x0601
 #define IRDMA_AE_TERMINATE_SENT                                                0x0602
index bd4b2b89644442341226e6c5716f5ddb221ea1a1..ad50b77282f8a1b5352e390080d208d0086152eb 100644 (file)
@@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
                case IRDMA_AE_LLP_TOO_MANY_RETRIES:
                case IRDMA_AE_LCE_QP_CATASTROPHIC:
                case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+               case IRDMA_AE_LLP_TOO_MANY_RNRS:
                case IRDMA_AE_LCE_CQ_CATASTROPHIC:
                case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
                default:
@@ -570,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
        dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
        irq_update_affinity_hint(msix_vec->irq, NULL);
        free_irq(msix_vec->irq, dev_id);
+       if (rf == dev_id) {
+               tasklet_kill(&rf->dpc_tasklet);
+       } else {
+               struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
+
+               tasklet_kill(&iwceq->dpc_tasklet);
+       }
 }
 
 /**
index b5eb8d421988c1abd73cf4eb3a93adc6f2944089..0b046c061742be140251785f60ac25cff73aa2ba 100644 (file)
@@ -839,7 +839,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
 
        if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
            init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
-           init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
+           init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+           init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+           init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
                return -EINVAL;
 
        if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
@@ -2184,9 +2186,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
                info.cq_base_pa = iwcq->kmem.pa;
        }
 
-       if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
-               info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
-                                                (u32)IRDMA_MAX_CQ_READ_THRESH);
+       info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+                                        (u32)IRDMA_MAX_CQ_READ_THRESH);
 
        if (irdma_sc_cq_init(cq, &info)) {
                ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
index f87531318feb807c7c5a216c991e10f197e9f8f4..a78a067e3ce7f3abd260c09f552562050b7b78cc 100644 (file)
@@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
        dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
 
        for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+               if (i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
+                   i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP)
+                       if (!MLX5_CAP_GEN(mdev, roce) ||
+                           !MLX5_CAP_ROCE(mdev, roce_cc_general))
+                               continue;
+
                dbg_cc_params->params[i].offset = i;
                dbg_cc_params->params[i].dev = dev;
                dbg_cc_params->params[i].port_num = port_num;
index 869369cb5b5fa4745aaca7bc5eb7032e684bb132..253fea374a72de1d1143b82601da2ce9caf1cf1f 100644 (file)
@@ -2949,7 +2949,7 @@ DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
                        UVERBS_IDR_ANY_OBJECT,
-                       UVERBS_ACCESS_WRITE,
+                       UVERBS_ACCESS_READ,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(
                MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
index df1d1b0a3ef72bfc938c6cb61b5589e5ef7b7ff4..9947feb7fb8a0bcd1ecf9e5d136e9ea7e326e8e7 100644 (file)
@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
                 */
                copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
                               left);
-               memcpy(eseg->inline_hdr.start, pdata, copysz);
+               memcpy(eseg->inline_hdr.data, pdata, copysz);
                stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
                               sizeof(eseg->inline_hdr.start) + copysz, 16);
                *size += stride / 16;
index 7887a6786ed43d6917a97b2dfbd8770c49383fbd..f118ce0a9a617b4226d0195048299827f2a11d37 100644 (file)
@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
                /* RQ - read access only (0) */
                rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
                                          ureq.rq_len, true, 0, alloc_and_init);
-               if (rc)
+               if (rc) {
+                       ib_umem_release(qp->usq.umem);
+                       qp->usq.umem = NULL;
+                       if (rdma_protocol_roce(&dev->ibdev, 1)) {
+                               qedr_free_pbl(dev, &qp->usq.pbl_info,
+                                             qp->usq.pbl_tbl);
+                       } else {
+                               kfree(qp->usq.pbl_tbl);
+                       }
                        return rc;
+               }
        }
 
        memset(&in_params, 0, sizeof(in_params));
index 58f70cfec45a72abd8df2ba88098a92f7fcacb4a..040234c01be4d5a0cc6fb4a4124af4752f58e181 100644 (file)
@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
 MODULE_PARM_DESC(srpt_srq_size,
                 "Shared receive queue (SRQ) size.");
 
+static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
+{
+       return kstrtou64(buffer, 16, (u64 *)kp->arg);
+}
+
 static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
 {
        return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
 }
-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
-                 0444);
+module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
+                 &srpt_service_guid, 0444);
 MODULE_PARM_DESC(srpt_service_guid,
                 "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
 
@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
 /**
  * srpt_qp_event - QP event callback function
  * @event: Description of the event that occurred.
- * @ch: SRPT RDMA channel.
+ * @ptr: SRPT RDMA channel.
  */
-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+static void srpt_qp_event(struct ib_event *event, void *ptr)
 {
+       struct srpt_rdma_ch *ch = ptr;
+
        pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
                 event->event, ch, ch->sess_name, ch->qp->qp_num,
                 get_ch_state_name(ch->state));
@@ -1807,8 +1813,7 @@ retry:
        ch->cq_size = ch->rq_size + sq_size;
 
        qp_init->qp_context = (void *)ch;
-       qp_init->event_handler
-               = (void(*)(struct ib_event *, void*))srpt_qp_event;
+       qp_init->event_handler = srpt_qp_event;
        qp_init->send_cq = ch->cq;
        qp_init->recv_cq = ch->cq;
        qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
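
With only a getter registered, writes to srpt_service_guid were rejected;
pairing it with a base-16 kstrtou64() setter lets the value round-trip
through the same 0x%016llx format. A kernel-style sketch of the pattern,
under a hypothetical parameter name:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static u64 example_guid;

static int example_set_u64_x(const char *buffer, const struct kernel_param *kp)
{
        /* base 16: accepts the same form the getter emits */
        return kstrtou64(buffer, 16, (u64 *)kp->arg);
}

static int example_get_u64_x(char *buffer, const struct kernel_param *kp)
{
        return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}

module_param_call(example_guid, example_set_u64_x, example_get_u64_x,
                  &example_guid, 0644);
MODULE_PARM_DESC(example_guid, "Illustrative hex-formatted u64 parameter.");
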
index 7c4b2a5cc1b54a1c98a92b38076df6a7b0424b49..14c828adebf7829269b7bace9b1bcbda0c7c506c 100644 (file)
@@ -130,7 +130,12 @@ static const struct xpad_device {
        { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
        { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
        { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
+       { 0x03f0, 0x038D, "HyperX Clutch", 0, XTYPE_XBOX360 },                  /* wired */
+       { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 },                  /* wireless */
        { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
+       { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
+       { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },         /* v2 */
+       { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
        { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -463,6 +468,7 @@ static const struct usb_device_id xpad_table[] = {
        { USB_INTERFACE_INFO('X', 'B', 0) },    /* Xbox USB-IF not-approved class */
        XPAD_XBOX360_VENDOR(0x0079),            /* GPD Win 2 controller */
        XPAD_XBOX360_VENDOR(0x03eb),            /* Wooting Keyboards (Legacy) */
+       XPAD_XBOX360_VENDOR(0x03f0),            /* HP HyperX Xbox 360 controllers */
        XPAD_XBOXONE_VENDOR(0x03f0),            /* HP HyperX Xbox One controllers */
        XPAD_XBOX360_VENDOR(0x044f),            /* Thrustmaster Xbox 360 controllers */
        XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft Xbox 360 controllers */
index ba00ecfbd343bc796fd48f77db92659a1c36bccc..b41fd1240f4312e06935685d00aded64076c3513 100644 (file)
@@ -315,12 +315,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
 
                        error = devm_gpio_request_one(dev, button->gpio,
                                        flags, button->desc ? : DRV_NAME);
-                       if (error) {
-                               dev_err(dev,
-                                       "unable to claim gpio %u, err=%d\n",
-                                       button->gpio, error);
-                               return error;
-                       }
+                       if (error)
+                               return dev_err_probe(dev, error,
+                                                    "unable to claim gpio %u\n",
+                                                    button->gpio);
 
                        bdata->gpiod = gpio_to_desc(button->gpio);
                        if (!bdata->gpiod) {
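
dev_err_probe() returns the error passed to it and suppresses the message for
-EPROBE_DEFER (recording it as the deferral reason instead), which is what
lets each log-and-return pair collapse into a single statement. A
kernel-style sketch with a hypothetical probe function:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct clk *clk = devm_clk_get(&pdev->dev, NULL);

        if (IS_ERR(clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "unable to claim clock\n");
        return 0;
}
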
index 953992b458e9f2c46900204e926da7c665468709..ca150618d32f1863795f390b4ebf4687ea0e36c1 100644 (file)
@@ -19,7 +19,6 @@
  * Copyright (C) 2006     Nicolas Boichat (nicolas@boichat.ch)
  */
 
-#include "linux/usb.h"
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
@@ -194,8 +193,6 @@ enum tp_type {
 
 /* list of device capability bits */
 #define HAS_INTEGRATED_BUTTON  1
-/* maximum number of supported endpoints (currently trackpad and button) */
-#define MAX_ENDPOINTS  2
 
 /* trackpad finger data block size */
 #define FSIZE_TYPE1            (14 * sizeof(__le16))
@@ -894,18 +891,6 @@ static int bcm5974_resume(struct usb_interface *iface)
        return error;
 }
 
-static bool bcm5974_check_endpoints(struct usb_interface *iface,
-                                   const struct bcm5974_config *cfg)
-{
-       u8 ep_addr[MAX_ENDPOINTS + 1] = {0};
-
-       ep_addr[0] = cfg->tp_ep;
-       if (cfg->tp_type == TYPE1)
-               ep_addr[1] = cfg->bt_ep;
-
-       return usb_check_int_endpoints(iface, ep_addr);
-}
-
 static int bcm5974_probe(struct usb_interface *iface,
                         const struct usb_device_id *id)
 {
@@ -918,11 +903,6 @@ static int bcm5974_probe(struct usb_interface *iface,
        /* find the product index */
        cfg = bcm5974_get_config(udev);
 
-       if (!bcm5974_check_endpoints(iface, cfg)) {
-               dev_err(&iface->dev, "Unexpected non-int endpoint\n");
-               return -ENODEV;
-       }
-
        /* allocate memory for our device state and initialize it */
        dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
        input_dev = input_allocate_device();
index 258d5fe3d395c4670088aa0d736cac69c7d24550..42eaebb3bf5cc82efabccff777a8ee23b016bf49 100644 (file)
@@ -978,12 +978,12 @@ static int rmi_driver_remove(struct device *dev)
 
        rmi_disable_irq(rmi_dev, false);
 
-       irq_domain_remove(data->irqdomain);
-       data->irqdomain = NULL;
-
        rmi_f34_remove_sysfs(rmi_dev);
        rmi_free_function_list(rmi_dev);
 
+       irq_domain_remove(data->irqdomain);
+       data->irqdomain = NULL;
+
        return 0;
 }
 
index 05722121f00e70689680ce7a45cc5e953f50210b..4a27fbdb2d8446cb6af2b0e287580615c7da47c1 100644 (file)
@@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
                          struct mm_struct *mm)
 {
        int ret;
-       unsigned long flags;
        struct arm_smmu_ctx_desc *cd;
        struct arm_smmu_mmu_notifier *smmu_mn;
-       struct arm_smmu_master *master;
 
        list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
                if (smmu_mn->mn.mm == mm) {
@@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
                goto err_free_cd;
        }
 
-       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-       list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-               ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
-                                             cd);
-               if (ret) {
-                       list_for_each_entry_from_reverse(
-                               master, &smmu_domain->devices, domain_head)
-                               arm_smmu_write_ctx_desc(
-                                       master, mm_get_enqcmd_pasid(mm), NULL);
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-       if (ret)
-               goto err_put_notifier;
-
        list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
        return smmu_mn;
 
-err_put_notifier:
-       /* Frees smmu_mn */
-       mmu_notifier_put(&smmu_mn->mn);
 err_free_cd:
        arm_smmu_free_shared_cd(cd);
        return ERR_PTR(ret);
@@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 
        list_del(&smmu_mn->list);
 
-       arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
-                                        NULL);
-
        /*
         * If we went through clear(), we've already invalidated, and no
         * new TLB entry can have been formed.
@@ -381,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
        arm_smmu_free_shared_cd(cd);
 }
 
-static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
+                              struct mm_struct *mm)
 {
        int ret;
        struct arm_smmu_bond *bond;
@@ -404,9 +381,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
                goto err_free_bond;
        }
 
+       ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
+       if (ret)
+               goto err_put_notifier;
+
        list_add(&bond->list, &master->bonds);
        return 0;
 
+err_put_notifier:
+       arm_smmu_mmu_notifier_put(bond->smmu_mn);
 err_free_bond:
        kfree(bond);
        return ret;
@@ -568,6 +551,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
        struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 
        mutex_lock(&sva_lock);
+
+       arm_smmu_write_ctx_desc(master, id, NULL);
+
        list_for_each_entry(t, &master->bonds, list) {
                if (t->mm == mm) {
                        bond = t;
@@ -590,7 +576,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
        struct mm_struct *mm = domain->mm;
 
        mutex_lock(&sva_lock);
-       ret = __arm_smmu_sva_bind(dev, mm);
+       ret = __arm_smmu_sva_bind(dev, id, mm);
        mutex_unlock(&sva_lock);
 
        return ret;
index 68b6bc5e7c71016b8d58a6a077e921b27fb51447..6317aaf7b3ab1c7bed6f5f33b9a4bdca14cc171e 100644 (file)
@@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
        arm_smmu_rpm_put(smmu);
 }
 
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
        struct arm_smmu_domain *smmu_domain;
 
+       if (type != IOMMU_DOMAIN_UNMANAGED) {
+               if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
+                       return NULL;
+       }
        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
@@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
        mutex_init(&smmu_domain->init_mutex);
        spin_lock_init(&smmu_domain->cb_lock);
 
-       if (dev) {
-               struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
-
-               if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
-                       kfree(smmu_domain);
-                       return NULL;
-               }
-       }
-
        return &smmu_domain->domain;
 }
 
@@ -1600,7 +1595,7 @@ static struct iommu_ops arm_smmu_ops = {
        .identity_domain        = &arm_smmu_identity_domain,
        .blocked_domain         = &arm_smmu_blocked_domain,
        .capable                = arm_smmu_capable,
-       .domain_alloc_paging    = arm_smmu_domain_alloc_paging,
+       .domain_alloc           = arm_smmu_domain_alloc,
        .probe_device           = arm_smmu_probe_device,
        .release_device         = arm_smmu_release_device,
        .probe_finalize         = arm_smmu_probe_finalize,
index 6fb5f6fceea11fb7865d92d8451a5de98a655556..11652e0bcab3a6e3113c70fb80971853df012f57 100644 (file)
@@ -396,8 +396,6 @@ static int domain_update_device_node(struct dmar_domain *domain)
        return nid;
 }
 
-static void domain_update_iotlb(struct dmar_domain *domain);
-
 /* Return the super pagesize bitmap if supported. */
 static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
 {
@@ -1218,7 +1216,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
        return NULL;
 }
 
-static void domain_update_iotlb(struct dmar_domain *domain)
+void domain_update_iotlb(struct dmar_domain *domain)
 {
        struct dev_pasid_info *dev_pasid;
        struct device_domain_info *info;
@@ -1368,6 +1366,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
        spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+                                   unsigned long pfn, unsigned int pages,
+                                   int ih)
+{
+       unsigned int aligned_pages = __roundup_pow_of_two(pages);
+       unsigned long bitmask = aligned_pages - 1;
+       unsigned int mask = ilog2(aligned_pages);
+       u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+       /*
+        * PSI masks the low order bits of the base address. If the
+        * address isn't aligned to the mask, then compute a mask value
+        * needed to ensure the target range is flushed.
+        */
+       if (unlikely(bitmask & pfn)) {
+               unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+               /*
+                * Since end_pfn <= pfn + bitmask, the only way bits
+                * higher than bitmask can differ in pfn and end_pfn is
+                * by carrying. This means after masking out bitmask,
+                * high bits starting with the first set bit in
+                * shared_bits are all equal in both pfn and end_pfn.
+                */
+               shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+               mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+       }
+
+       /*
+        * Fallback to domain selective flush if no PSI support or
+        * the size is too big.
+        */
+       if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+               iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                        DMA_TLB_DSI_FLUSH);
+       else
+               iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+                                        DMA_TLB_PSI_FLUSH);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                                  struct dmar_domain *domain,
                                  unsigned long pfn, unsigned int pages,
@@ -1384,42 +1422,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
        if (ih)
                ih = 1 << 6;
 
-       if (domain->use_first_level) {
+       if (domain->use_first_level)
                domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
-       } else {
-               unsigned long bitmask = aligned_pages - 1;
-
-               /*
-                * PSI masks the low order bits of the base address. If the
-                * address isn't aligned to the mask, then compute a mask value
-                * needed to ensure the target range is flushed.
-                */
-               if (unlikely(bitmask & pfn)) {
-                       unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
-                       /*
-                        * Since end_pfn <= pfn + bitmask, the only way bits
-                        * higher than bitmask can differ in pfn and end_pfn is
-                        * by carrying. This means after masking out bitmask,
-                        * high bits starting with the first set bit in
-                        * shared_bits are all equal in both pfn and end_pfn.
-                        */
-                       shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
-                       mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
-               }
-
-               /*
-                * Fallback to domain selective flush if no PSI support or
-                * the size is too big.
-                */
-               if (!cap_pgsel_inv(iommu->cap) ||
-                   mask > cap_max_amask_val(iommu->cap))
-                       iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                                       DMA_TLB_DSI_FLUSH);
-               else
-                       iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-                                                       DMA_TLB_PSI_FLUSH);
-       }
+       else
+               __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
 
        /*
         * In caching mode, changes of pages from non-present to present require
@@ -1443,6 +1449,46 @@ static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *
                iommu_flush_write_buffer(iommu);
 }
 
+/*
+ * Flush the relevant caches in nested translation if the domain
+ * also serves as a parent
+ */
+static void parent_domain_flush(struct dmar_domain *domain,
+                               unsigned long pfn,
+                               unsigned long pages, int ih)
+{
+       struct dmar_domain *s1_domain;
+
+       spin_lock(&domain->s1_lock);
+       list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+               struct device_domain_info *device_info;
+               struct iommu_domain_info *info;
+               unsigned long flags;
+               unsigned long i;
+
+               xa_for_each(&s1_domain->iommu_array, i, info)
+                       __iommu_flush_iotlb_psi(info->iommu, info->did,
+                                               pfn, pages, ih);
+
+               if (!s1_domain->has_iotlb_device)
+                       continue;
+
+               spin_lock_irqsave(&s1_domain->lock, flags);
+               list_for_each_entry(device_info, &s1_domain->devices, link)
+                       /*
+                        * The address translation cache on the device side
+                        * caches the result of nested translation. There is
+                        * no easy way to identify the exact set of nested
+                        * translations affected by a change in S2, so just
+                        * flush the entire device cache.
+                        */
+                       __iommu_flush_dev_iotlb(device_info, 0,
+                                               MAX_AGAW_PFN_WIDTH);
+               spin_unlock_irqrestore(&s1_domain->lock, flags);
+       }
+       spin_unlock(&domain->s1_lock);
+}
+
 static void intel_flush_iotlb_all(struct iommu_domain *domain)
 {
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
@@ -1462,6 +1508,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
                if (!cap_caching_mode(iommu->cap))
                        iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
        }
+
+       if (dmar_domain->nested_parent)
+               parent_domain_flush(dmar_domain, 0, -1, 0);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1985,6 +2034,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
                                iommu_flush_iotlb_psi(info->iommu, domain,
                                                      start_pfn, lvl_pages,
                                                      0, 0);
+                       if (domain->nested_parent)
+                               parent_domain_flush(domain, start_pfn,
+                                                   lvl_pages, 0);
                }
 
                pte++;
@@ -3883,6 +3935,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
        bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
        bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
        struct intel_iommu *iommu = info->iommu;
+       struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;
 
        /* Must be NESTING domain */
@@ -3908,11 +3961,16 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
        if (!domain)
                return ERR_PTR(-ENOMEM);
 
-       if (nested_parent)
-               to_dmar_domain(domain)->nested_parent = true;
+       dmar_domain = to_dmar_domain(domain);
+
+       if (nested_parent) {
+               dmar_domain->nested_parent = true;
+               INIT_LIST_HEAD(&dmar_domain->s1_domains);
+               spin_lock_init(&dmar_domain->s1_lock);
+       }
 
        if (dirty_tracking) {
-               if (to_dmar_domain(domain)->use_first_level) {
+               if (dmar_domain->use_first_level) {
                        iommu_domain_free(domain);
                        return ERR_PTR(-EOPNOTSUPP);
                }
@@ -3924,8 +3982,12 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+       WARN_ON(dmar_domain->nested_parent &&
+               !list_empty(&dmar_domain->s1_domains));
        if (domain != &si_domain->domain)
-               domain_exit(to_dmar_domain(domain));
+               domain_exit(dmar_domain);
 }
 
 int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -4107,6 +4169,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
                                      start_pfn, nrpages,
                                      list_empty(&gather->freelist), 0);
 
+       if (dmar_domain->nested_parent)
+               parent_domain_flush(dmar_domain, start_pfn, nrpages,
+                                   list_empty(&gather->freelist));
        put_pages_list(&gather->freelist);
 }
 
@@ -4664,21 +4729,70 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
        return vtd;
 }
 
+/*
+ * Set dirty tracking for the device list of a domain. The caller must
+ * hold the domain->lock when calling it.
+ */
+static int device_set_dirty_tracking(struct list_head *devices, bool enable)
+{
+       struct device_domain_info *info;
+       int ret = 0;
+
+       list_for_each_entry(info, devices, link) {
+               ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev,
+                                                      IOMMU_NO_PASID, enable);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
+                                           bool enable)
+{
+       struct dmar_domain *s1_domain;
+       unsigned long flags;
+       int ret;
+
+       spin_lock(&domain->s1_lock);
+       list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+               spin_lock_irqsave(&s1_domain->lock, flags);
+               ret = device_set_dirty_tracking(&s1_domain->devices, enable);
+               spin_unlock_irqrestore(&s1_domain->lock, flags);
+               if (ret)
+                       goto err_unwind;
+       }
+       spin_unlock(&domain->s1_lock);
+       return 0;
+
+err_unwind:
+       list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+               spin_lock_irqsave(&s1_domain->lock, flags);
+               device_set_dirty_tracking(&s1_domain->devices,
+                                         domain->dirty_tracking);
+               spin_unlock_irqrestore(&s1_domain->lock, flags);
+       }
+       spin_unlock(&domain->s1_lock);
+       return ret;
+}
+
 static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
                                          bool enable)
 {
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-       struct device_domain_info *info;
        int ret;
 
        spin_lock(&dmar_domain->lock);
        if (dmar_domain->dirty_tracking == enable)
                goto out_unlock;
 
-       list_for_each_entry(info, &dmar_domain->devices, link) {
-               ret = intel_pasid_setup_dirty_tracking(info->iommu,
-                                                      info->domain, info->dev,
-                                                      IOMMU_NO_PASID, enable);
+       ret = device_set_dirty_tracking(&dmar_domain->devices, enable);
+       if (ret)
+               goto err_unwind;
+
+       if (dmar_domain->nested_parent) {
+               ret = parent_domain_set_dirty_tracking(dmar_domain, enable);
                if (ret)
                        goto err_unwind;
        }
@@ -4690,10 +4804,8 @@ out_unlock:
        return 0;
 
 err_unwind:
-       list_for_each_entry(info, &dmar_domain->devices, link)
-               intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain,
-                                                info->dev, IOMMU_NO_PASID,
-                                                dmar_domain->dirty_tracking);
+       device_set_dirty_tracking(&dmar_domain->devices,
+                                 dmar_domain->dirty_tracking);
        spin_unlock(&dmar_domain->lock);
        return ret;
 }
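
The dirty-tracking refactor above is a classic all-or-nothing walk: apply the new setting to every device on the list, and on the first failure walk the list again restoring the previous value. A rough userspace sketch of that unwind pattern (names invented; the kernel's locking and PASID plumbing elided):

#include <stdbool.h>
#include <stdio.h>

struct dev { const char *name; bool dirty; bool fail; };

/* Try to apply 'enable' to every device; stop at the first failure. */
static int set_dirty_tracking(struct dev *devs, int n, bool enable)
{
        for (int i = 0; i < n; i++) {
                if (devs[i].fail)
                        return -1;      /* simulated hardware error */
                devs[i].dirty = enable;
        }
        return 0;
}

/* All-or-nothing: on failure, restore the saved value everywhere. */
static int domain_set_dirty_tracking(struct dev *devs, int n,
                                     bool *domain_dirty, bool enable)
{
        if (set_dirty_tracking(devs, n, enable)) {
                set_dirty_tracking(devs, n, *domain_dirty);     /* unwind */
                return -1;
        }
        *domain_dirty = enable;
        return 0;
}

int main(void)
{
        struct dev devs[] = {
                { "dev0", false, false },
                { "dev1", false, true },        /* this one fails */
        };
        bool domain_dirty = false;

        if (domain_set_dirty_tracking(devs, 2, &domain_dirty, true))
                printf("enable failed, dev0 rolled back to %d\n",
                       devs[0].dirty);
        return 0;
}

Factoring the per-device loop into one helper is what lets the parent domain reuse it for every attached stage-1 domain without duplicating the unwind logic.
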
index d02f916d8e59a914d2441fa2b81af9ac31dfbf86..4145c04cb1c6818fea0ce420d31c41acec8836a3 100644 (file)
@@ -627,6 +627,10 @@ struct dmar_domain {
                        int             agaw;
                        /* maximum mapped address */
                        u64             max_addr;
+                       /* Protect the s1_domains list */
+                       spinlock_t      s1_lock;
+                       /* Track s1_domains nested on this domain */
+                       struct list_head s1_domains;
                };
 
                /* Nested user domain */
@@ -637,6 +641,8 @@ struct dmar_domain {
                        unsigned long s1_pgtbl;
                        /* page table attributes */
                        struct iommu_hwpt_vtd_s1 s1_cfg;
+                       /* link to parent domain siblings */
+                       struct list_head s2_link;
                };
        };
 
@@ -1060,6 +1066,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
  */
 #define QI_OPT_WAIT_DRAIN              BIT(0)
 
+void domain_update_iotlb(struct dmar_domain *domain);
 int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
 void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
 void device_block_translation(struct device *dev);
index f26c7f1c46ccaf43b0a4db5209b5c85b484277ed..a7d68f3d518acd9fc5af6f03ebbf71c825a4afcc 100644 (file)
@@ -65,12 +65,20 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
        list_add(&info->link, &dmar_domain->devices);
        spin_unlock_irqrestore(&dmar_domain->lock, flags);
 
+       domain_update_iotlb(dmar_domain);
+
        return 0;
 }
 
 static void intel_nested_domain_free(struct iommu_domain *domain)
 {
-       kfree(to_dmar_domain(domain));
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       struct dmar_domain *s2_domain = dmar_domain->s2_domain;
+
+       spin_lock(&s2_domain->s1_lock);
+       list_del(&dmar_domain->s2_link);
+       spin_unlock(&s2_domain->s1_lock);
+       kfree(dmar_domain);
 }
 
 static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
@@ -95,7 +103,7 @@ static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
 }
 
 static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
-                                    unsigned long npages, bool ih)
+                                    u64 npages, bool ih)
 {
        struct iommu_domain_info *info;
        unsigned int mask;
@@ -201,5 +209,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
        spin_lock_init(&domain->lock);
        xa_init(&domain->iommu_array);
 
+       spin_lock(&s2_domain->s1_lock);
+       list_add(&domain->s2_link, &s2_domain->s1_domains);
+       spin_unlock(&s2_domain->s1_lock);
+
        return &domain->domain;
 }
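
The nested-domain changes hang each stage-1 domain off its parent's s1_domains list at allocation time and unlink it in the free path, always under the parent's s1_lock, so the parent can find and flush its children. A self-contained sketch of that intrusive linking pattern, with a hand-rolled singly linked list and a pthread mutex standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct s2_domain {
        pthread_mutex_t s1_lock;
        struct s1_domain *s1_head;      /* singly linked for brevity */
};

struct s1_domain {
        struct s2_domain *parent;
        struct s1_domain *next;
};

static struct s1_domain *s1_alloc(struct s2_domain *parent)
{
        struct s1_domain *d = calloc(1, sizeof(*d));

        d->parent = parent;
        pthread_mutex_lock(&parent->s1_lock);
        d->next = parent->s1_head;      /* link onto the parent */
        parent->s1_head = d;
        pthread_mutex_unlock(&parent->s1_lock);
        return d;
}

static void s1_free(struct s1_domain *d)
{
        struct s2_domain *parent = d->parent;
        struct s1_domain **p;

        pthread_mutex_lock(&parent->s1_lock);
        for (p = &parent->s1_head; *p; p = &(*p)->next) {
                if (*p == d) {
                        *p = d->next;   /* unlink before freeing */
                        break;
                }
        }
        pthread_mutex_unlock(&parent->s1_lock);
        free(d);
}

int main(void)
{
        struct s2_domain parent = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct s1_domain *a = s1_alloc(&parent), *b = s1_alloc(&parent);

        s1_free(a);
        s1_free(b);
        printf("list empty: %d\n", parent.s1_head == NULL);
        return 0;
}
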
index 3239cefa4c337897dda048ebec7aeb1fc075a955..108158e2b907d0744467d88e8ec35b419185555b 100644 (file)
@@ -428,7 +428,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
  * Set up dirty tracking on a second-stage-only or nested translation type.
  */
 int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
-                                    struct dmar_domain *domain,
                                     struct device *dev, u32 pasid,
                                     bool enabled)
 {
@@ -445,7 +444,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
                return -ENODEV;
        }
 
-       did = domain_id_iommu(domain, iommu);
+       did = pasid_get_domain_id(pte);
        pgtt = pasid_pte_get_pgtt(pte);
        if (pgtt != PASID_ENTRY_PGTT_SL_ONLY &&
            pgtt != PASID_ENTRY_PGTT_NESTED) {
@@ -658,6 +657,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
        pasid_set_domain_id(pte, did);
        pasid_set_address_width(pte, s2_domain->agaw);
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+       if (s2_domain->dirty_tracking)
+               pasid_set_ssade(pte);
        pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
        pasid_set_present(pte);
        spin_unlock(&iommu->lock);
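
The pasid.c change stops recomputing the domain ID from the domain and instead reads it back out of the live PASID entry, so dirty-tracking updates use whatever DID the entry was actually installed with (for a nested entry, the parent's). A sketch of reading a field back from a packed entry; the bit layout below is invented for illustration, not VT-d's:

#include <stdint.h>
#include <stdio.h>

/* Invented layout: DID in bits 15:0 of word 1 of the entry. */
struct pasid_entry { uint64_t val[8]; };

static void pasid_set_domain_id(struct pasid_entry *pte, uint16_t did)
{
        pte->val[1] = (pte->val[1] & ~0xffffULL) | did;
}

static uint16_t pasid_get_domain_id(const struct pasid_entry *pte)
{
        return pte->val[1] & 0xffff;    /* read what was installed */
}

int main(void)
{
        struct pasid_entry pte = { { 0 } };

        pasid_set_domain_id(&pte, 42);
        /* Later code trusts the entry, not a recomputed value. */
        printf("did = %u\n", (unsigned)pasid_get_domain_id(&pte));
        return 0;
}
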
index 8d40d4c66e3198a7ce90c83168a3f86491d79f71..487ede039bdde5733ec1f6af0905ade24c806200 100644 (file)
@@ -307,7 +307,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
                                   struct dmar_domain *domain,
                                   struct device *dev, u32 pasid);
 int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
-                                    struct dmar_domain *domain,
                                     struct device *dev, u32 pasid,
                                     bool enabled);
 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
index c3fc9201d0be97e59395750cda0fc29940c0b844..65814cbc84020021df67d0b7dab9db2c61351b56 100644 (file)
@@ -41,6 +41,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
        }
        iommu_mm->pasid = pasid;
        INIT_LIST_HEAD(&iommu_mm->sva_domains);
+       INIT_LIST_HEAD(&iommu_mm->sva_handles);
        /*
         * Make sure the write to mm->iommu_mm is not reordered in front of
         * initialization to iommu_mm fields. If it does, readers may see a
@@ -82,6 +83,14 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
                goto out_unlock;
        }
 
+       list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
+               if (handle->dev == dev) {
+                       refcount_inc(&handle->users);
+                       mutex_unlock(&iommu_sva_lock);
+                       return handle;
+               }
+       }
+
        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (!handle) {
                ret = -ENOMEM;
@@ -111,6 +120,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
        list_add(&domain->next, &mm->iommu_mm->sva_domains);
 
 out:
+       refcount_set(&handle->users, 1);
+       list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
        mutex_unlock(&iommu_sva_lock);
        handle->dev = dev;
        handle->domain = domain;
@@ -141,6 +152,12 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
        struct device *dev = handle->dev;
 
        mutex_lock(&iommu_sva_lock);
+       if (!refcount_dec_and_test(&handle->users)) {
+               mutex_unlock(&iommu_sva_lock);
+               return;
+       }
+       list_del(&handle->handle_item);
+
        iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
        if (--domain->users == 0) {
                list_del(&domain->next);
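
The SVA change replaces "always allocate a new handle" with "look up an existing (dev, mm) handle and take a reference"; unbind only tears down on the last put. A hedged userspace sketch of that lookup-or-create refcount idiom (sva_bind()/sva_unbind() are invented names, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct handle {
        int dev_id;
        int users;
        struct handle *next;
};

static struct handle *handles;          /* all handles for one mm */

static struct handle *sva_bind(int dev_id)
{
        struct handle *h;

        for (h = handles; h; h = h->next) {
                if (h->dev_id == dev_id) {
                        h->users++;     /* reuse the existing binding */
                        return h;
                }
        }
        h = calloc(1, sizeof(*h));
        h->dev_id = dev_id;
        h->users = 1;
        h->next = handles;
        handles = h;
        return h;
}

static void sva_unbind(struct handle *h)
{
        struct handle **p;

        if (--h->users)                 /* not the last user yet */
                return;
        for (p = &handles; *p; p = &(*p)->next) {
                if (*p == h) {
                        *p = h->next;
                        break;
                }
        }
        free(h);                        /* real teardown happens here */
}

int main(void)
{
        struct handle *a = sva_bind(7), *b = sva_bind(7);

        printf("same handle: %d, users: %d\n", a == b, a->users);
        sva_unbind(a);
        sva_unbind(b);
        return 0;
}
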
index 3f3f1fa1a0a946a43eb48ee324ab4979683bb566..33d142f8057d70a77f44e842afdd84b1bee0a970 100644 (file)
@@ -263,7 +263,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
 
        if (cmd->__reserved)
                return -EOPNOTSUPP;
-       if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len)
+       if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
+           (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
                return -EINVAL;
 
        idev = iommufd_get_device(ucmd, cmd->dev_id);
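
The tightened hwpt check rejects both halves of an inconsistent pair: a payload length with no data type, and a data type with no payload. A tiny sketch of the truth table (type names and values are illustrative):

#include <stdio.h>

enum { HWPT_DATA_NONE = 0, HWPT_DATA_VTD_S1 = 1 };

/* Both "payload without a type" and "type without a payload" are invalid. */
static int check_hwpt_args(int data_type, unsigned int data_len)
{
        if ((data_type == HWPT_DATA_NONE && data_len) ||
            (data_type != HWPT_DATA_NONE && !data_len))
                return -1;              /* -EINVAL in the kernel */
        return 0;
}

int main(void)
{
        printf("%d %d %d %d\n",
               check_hwpt_args(HWPT_DATA_NONE, 0),     /*  0: ok    */
               check_hwpt_args(HWPT_DATA_NONE, 8),     /* -1: junk  */
               check_hwpt_args(HWPT_DATA_VTD_S1, 8),   /*  0: ok    */
               check_hwpt_args(HWPT_DATA_VTD_S1, 0));  /* -1: empty */
        return 0;
}
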
index 504ac1b01b2d2ab45fbc22fde2bdcf324ce2d973..05fd9d3abf1b809614cced9e9387679797866103 100644 (file)
@@ -1330,20 +1330,23 @@ out_unlock:
 
 int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
 {
+       u32 new_id;
        int rc;
 
        down_write(&iopt->domains_rwsem);
        down_write(&iopt->iova_rwsem);
-       rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access,
-                     xa_limit_16b, GFP_KERNEL_ACCOUNT);
+       rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
+                     GFP_KERNEL_ACCOUNT);
+
        if (rc)
                goto out_unlock;
 
        rc = iopt_calculate_iova_alignment(iopt);
        if (rc) {
-               xa_erase(&iopt->access_list, access->iopt_access_list_id);
+               xa_erase(&iopt->access_list, new_id);
                goto out_unlock;
        }
+       access->iopt_access_list_id = new_id;
 
 out_unlock:
        up_write(&iopt->iova_rwsem);
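
The io_pagetable fix is a publish-on-success pattern: allocate into a local variable and only store the new ID into the shared object once every later step has succeeded, so nothing ever observes a half-committed value. A minimal sketch under an assumed, simplified API:

#include <stdio.h>

struct access { unsigned int list_id; };

static unsigned int next_id = 1;

static int alloc_id(unsigned int *out)
{
        *out = next_id++;               /* stand-in for xa_alloc() */
        return 0;
}

static int recalc_alignment(void)
{
        return -1;                      /* simulate a late failure */
}

static int add_access(struct access *a)
{
        unsigned int new_id;            /* local until fully committed */
        int rc;

        rc = alloc_id(&new_id);
        if (rc)
                return rc;
        rc = recalc_alignment();
        if (rc) {
                /* the kernel also erases the freshly allocated id here */
                return rc;              /* a->list_id never touched */
        }
        a->list_id = new_id;            /* publish only on success */
        return 0;
}

int main(void)
{
        struct access a = { .list_id = 42 };    /* pre-existing valid id */

        if (add_access(&a))
                printf("failed, old id preserved: %u\n", a.list_id);
        return 0;
}
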
index 482d4059f5db6aed38ee8aa60f25b791f1e7556d..e854d3f672051b5223e0fec8af741abf03bbffbd 100644 (file)
@@ -45,6 +45,7 @@ enum {
 
 enum {
        MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0,
+       MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1,
 };
 
 enum {
index 0a92c9eeaf7f50a6fe05c266b9ec39d1021844a9..db8c46bee1559ac46fb148d2474668b5a994ae15 100644 (file)
@@ -100,7 +100,7 @@ struct iova_bitmap {
        struct iova_bitmap_map mapped;
 
        /* userspace address of the bitmap */
-       u64 __user *bitmap;
+       u8 __user *bitmap;
 
        /* u64 index that @mapped points to */
        unsigned long mapped_base_index;
@@ -113,6 +113,9 @@ struct iova_bitmap {
 
        /* length of the IOVA range for the whole bitmap */
        size_t length;
+
+       /* length of the IOVA range set ahead of the pinned pages */
+       unsigned long set_ahead_length;
 };
 
 /*
@@ -162,7 +165,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
 {
        struct iova_bitmap_map *mapped = &bitmap->mapped;
        unsigned long npages;
-       u64 __user *addr;
+       u8 __user *addr;
        long ret;
 
        /*
@@ -175,18 +178,19 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
                               bitmap->mapped_base_index) *
                               sizeof(*bitmap->bitmap), PAGE_SIZE);
 
-       /*
-        * We always cap at max number of 'struct page' a base page can fit.
-        * This is, for example, on x86 means 2M of bitmap data max.
-        */
-       npages = min(npages,  PAGE_SIZE / sizeof(struct page *));
-
        /*
         * Bitmap address to be pinned is calculated via pointer arithmetic
         * with bitmap u64 word index.
         */
        addr = bitmap->bitmap + bitmap->mapped_base_index;
 
+       /*
+        * We always cap at max number of 'struct page' a base page can fit.
+        * On x86, for example, this means at most 2M of bitmap data.
+        */
+       npages = min(npages + !!offset_in_page(addr),
+                    PAGE_SIZE / sizeof(struct page *));
+
        ret = pin_user_pages_fast((unsigned long)addr, npages,
                                  FOLL_WRITE, mapped->pages);
        if (ret <= 0)
@@ -247,7 +251,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
 
        mapped = &bitmap->mapped;
        mapped->pgshift = __ffs(page_size);
-       bitmap->bitmap = data;
+       bitmap->bitmap = (u8 __user *)data;
        bitmap->mapped_total_index =
                iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
        bitmap->iova = iova;
@@ -304,7 +308,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
 
        remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
        remaining = min_t(unsigned long, remaining,
-                         bytes / sizeof(*bitmap->bitmap));
+                         DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));
 
        return remaining;
 }
@@ -341,6 +345,32 @@ static bool iova_bitmap_done(struct iova_bitmap *bitmap)
        return bitmap->mapped_base_index >= bitmap->mapped_total_index;
 }
 
+static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap,
+                                size_t set_ahead_length)
+{
+       int ret = 0;
+
+       while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) {
+               unsigned long length = iova_bitmap_mapped_length(bitmap);
+               unsigned long iova = iova_bitmap_mapped_iova(bitmap);
+
+               ret = iova_bitmap_get(bitmap);
+               if (ret)
+                       break;
+
+               length = min(length, set_ahead_length);
+               iova_bitmap_set(bitmap, iova, length);
+
+               set_ahead_length -= length;
+               bitmap->mapped_base_index +=
+                       iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+               iova_bitmap_put(bitmap);
+       }
+
+       bitmap->set_ahead_length = 0;
+       return ret;
+}
+
 /*
  * Advances to the next range, releases the current pinned
  * pages and pins the next set of bitmap pages.
@@ -357,6 +387,15 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap)
        if (iova_bitmap_done(bitmap))
                return 0;
 
+       /* Iterate, set and skip any bits requested for next iteration */
+       if (bitmap->set_ahead_length) {
+               int ret;
+
+               ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length);
+               if (ret)
+                       return ret;
+       }
+
        /* When advancing the index we pin the next set of bitmap pages */
        return iova_bitmap_get(bitmap);
 }
@@ -409,6 +448,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
                        mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
        unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
                        mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+       unsigned long last_page_idx = mapped->npages - 1;
 
        do {
                unsigned int page_idx = cur_bit / BITS_PER_PAGE;
@@ -417,10 +457,18 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
                                         last_bit - cur_bit + 1);
                void *kaddr;
 
+               if (unlikely(page_idx > last_page_idx))
+                       break;
+
                kaddr = kmap_local_page(mapped->pages[page_idx]);
                bitmap_set(kaddr, offset, nbits);
                kunmap_local(kaddr);
                cur_bit += nbits;
        } while (cur_bit <= last_bit);
+
+       if (unlikely(cur_bit <= last_bit)) {
+               bitmap->set_ahead_length =
+                       ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift);
+       }
 }
 EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD);
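
Two of the iova_bitmap fixes are pure arithmetic: the number of pages to pin must account for the bitmap pointer not being page-aligned (one extra page whenever the address has an in-page offset), and any bits that didn't fit in the pinned window are deferred through set_ahead_length. A sketch of the pinning math, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long offset_in_page(unsigned long addr)
{
        return addr & ~PAGE_MASK;
}

/* Pages needed to cover 'bytes' of bitmap starting at user address 'addr'. */
static unsigned long pin_npages(unsigned long addr, unsigned long bytes)
{
        unsigned long npages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;

        /* An unaligned start can spill the same bytes across one more page. */
        npages += !!offset_in_page(addr);
        return npages;
}

int main(void)
{
        /* 4096 bytes starting mid-page straddle two pages, not one. */
        printf("aligned:   %lu pages\n", pin_npages(0x10000, 4096));
        printf("unaligned: %lu pages\n", pin_npages(0x10800, 4096));
        return 0;
}
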
index d9e9920c7eba413eaf25b7840eefdf36a3999a9e..7a2199470f3121da91e060bca82315a6944e37b8 100644 (file)
@@ -36,11 +36,12 @@ static struct mock_bus_type iommufd_mock_bus_type = {
        },
 };
 
-static atomic_t mock_dev_num;
+static DEFINE_IDA(mock_dev_ida);
 
 enum {
        MOCK_DIRTY_TRACK = 1,
        MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
+       MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
 
        /*
         * Like a real page table alignment requires the low bits of the address
@@ -53,6 +54,7 @@ enum {
        MOCK_PFN_START_IOVA = _MOCK_PFN_START,
        MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
        MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
+       MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
 };
 
 /*
@@ -61,8 +63,8 @@ enum {
  * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
  * value. This has a much smaller randomization space and syzkaller can hit it.
  */
-static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
-                                               u64 *iova)
+static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
+                                                 u64 *iova)
 {
        struct syz_layout {
                __u32 nth_area;
@@ -86,6 +88,21 @@ static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
        return 0;
 }
 
+static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
+                                               u64 *iova)
+{
+       unsigned long ret;
+
+       mutex_lock(&access->ioas_lock);
+       if (!access->ioas) {
+               mutex_unlock(&access->ioas_lock);
+               return 0;
+       }
+       ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
+       mutex_unlock(&access->ioas_lock);
+       return ret;
+}
+
 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
                                   unsigned int ioas_id, u64 *iova, u32 *flags)
 {
@@ -98,7 +115,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
        ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
        if (IS_ERR(ioas))
                return;
-       *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
+       *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
        iommufd_put_object(ucmd->ictx, &ioas->obj);
 }
 
@@ -121,6 +138,7 @@ enum selftest_obj_type {
 struct mock_dev {
        struct device dev;
        unsigned long flags;
+       int id;
 };
 
 struct selftest_obj {
@@ -191,6 +209,34 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
        return 0;
 }
 
+static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
+                                     unsigned long iova, size_t page_size,
+                                     unsigned long flags)
+{
+       unsigned long cur, end = iova + page_size - 1;
+       bool dirty = false;
+       void *ent, *old;
+
+       for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
+               ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+               if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
+                       continue;
+
+               dirty = true;
+               /* Clear dirty */
+               if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
+                       unsigned long val;
+
+                       val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+                       old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+                                      xa_mk_value(val), GFP_KERNEL);
+                       WARN_ON_ONCE(ent != old);
+               }
+       }
+
+       return dirty;
+}
+
 static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
                                            unsigned long iova, size_t size,
                                            unsigned long flags,
@@ -198,31 +244,31 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
 {
        struct mock_iommu_domain *mock =
                container_of(domain, struct mock_iommu_domain, domain);
-       unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
-       void *ent, *old;
+       unsigned long end = iova + size;
+       void *ent;
 
        if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
                return -EINVAL;
 
-       for (i = 0; i < max; i++) {
-               unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+       do {
+               unsigned long pgsize = MOCK_IO_PAGE_SIZE;
+               unsigned long head;
 
-               ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
-               if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
-                       /* Clear dirty */
-                       if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
-                               unsigned long val;
-
-                               val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
-                               old = xa_store(&mock->pfns,
-                                              cur / MOCK_IO_PAGE_SIZE,
-                                              xa_mk_value(val), GFP_KERNEL);
-                               WARN_ON_ONCE(ent != old);
-                       }
-                       iommu_dirty_bitmap_record(dirty, cur,
-                                                 MOCK_IO_PAGE_SIZE);
+               ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+               if (!ent) {
+                       iova += pgsize;
+                       continue;
                }
-       }
+
+               if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
+                       pgsize = MOCK_HUGE_PAGE_SIZE;
+               head = iova & ~(pgsize - 1);
+
+               /* Clear dirty */
+               if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
+                       iommu_dirty_bitmap_record(dirty, head, pgsize);
+               iova = head + pgsize;
+       } while (iova < end);
 
        return 0;
 }
@@ -234,6 +280,7 @@ const struct iommu_dirty_ops dirty_ops = {
 
 static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
 {
+       struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
        struct mock_iommu_domain *mock;
 
        mock = kzalloc(sizeof(*mock), GFP_KERNEL);
@@ -242,6 +289,8 @@ static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
        mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
        mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
        mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+       if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+               mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
        mock->domain.ops = mock_ops.default_domain_ops;
        mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
        xa_init(&mock->pfns);
@@ -287,7 +336,7 @@ mock_domain_alloc_user(struct device *dev, u32 flags,
                        return ERR_PTR(-EOPNOTSUPP);
                if (user_data || (has_dirty_flag && no_dirty_ops))
                        return ERR_PTR(-EOPNOTSUPP);
-               domain = mock_domain_alloc_paging(NULL);
+               domain = mock_domain_alloc_paging(dev);
                if (!domain)
                        return ERR_PTR(-ENOMEM);
                if (has_dirty_flag)
@@ -350,6 +399,9 @@ static int mock_domain_map_pages(struct iommu_domain *domain,
 
                        if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
                                flags = MOCK_PFN_LAST_IOVA;
+                       if (pgsize != MOCK_IO_PAGE_SIZE)
+                               flags |= MOCK_PFN_HUGE_IOVA;
                        old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
                                       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
                                                   flags),
@@ -394,20 +446,27 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
 
                        /*
                         * iommufd generates unmaps that must be a strict
-                        * superset of the map's performend So every starting
-                        * IOVA should have been an iova passed to map, and the
+                        * superset of the maps performed, so every
+                        * starting/ending IOVA should have been an iova passed
+                        * to map.
                         *
-                        * First IOVA must be present and have been a first IOVA
-                        * passed to map_pages
+                        * This simple logic doesn't work when HUGE_PAGE is
+                        * turned on, since the core code will automatically
+                        * switch between the two page sizes, creating a break
+                        * in the unmap calls. The break can land in the middle
+                        * of a contiguous IOVA range.
                         */
-                       if (first) {
-                               WARN_ON(ent && !(xa_to_value(ent) &
-                                                MOCK_PFN_START_IOVA));
-                               first = false;
+                       if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
+                               if (first) {
+                                       WARN_ON(ent && !(xa_to_value(ent) &
+                                                        MOCK_PFN_START_IOVA));
+                                       first = false;
+                               }
+                               if (pgcount == 1 &&
+                                   cur + MOCK_IO_PAGE_SIZE == pgsize)
+                                       WARN_ON(ent && !(xa_to_value(ent) &
+                                                        MOCK_PFN_LAST_IOVA));
                        }
-                       if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
-                               WARN_ON(ent && !(xa_to_value(ent) &
-                                                MOCK_PFN_LAST_IOVA));
 
                        iova += MOCK_IO_PAGE_SIZE;
                        ret += MOCK_IO_PAGE_SIZE;
@@ -595,7 +654,7 @@ static void mock_dev_release(struct device *dev)
 {
        struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
 
-       atomic_dec(&mock_dev_num);
+       ida_free(&mock_dev_ida, mdev->id);
        kfree(mdev);
 }
 
@@ -604,7 +663,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
        struct mock_dev *mdev;
        int rc;
 
-       if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY))
+       if (dev_flags &
+           ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
                return ERR_PTR(-EINVAL);
 
        mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
@@ -616,8 +676,12 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
        mdev->dev.release = mock_dev_release;
        mdev->dev.bus = &iommufd_mock_bus_type.bus;
 
-       rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
-                         atomic_inc_return(&mock_dev_num));
+       rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
+       if (rc < 0)
+               goto err_put;
+       mdev->id = rc;
+
+       rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
        if (rc)
                goto err_put;
 
@@ -1119,7 +1183,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
        }
 
        if (flags & MOCK_FLAGS_ACCESS_SYZ)
-               iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
+               iova = iommufd_test_syz_conv_iova(staccess->access,
                                        &cmd->access_pages.iova);
 
        npages = (ALIGN(iova + length, PAGE_SIZE) -
@@ -1221,8 +1285,8 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
        }
 
        if (flags & MOCK_FLAGS_ACCESS_SYZ)
-               iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
-                                       &cmd->access_rw.iova);
+               iova = iommufd_test_syz_conv_iova(staccess->access,
+                               &cmd->access_rw.iova);
 
        rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
        if (rc)
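
Switching the mock devices from an atomic counter to an IDA means freed device numbers are reused instead of growing without bound across selftest runs. A userspace sketch of the difference, with a toy bitmap standing in for the kernel's IDA:

#include <stdio.h>

#define MAX_IDS 64

static unsigned char used[MAX_IDS];     /* toy stand-in for an IDA */

static int ida_alloc_toy(void)
{
        for (int i = 0; i < MAX_IDS; i++) {
                if (!used[i]) {
                        used[i] = 1;
                        return i;       /* lowest free id, reusable */
                }
        }
        return -1;
}

static void ida_free_toy(int id)
{
        used[id] = 0;
}

int main(void)
{
        int a = ida_alloc_toy();        /* 0 */
        int b = ida_alloc_toy();        /* 1 */

        ida_free_toy(a);
        printf("reused id: %d\n", ida_alloc_toy());     /* 0 again */
        (void)b;
        return 0;
}

An atomic counter, by contrast, would hand out 2, 3, 4, ... forever, which is why the selftests eventually ran out of unique device names.
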
index 53abd4779914d6639f939ca87791ef812884f9a7..b822752c42617055e811f9e89bc2b3455bcc2eb8 100644 (file)
@@ -3181,6 +3181,7 @@ static void its_cpu_init_lpis(void)
        val |= GICR_CTLR_ENABLE_LPIS;
        writel_relaxed(val, rbase + GICR_CTLR);
 
+out:
        if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
                void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
 
@@ -3216,7 +3217,6 @@ static void its_cpu_init_lpis(void)
 
        /* Make sure the GIC has seen the above */
        dsb(sy);
-out:
        gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
        pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
                smp_processor_id(),
index 5101a3fb11df5bef53122db9db3c194669d754e7..58881d3139792074bf6ae1430a4de3760d3eb220 100644 (file)
@@ -235,22 +235,17 @@ static const struct irq_domain_ops mbigen_domain_ops = {
 static int mbigen_of_create_domain(struct platform_device *pdev,
                                   struct mbigen_device *mgn_chip)
 {
-       struct device *parent;
        struct platform_device *child;
        struct irq_domain *domain;
        struct device_node *np;
        u32 num_pins;
        int ret = 0;
 
-       parent = bus_get_dev_root(&platform_bus_type);
-       if (!parent)
-               return -ENODEV;
-
        for_each_child_of_node(pdev->dev.of_node, np) {
                if (!of_property_read_bool(np, "interrupt-controller"))
                        continue;
 
-               child = of_platform_device_create(np, NULL, parent);
+               child = of_platform_device_create(np, NULL, NULL);
                if (!child) {
                        ret = -ENOMEM;
                        break;
@@ -273,7 +268,6 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
                }
        }
 
-       put_device(parent);
        if (ret)
                of_node_put(np);
 
index 5b7bc4fd9517c8972680ad7a503eebf2ca47a518..bf0b40b0fad4b23d756a22a86c8e206a7155e858 100644 (file)
@@ -148,7 +148,13 @@ static void plic_irq_eoi(struct irq_data *d)
 {
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
 
-       writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+       if (unlikely(irqd_irq_disabled(d))) {
+               plic_toggle(handler, d->hwirq, 1);
+               writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+               plic_toggle(handler, d->hwirq, 0);
+       } else {
+               writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+       }
 }
 
 #ifdef CONFIG_SMP
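
The PLIC fix handles completing an interrupt that was masked while in flight: the claim/complete write is silently dropped for a disabled source, so the EOI path briefly re-enables it around the completion. A sketch of that enable-complete-disable dance against a mocked register (the mock's behavior is an assumption for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool enabled[32];
static int pending_claim = -1;

/* Mock: the PLIC only accepts a completion for an enabled source. */
static void write_claim_complete(int hwirq)
{
        if (enabled[hwirq] && pending_claim == hwirq)
                pending_claim = -1;
}

static void plic_toggle(int hwirq, bool on)
{
        enabled[hwirq] = on;
}

static void plic_irq_eoi(int hwirq, bool irq_disabled)
{
        if (irq_disabled) {
                /* Briefly unmask so the completion is accepted. */
                plic_toggle(hwirq, true);
                write_claim_complete(hwirq);
                plic_toggle(hwirq, false);
        } else {
                write_claim_complete(hwirq);
        }
}

int main(void)
{
        pending_claim = 5;              /* irq 5 claimed, then masked */
        plic_irq_eoi(5, true);
        printf("completed: %d\n", pending_claim == -1);
        return 0;
}
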
index f745f85082434dca8fb6d6bf9efe30db79b2a81e..59445763e55a65de49e79cc2436c8a03131a5a15 100644 (file)
 struct convert_context {
        struct completion restart;
        struct bio *bio_in;
-       struct bio *bio_out;
        struct bvec_iter iter_in;
+       struct bio *bio_out;
        struct bvec_iter iter_out;
-       u64 cc_sector;
        atomic_t cc_pending;
+       u64 cc_sector;
        union {
                struct skcipher_request *req;
                struct aead_request *req_aead;
        } r;
+       bool aead_recheck;
+       bool aead_failed;
 
 };
 
@@ -82,6 +84,8 @@ struct dm_crypt_io {
        blk_status_t error;
        sector_t sector;
 
+       struct bvec_iter saved_bi_iter;
+
        struct rb_node rb_node;
 } CRYPTO_MINALIGN_ATTR;
 
@@ -1370,10 +1374,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
        if (r == -EBADMSG) {
                sector_t s = le64_to_cpu(*sector);
 
-               DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
-                           ctx->bio_in->bi_bdev, s);
-               dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
-                                ctx->bio_in, s, 0);
+               ctx->aead_failed = true;
+               if (ctx->aead_recheck) {
+                       DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+                                   ctx->bio_in->bi_bdev, s);
+                       dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+                                        ctx->bio_in, s, 0);
+               }
        }
 
        if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
@@ -1757,6 +1764,8 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
        io->base_bio = bio;
        io->sector = sector;
        io->error = 0;
+       io->ctx.aead_recheck = false;
+       io->ctx.aead_failed = false;
        io->ctx.r.req = NULL;
        io->integrity_metadata = NULL;
        io->integrity_metadata_from_pool = false;
@@ -1768,6 +1777,8 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
        atomic_inc(&io->io_pending);
 }
 
+static void kcryptd_queue_read(struct dm_crypt_io *io);
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1781,6 +1792,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
        if (!atomic_dec_and_test(&io->io_pending))
                return;
 
+       if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
+           cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
+               io->ctx.aead_recheck = true;
+               io->ctx.aead_failed = false;
+               io->error = 0;
+               kcryptd_queue_read(io);
+               return;
+       }
+
        if (io->ctx.r.req)
                crypt_free_req(cc, io->ctx.r.req, base_bio);
 
@@ -1816,15 +1836,19 @@ static void crypt_endio(struct bio *clone)
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->cc;
        unsigned int rw = bio_data_dir(clone);
-       blk_status_t error;
+       blk_status_t error = clone->bi_status;
+
+       if (io->ctx.aead_recheck && !error) {
+               kcryptd_queue_crypt(io);
+               return;
+       }
 
        /*
         * free the processed pages
         */
-       if (rw == WRITE)
+       if (rw == WRITE || io->ctx.aead_recheck)
                crypt_free_buffer_pages(cc, clone);
 
-       error = clone->bi_status;
        bio_put(clone);
 
        if (rw == READ && !error) {
@@ -1845,6 +1869,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
        struct crypt_config *cc = io->cc;
        struct bio *clone;
 
+       if (io->ctx.aead_recheck) {
+               if (!(gfp & __GFP_DIRECT_RECLAIM))
+                       return 1;
+               crypt_inc_pending(io);
+               clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+               if (unlikely(!clone)) {
+                       crypt_dec_pending(io);
+                       return 1;
+               }
+               clone->bi_iter.bi_sector = cc->start + io->sector;
+               crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
+               io->saved_bi_iter = clone->bi_iter;
+               dm_submit_bio_remap(io->base_bio, clone);
+               return 0;
+       }
+
        /*
         * We need the original biovec array in order to decrypt the whole bio
         * data *afterwards* -- thanks to immutable biovecs we don't need to
@@ -2071,6 +2111,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        io->ctx.bio_out = clone;
        io->ctx.iter_out = clone->bi_iter;
 
+       if (crypt_integrity_aead(cc)) {
+               bio_copy_data(clone, io->base_bio);
+               io->ctx.bio_in = clone;
+               io->ctx.iter_in = clone->bi_iter;
+       }
+
        sector += bio_sectors(clone);
 
        crypt_inc_pending(io);
@@ -2107,6 +2153,14 @@ dec:
 
 static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 {
+       if (io->ctx.aead_recheck) {
+               if (!io->error) {
+                       io->ctx.bio_in->bi_iter = io->saved_bi_iter;
+                       bio_copy_data(io->base_bio, io->ctx.bio_in);
+               }
+               crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
+               bio_put(io->ctx.bio_in);
+       }
        crypt_dec_pending(io);
 }
 
@@ -2136,11 +2190,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 
        crypt_inc_pending(io);
 
-       crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
-                          io->sector);
+       if (io->ctx.aead_recheck) {
+               io->ctx.cc_sector = io->sector + cc->iv_offset;
+               r = crypt_convert(cc, &io->ctx,
+                                 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+       } else {
+               crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+                                  io->sector);
 
-       r = crypt_convert(cc, &io->ctx,
-                         test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+               r = crypt_convert(cc, &io->ctx,
+                                 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+       }
        /*
         * Crypto API backlogged the request, because its queue was full
         * and we're in softirq context, so continue from a workqueue
@@ -2182,10 +2242,13 @@ static void kcryptd_async_done(void *data, int error)
        if (error == -EBADMSG) {
                sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
 
-               DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
-                           ctx->bio_in->bi_bdev, s);
-               dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
-                                ctx->bio_in, s, 0);
+               ctx->aead_failed = true;
+               if (ctx->aead_recheck) {
+                       DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+                                   ctx->bio_in->bi_bdev, s);
+                       dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+                                        ctx->bio_in, s, 0);
+               }
                io->error = BLK_STS_PROTECTION;
        } else if (error < 0)
                io->error = BLK_STS_IOERR;
@@ -3110,7 +3173,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
                        sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
                        if (!strcasecmp(sval, "aead")) {
                                set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
-                       } else  if (strcasecmp(sval, "none")) {
+                       } else if (strcasecmp(sval, "none")) {
                                ti->error = "Unknown integrity profile";
                                return -EINVAL;
                        }
@@ -3639,7 +3702,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version = {1, 24, 0},
+       .version = {1, 25, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
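
The dm-crypt change retries a failed AEAD read exactly once: the fast path may decrypt data that a concurrent writer is still touching, so on an authentication failure the sectors are re-read into a private buffer and decrypted again before any error is reported. A sketch of that retry-once flow (decrypt_once() is a hypothetical stand-in for the real crypto path):

#include <stdbool.h>
#include <stdio.h>

struct io {
        bool aead_recheck;      /* second, authoritative pass */
        bool aead_failed;       /* auth tag mismatch seen */
        int error;
};

/* First pass fails (e.g. racing writer); the recheck pass succeeds. */
static void decrypt_once(struct io *io)
{
        io->aead_failed = !io->aead_recheck;
}

static int read_io(struct io *io)
{
        decrypt_once(io);
        if (!io->aead_recheck && io->aead_failed) {
                /* Retry from a stable private copy of the data. */
                io->aead_recheck = true;
                io->aead_failed = false;
                io->error = 0;
                decrypt_once(io);
        }
        if (io->aead_failed)
                io->error = -74;        /* EBADMSG: real corruption */
        return io->error;
}

int main(void)
{
        struct io io = { 0 };

        printf("result: %d\n", read_io(&io));
        return 0;
}

Only the second pass logs and audits the failure, which is why the diff gates DMERR_LIMIT/dm_audit_log_bio on aead_recheck.
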
index c5f03aab455256ff1b0abc606b7728438be347f0..1fc901df84eb163c833e364d21e0a48e65c06239 100644 (file)
@@ -278,6 +278,8 @@ struct dm_integrity_c {
 
        atomic64_t number_of_mismatches;
 
+       mempool_t recheck_pool;
+
        struct notifier_block reboot_notifier;
 };
 
@@ -1689,6 +1691,77 @@ failed:
        get_random_bytes(result, ic->tag_size);
 }
 
+static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
+{
+       struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+       struct dm_integrity_c *ic = dio->ic;
+       struct bvec_iter iter;
+       struct bio_vec bv;
+       sector_t sector, logical_sector, area, offset;
+       struct page *page;
+       void *buffer;
+
+       get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
+       dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
+                                                            &dio->metadata_offset);
+       sector = get_data_sector(ic, area, offset);
+       logical_sector = dio->range.logical_sector;
+
+       page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
+       buffer = page_to_virt(page);
+
+       __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+               unsigned pos = 0;
+
+               do {
+                       char *mem;
+                       int r;
+                       struct dm_io_request io_req;
+                       struct dm_io_region io_loc;
+                       io_req.bi_opf = REQ_OP_READ;
+                       io_req.mem.type = DM_IO_KMEM;
+                       io_req.mem.ptr.addr = buffer;
+                       io_req.notify.fn = NULL;
+                       io_req.client = ic->io;
+                       io_loc.bdev = ic->dev->bdev;
+                       io_loc.sector = sector;
+                       io_loc.count = ic->sectors_per_block;
+
+                       r = dm_io(&io_req, 1, &io_loc, NULL);
+                       if (unlikely(r)) {
+                               dio->bi_status = errno_to_blk_status(r);
+                               goto free_ret;
+                       }
+
+                       integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+                       r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
+                                               &dio->metadata_offset, ic->tag_size, TAG_CMP);
+                       if (r) {
+                               if (r > 0) {
+                                       DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+                                                   bio->bi_bdev, logical_sector);
+                                       atomic64_inc(&ic->number_of_mismatches);
+                                       dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+                                                        bio, logical_sector, 0);
+                                       r = -EILSEQ;
+                               }
+                               dio->bi_status = errno_to_blk_status(r);
+                               goto free_ret;
+                       }
+
+                       mem = bvec_kmap_local(&bv);
+                       memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+                       kunmap_local(mem);
+
+                       pos += ic->sectors_per_block << SECTOR_SHIFT;
+                       sector += ic->sectors_per_block;
+                       logical_sector += ic->sectors_per_block;
+               } while (pos < bv.bv_len);
+       }
+free_ret:
+       mempool_free(page, &ic->recheck_pool);
+}
+
 static void integrity_metadata(struct work_struct *w)
 {
        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
@@ -1776,15 +1849,8 @@ again:
                                                checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
                        if (unlikely(r)) {
                                if (r > 0) {
-                                       sector_t s;
-
-                                       s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
-                                       DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
-                                                   bio->bi_bdev, s);
-                                       r = -EILSEQ;
-                                       atomic64_inc(&ic->number_of_mismatches);
-                                       dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
-                                                        bio, s, 0);
+                                       integrity_recheck(dio, checksums);
+                                       goto skip_io;
                                }
                                if (likely(checksums != checksums_onstack))
                                        kfree(checksums);
@@ -4261,6 +4327,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
                goto bad;
        }
 
+       r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
+       if (r) {
+               ti->error = "Cannot allocate mempool";
+               goto bad;
+       }
+
        ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
                                          WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
        if (!ic->metadata_wq) {
@@ -4609,6 +4681,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
        kvfree(ic->bbs);
        if (ic->bufio)
                dm_bufio_client_destroy(ic->bufio);
+       mempool_exit(&ic->recheck_pool);
        mempool_exit(&ic->journal_io_mempool);
        if (ic->io)
                dm_io_client_destroy(ic->io);
@@ -4661,7 +4734,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
 
 static struct target_type integrity_target = {
        .name                   = "integrity",
-       .version                = {1, 10, 0},
+       .version                = {1, 11, 0},
        .module                 = THIS_MODULE,
        .features               = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
        .ctr                    = dm_integrity_ctr,
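
integrity_recheck() walks the failing bio block by block, synchronously re-reading each block into a mempool page and re-verifying its tag, so a transient mismatch caused by concurrent I/O is not reported as corruption. A compact sketch of that re-read-and-compare loop over an in-memory "disk", with the checksum reduced to a toy XOR:

#include <stdio.h>
#include <string.h>

#define BLOCKS 4
#define BLKSZ  8

static unsigned char disk[BLOCKS][BLKSZ];
static unsigned char tags[BLOCKS];

static unsigned char checksum(const unsigned char *b)
{
        unsigned char c = 0;

        for (int i = 0; i < BLKSZ; i++)
                c ^= b[i];
        return c;
}

/* Re-read each block from stable storage and recompute its tag. */
static int recheck(unsigned char *out, int first, int n)
{
        unsigned char buffer[BLKSZ];    /* stand-in for the mempool page */

        for (int blk = first; blk < first + n; blk++) {
                memcpy(buffer, disk[blk], BLKSZ);       /* synchronous read */
                if (checksum(buffer) != tags[blk])
                        return -1;      /* genuine mismatch: -EILSEQ */
                memcpy(out + (blk - first) * BLKSZ, buffer, BLKSZ);
        }
        return 0;
}

int main(void)
{
        unsigned char out[BLOCKS * BLKSZ];

        for (int i = 0; i < BLOCKS; i++) {
                memset(disk[i], i + 1, BLKSZ);
                tags[i] = checksum(disk[i]);
        }
        printf("recheck: %d\n", recheck(out, 0, BLOCKS));
        return 0;
}
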
index 82662f5769c4af7f5456fc97e044c63574162060..1b591bfa90d5d6463016e22183dbb5f071e94a75 100644 (file)
@@ -482,6 +482,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
        return 0;
 }
 
+static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
+                              u8 *data, size_t len)
+{
+       memcpy(data, io->recheck_buffer, len);
+       io->recheck_buffer += len;
+
+       return 0;
+}
+
+static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
+                                  struct bvec_iter start, sector_t cur_block)
+{
+       struct page *page;
+       void *buffer;
+       int r;
+       struct dm_io_request io_req;
+       struct dm_io_region io_loc;
+
+       page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
+       buffer = page_to_virt(page);
+
+       io_req.bi_opf = REQ_OP_READ;
+       io_req.mem.type = DM_IO_KMEM;
+       io_req.mem.ptr.addr = buffer;
+       io_req.notify.fn = NULL;
+       io_req.client = v->io;
+       io_loc.bdev = v->data_dev->bdev;
+       io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
+       io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
+       r = dm_io(&io_req, 1, &io_loc, NULL);
+       if (unlikely(r))
+               goto free_ret;
+
+       r = verity_hash(v, verity_io_hash_req(v, io), buffer,
+                       1 << v->data_dev_block_bits,
+                       verity_io_real_digest(v, io), true);
+       if (unlikely(r))
+               goto free_ret;
+
+       if (memcmp(verity_io_real_digest(v, io),
+                  verity_io_want_digest(v, io), v->digest_size)) {
+               r = -EIO;
+               goto free_ret;
+       }
+
+       io->recheck_buffer = buffer;
+       r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
+       if (unlikely(r))
+               goto free_ret;
+
+       r = 0;
+free_ret:
+       mempool_free(page, &v->recheck_pool);
+
+       return r;
+}
+
 static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
                          u8 *data, size_t len)
 {
@@ -508,9 +565,7 @@ static int verity_verify_io(struct dm_verity_io *io)
 {
        bool is_zero;
        struct dm_verity *v = io->v;
-#if defined(CONFIG_DM_VERITY_FEC)
        struct bvec_iter start;
-#endif
        struct bvec_iter iter_copy;
        struct bvec_iter *iter;
        struct crypto_wait wait;
@@ -561,10 +616,7 @@ static int verity_verify_io(struct dm_verity_io *io)
                if (unlikely(r < 0))
                        return r;
 
-#if defined(CONFIG_DM_VERITY_FEC)
-               if (verity_fec_is_enabled(v))
-                       start = *iter;
-#endif
+               start = *iter;
                r = verity_for_io_block(v, io, iter, &wait);
                if (unlikely(r < 0))
                        return r;
@@ -586,6 +638,10 @@ static int verity_verify_io(struct dm_verity_io *io)
                         * tasklet since it may sleep, so fallback to work-queue.
                         */
                        return -EAGAIN;
+               } else if (verity_recheck(v, io, start, cur_block) == 0) {
+                       if (v->validated_blocks)
+                               set_bit(cur_block, v->validated_blocks);
+                       continue;
 #if defined(CONFIG_DM_VERITY_FEC)
                } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
                                             cur_block, NULL, &start) == 0) {
@@ -941,6 +997,10 @@ static void verity_dtr(struct dm_target *ti)
        if (v->verify_wq)
                destroy_workqueue(v->verify_wq);
 
+       mempool_exit(&v->recheck_pool);
+       if (v->io)
+               dm_io_client_destroy(v->io);
+
        if (v->bufio)
                dm_bufio_client_destroy(v->bufio);
 
@@ -1379,6 +1439,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
        v->hash_blocks = hash_position;
 
+       r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
+       if (unlikely(r)) {
+               ti->error = "Cannot allocate mempool";
+               goto bad;
+       }
+
+       v->io = dm_io_client_create();
+       if (IS_ERR(v->io)) {
+               r = PTR_ERR(v->io);
+               v->io = NULL;
+               ti->error = "Cannot allocate dm io";
+               goto bad;
+       }
+
        v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
                1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
                dm_bufio_alloc_callback, NULL,
@@ -1486,7 +1560,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
 static struct target_type verity_target = {
        .name           = "verity",
        .features       = DM_TARGET_IMMUTABLE,
-       .version        = {1, 9, 0},
+       .version        = {1, 10, 0},
        .module         = THIS_MODULE,
        .ctr            = verity_ctr,
        .dtr            = verity_dtr,
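
verity_recheck() is the same idea applied to dm-verity: before declaring a block corrupt (or falling back to FEC), re-read it synchronously via dm-io and compare the hash once more against the wanted digest. A sketch of the compare step, with a trivial additive hash standing in for the real hash algorithm:

#include <stdio.h>
#include <string.h>

#define BLOCK 16

static void toy_hash(const unsigned char *data, unsigned char digest[4])
{
        unsigned int sum = 0;

        for (int i = 0; i < BLOCK; i++)
                sum += data[i];
        memcpy(digest, &sum, 4);
}

/* Re-read the block and re-verify against the wanted digest. */
static int verity_recheck_toy(const unsigned char *stable_block,
                              const unsigned char want[4])
{
        unsigned char buffer[BLOCK];    /* mempool page in the real code */
        unsigned char real[4];

        memcpy(buffer, stable_block, BLOCK);    /* synchronous re-read */
        toy_hash(buffer, real);
        if (memcmp(real, want, 4))
                return -5;              /* -EIO: truly corrupt */
        return 0;       /* transient mismatch, block is actually fine */
}

int main(void)
{
        unsigned char block[BLOCK] = "verity block!!!";
        unsigned char want[4];

        toy_hash(block, want);
        printf("recheck: %d\n", verity_recheck_toy(block, want));
        return 0;
}

On success the diff also marks the block in validated_blocks, so the recheck cost is paid at most once per block.
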
index f3f6070084196825f21dcc17947f67974a90cbde..db93a91169d5e6de31d344a6f37589bbc0bdb654 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef DM_VERITY_H
 #define DM_VERITY_H
 
+#include <linux/dm-io.h>
 #include <linux/dm-bufio.h>
 #include <linux/device-mapper.h>
 #include <linux/interrupt.h>
@@ -68,6 +69,9 @@ struct dm_verity {
        unsigned long *validated_blocks; /* bitset blocks validated */
 
        char *signature_key_desc; /* signature keyring reference */
+
+       struct dm_io_client *io;
+       mempool_t recheck_pool;
 };
 
 struct dm_verity_io {
@@ -76,14 +80,16 @@ struct dm_verity_io {
        /* original value of bio->bi_end_io */
        bio_end_io_t *orig_bi_end_io;
 
+       struct bvec_iter iter;
+
        sector_t block;
        unsigned int n_blocks;
        bool in_tasklet;
 
-       struct bvec_iter iter;
-
        struct work_struct work;
 
+       char *recheck_buffer;
+
        /*
         * Three variably-size fields follow this struct:
         *
index 2266358d807466f95d02b431d09ee39805dff5e8..9e41a9aaba8b5cab9d513ef047718071551b35f6 100644 (file)
@@ -579,8 +579,12 @@ static void submit_flushes(struct work_struct *ws)
                        rcu_read_lock();
                }
        rcu_read_unlock();
-       if (atomic_dec_and_test(&mddev->flush_pending))
+       if (atomic_dec_and_test(&mddev->flush_pending)) {
+               /* The pair is percpu_ref_get() from md_flush_request() */
+               percpu_ref_put(&mddev->active_io);
+
                queue_work(md_wq, &mddev->flush_work);
+       }
 }
 
 static void md_submit_flush_data(struct work_struct *ws)
@@ -8788,12 +8792,16 @@ void md_do_sync(struct md_thread *thread)
        int ret;
 
        /* just in case thread restarts... */
-       if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
-           test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
+       if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
                return;
-       if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
+
+       if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+               goto skip;
+
+       if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+           !md_is_rdwr(mddev)) {/* never try to sync a read-only array */
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-               return;
+               goto skip;
        }
 
        if (mddev_is_clustered(mddev)) {
@@ -9368,13 +9376,19 @@ static void md_start_sync(struct work_struct *ws)
        struct mddev *mddev = container_of(ws, struct mddev, sync_work);
        int spares = 0;
        bool suspend = false;
+       char *name;
 
-       if (md_spares_need_change(mddev))
+       /*
+        * If reshape is still in progress, spares won't be added or removed
+        * from conf until reshape is done.
+        */
+       if (mddev->reshape_position == MaxSector &&
+           md_spares_need_change(mddev)) {
                suspend = true;
+               mddev_suspend(mddev, false);
+       }
 
-       suspend ? mddev_suspend_and_lock_nointr(mddev) :
-                 mddev_lock_nointr(mddev);
-
+       mddev_lock_nointr(mddev);
        if (!md_is_rdwr(mddev)) {
                /*
                 * On a read-only array we can:
@@ -9400,8 +9414,10 @@ static void md_start_sync(struct work_struct *ws)
        if (spares)
                md_bitmap_write_all(mddev->bitmap);
 
+       name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
+                       "reshape" : "resync";
        rcu_assign_pointer(mddev->sync_thread,
-                          md_register_thread(md_do_sync, mddev, "resync"));
+                          md_register_thread(md_do_sync, mddev, name));
        if (!mddev->sync_thread) {
                pr_warn("%s: could not start resync thread...\n",
                        mdname(mddev));
@@ -9445,6 +9461,20 @@ not_running:
                sysfs_notify_dirent_safe(mddev->sysfs_action);
 }
 
+static void unregister_sync_thread(struct mddev *mddev)
+{
+       if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
+               /* resync/recovery still happening */
+               clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               return;
+       }
+
+       if (WARN_ON_ONCE(!mddev->sync_thread))
+               return;
+
+       md_reap_sync_thread(mddev);
+}
+
 /*
  * This routine is regularly called by all per-raid-array threads to
  * deal with generic issues like resync and super-block update.
@@ -9469,9 +9499,6 @@ not_running:
  */
 void md_check_recovery(struct mddev *mddev)
 {
-       if (READ_ONCE(mddev->suspended))
-               return;
-
        if (mddev->bitmap)
                md_bitmap_daemon_work(mddev);
 
@@ -9485,7 +9512,8 @@ void md_check_recovery(struct mddev *mddev)
        }
 
        if (!md_is_rdwr(mddev) &&
-           !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+           !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
+           !test_bit(MD_RECOVERY_DONE, &mddev->recovery))
                return;
        if ( ! (
                (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
@@ -9507,8 +9535,7 @@ void md_check_recovery(struct mddev *mddev)
                        struct md_rdev *rdev;
 
                        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-                               /* sync_work already queued. */
-                               clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                               unregister_sync_thread(mddev);
                                goto unlock;
                        }
 
@@ -9571,16 +9598,7 @@ void md_check_recovery(struct mddev *mddev)
                 * still set.
                 */
                if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-                       if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
-                               /* resync/recovery still happening */
-                               clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                               goto unlock;
-                       }
-
-                       if (WARN_ON_ONCE(!mddev->sync_thread))
-                               goto unlock;
-
-                       md_reap_sync_thread(mddev);
+                       unregister_sync_thread(mddev);
                        goto unlock;
                }
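
For readers tracking the refactor: the new unregister_sync_thread() centralizes the reap-or-defer rule that md_check_recovery() previously open-coded in two places. A minimal, runnable illustration of that rule (plain userspace C; every name here is invented for the example, not a kernel symbol):

    #include <stdbool.h>
    #include <stdio.h>

    struct sync_state {
            bool running;   /* a sync/reshape worker is registered */
            bool done;      /* the worker finished its pass */
            bool needed;    /* someone asked the daemon to re-evaluate */
    };

    static void unregister_worker(struct sync_state *s)
    {
            if (!s->done) {
                    /* resync/recovery still happening: drop the request */
                    s->needed = false;
                    return;
            }
            /* finished: reap the worker (join + clear RUNNING, elided) */
            s->running = false;
            s->done = false;
    }

    int main(void)
    {
            struct sync_state s = { .running = true, .done = false,
                                    .needed = true };

            unregister_worker(&s);  /* still running: only NEEDED is dropped */
            printf("running=%d needed=%d\n", s.running, s.needed);

            s.done = true;
            unregister_worker(&s);  /* done: the worker is reaped */
            printf("running=%d\n", s.running);
            return 0;
    }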
 
index 7412066ea22c7a525ed3e9ff1cfc1b5db2b2b527..a5f8419e2df1d5624f587a3615c3e46348532701 100644 (file)
@@ -4175,11 +4175,7 @@ static int raid10_run(struct mddev *mddev)
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
-               set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-               rcu_assign_pointer(mddev->sync_thread,
-                       md_register_thread(md_do_sync, mddev, "reshape"));
-               if (!mddev->sync_thread)
-                       goto out_free_conf;
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        }
 
        return 0;
@@ -4573,16 +4569,8 @@ out:
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
-       set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-
-       rcu_assign_pointer(mddev->sync_thread,
-                          md_register_thread(md_do_sync, mddev, "reshape"));
-       if (!mddev->sync_thread) {
-               ret = -EAGAIN;
-               goto abort;
-       }
+       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        conf->reshape_checkpoint = jiffies;
-       md_wakeup_thread(mddev->sync_thread);
        md_new_event();
        return 0;
 
index 8497880135ee4269ef329e58a10757870ae2df18..6a7a32f7fb912019754f75338104373009604051 100644 (file)
@@ -7936,11 +7936,7 @@ static int raid5_run(struct mddev *mddev)
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
-               set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-               rcu_assign_pointer(mddev->sync_thread,
-                       md_register_thread(md_do_sync, mddev, "reshape"));
-               if (!mddev->sync_thread)
-                       goto abort;
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        }
 
        /* Ok, everything is just fine now */
@@ -8506,29 +8502,8 @@ static int raid5_start_reshape(struct mddev *mddev)
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
-       set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-       rcu_assign_pointer(mddev->sync_thread,
-                          md_register_thread(md_do_sync, mddev, "reshape"));
-       if (!mddev->sync_thread) {
-               mddev->recovery = 0;
-               spin_lock_irq(&conf->device_lock);
-               write_seqcount_begin(&conf->gen_lock);
-               mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
-               mddev->new_chunk_sectors =
-                       conf->chunk_sectors = conf->prev_chunk_sectors;
-               mddev->new_layout = conf->algorithm = conf->prev_algo;
-               rdev_for_each(rdev, mddev)
-                       rdev->new_data_offset = rdev->data_offset;
-               smp_wmb();
-               conf->generation --;
-               conf->reshape_progress = MaxSector;
-               mddev->reshape_position = MaxSector;
-               write_seqcount_end(&conf->gen_lock);
-               spin_unlock_irq(&conf->device_lock);
-               return -EAGAIN;
-       }
+       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        conf->reshape_checkpoint = jiffies;
-       md_wakeup_thread(mddev->sync_thread);
        md_new_event();
        return 0;
 }
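
The raid10 and raid5 hunks above all make the same trade: instead of setting MD_RECOVERY_RUNNING and registering the sync thread inline, the personality merely requests one and lets the md daemon start it from md_start_sync(). A hedged sketch of the request side (stand-in macros, not kernel symbols):

    #define RECOVERY_RESHAPE  (1UL << 0)
    #define RECOVERY_NEEDED   (1UL << 1)

    static void request_reshape(unsigned long *recovery)
    {
            /* request only: no thread creation and no per-driver unwind */
            *recovery |= RECOVERY_RESHAPE | RECOVERY_NEEDED;
    }

The payoff is visible in the deleted raid5 block just above: the long unwind that ran when md_register_thread() failed is gone, because registration failure is now handled once, in md_start_sync()'s not_running path.
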
index 03319a1fa97fda2bf967dd425af9aef83fc1602d..dbd26c3b245bca56adffc8288d5657dd3c61d3b8 100644 (file)
@@ -263,7 +263,6 @@ struct fastrpc_channel_ctx {
        int domain_id;
        int sesscount;
        int vmcount;
-       u64 perms;
        struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
        struct rpmsg_device *rpdev;
        struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
@@ -1279,9 +1278,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
 
                /* Map if we have any heap VMIDs associated with this ADSP Static Process. */
                if (fl->cctx->vmcount) {
+                       u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
                        err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
                                                        (u64)fl->cctx->remote_heap->size,
-                                                       &fl->cctx->perms,
+                                                       &src_perms,
                                                        fl->cctx->vmperms, fl->cctx->vmcount);
                        if (err) {
                                dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
@@ -1915,8 +1916,10 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
 
        /* Add memory to static PD pool, protection thru hypervisor */
        if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
+               u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
                err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
-                       &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
+                       &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
                if (err) {
                        dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                        buf->phys, buf->size, err);
@@ -2290,7 +2293,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 
        if (vmcount) {
                data->vmcount = vmcount;
-               data->perms = BIT(QCOM_SCM_VMID_HLOS);
                for (i = 0; i < data->vmcount; i++) {
                        data->vmperms[i].vmid = vmids[i];
                        data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
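
The dropped perms field and the new per-call src_perms locals are two halves of one fix. A short sketch of the pitfall, on the hedged reading (consistent with qcom_scm_assign_mem() taking srcvm by pointer, as the hunks show) that the call rewrites the bitmap it is given; phys, size, vmperms and vmcount are placeholders for the driver's real arguments:

    u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);  /* re-derived on every call */
    int err;

    /* On success the call updates src_perms to describe the new owners,
     * so a long-lived field (the old cctx->perms) would be silently
     * clobbered by the first call and wrong for every later one. */
    err = qcom_scm_assign_mem(phys, size, &src_perms, vmperms, vmcount);
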
index c6eb27d46cb06de4ade9c0cdbbdd270ffe8ab474..15119584473cafbaaa96ce18f059571cfc2196bc 100644 (file)
@@ -198,8 +198,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
-       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+       /* Turn on for wakeup if turned off by runtime suspend */
+       if (lis3->pdata && lis3->pdata->wakeup_flags) {
+               if (pm_runtime_suspended(dev))
+                       lis3lv02d_poweron(lis3);
+       /* For non-wakeup, turn off if not already turned off by runtime suspend */
+       } else if (!pm_runtime_suspended(dev))
                lis3lv02d_poweroff(lis3);
+
        return 0;
 }
 
@@ -208,13 +214,12 @@ static int lis3lv02d_i2c_resume(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
-       /*
-        * pm_runtime documentation says that devices should always
-        * be powered on at resume. Pm_runtime turns them off after system
-        * wide resume is complete.
-        */
-       if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
-               pm_runtime_suspended(dev))
+       /* Turn back off if turned on for wakeup and runtime suspended */
+       if (lis3->pdata && lis3->pdata->wakeup_flags) {
+               if (pm_runtime_suspended(dev))
+                       lis3lv02d_poweroff(lis3);
+       /* For non-wakeup, turn back on if not runtime suspended */
+       } else if (!pm_runtime_suspended(dev))
                lis3lv02d_poweron(lis3);
 
        return 0;
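
The rewritten suspend/resume pair reduces to a two-input decision, with resume as the exact mirror of suspend. A runnable toy (userspace C, invented names) encoding the suspend half:

    #include <stdbool.h>
    #include <stdio.h>

    static const char *suspend_action(bool wakeup_capable, bool rpm_suspended)
    {
            if (wakeup_capable)     /* device must be powered to wake us */
                    return rpm_suspended ? "power on" : "keep on";
            /* no wakeup role: make sure it ends up off */
            return rpm_suspended ? "keep off" : "power off";
    }

    int main(void)
    {
            printf("%s\n", suspend_action(true, true));   /* power on  */
            printf("%s\n", suspend_action(false, false)); /* power off */
            return 0;
    }
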
index be52b113aea937c7c658e06c012815cec8552f28..89364bdbb1290f5726a34945679e341b17289493 100644 (file)
@@ -96,7 +96,8 @@ static const struct component_master_ops mei_component_master_ops = {
  *
  *    The function checks if the device is pci device and
  *    Intel VGA adapter, the subcomponent is SW Proxy
- *    and the parent of MEI PCI and the parent of VGA are the same PCH device.
+ *    and the VGA is on bus 0, which is reserved for built-in devices,
+ *    so that discrete GFX is rejected.
  *
  * @dev: master device
  * @subcomponent: subcomponent to match (I915_COMPONENT_SWPROXY)
@@ -123,7 +124,8 @@ static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent,
        if (subcomponent != I915_COMPONENT_GSC_PROXY)
                return 0;
 
-       return component_compare_dev(dev->parent, ((struct device *)data)->parent);
+       /* Only built-in GFX */
+       return (pdev->bus->number == 0);
 }
 
 static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
@@ -146,7 +148,7 @@ static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
        }
 
        component_match_add_typed(&cldev->dev, &master_match,
-                                 mei_gsc_proxy_component_match, cldev->dev.parent);
+                                 mei_gsc_proxy_component_match, NULL);
        if (IS_ERR_OR_NULL(master_match)) {
                ret = -ENOMEM;
                goto err_exit;
index 961e5d53a27a8c4221b4b33c9d4e70f0f0155ee7..aac36750d2c54a658debcca55063d2e2a02bf1ce 100644 (file)
 #define MEI_DEV_ID_RPL_S      0x7A68  /* Raptor Lake Point S */
 
 #define MEI_DEV_ID_MTL_M      0x7E70  /* Meteor Lake Point M */
+#define MEI_DEV_ID_ARL_S      0x7F68  /* Arrow Lake Point S */
+#define MEI_DEV_ID_ARL_H      0x7770  /* Arrow Lake Point H */
 
 /*
  * MEI HW Section
index 676d566f38ddfd2cbb5c167f5691f737b4fcf01c..8cf636c5403225f7588a2428318cec1ff7fd2700 100644 (file)
@@ -119,6 +119,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
 
        /* required last entry */
        {0, }
index 6f4a4be6ccb5508dbc8857ce21b7643e93c1ea63..55f7db490d3bbbabd08b7bd2ba6ff078abb93ad6 100644 (file)
@@ -535,6 +535,7 @@ static const struct acpi_device_id vsc_tp_acpi_ids[] = {
        { "INTC1009" }, /* Raptor Lake */
        { "INTC1058" }, /* Tiger Lake */
        { "INTC1094" }, /* Alder Lake */
+       { "INTC10D0" }, /* Meteor Lake */
        {}
 };
 MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
index f410bee501328f6af96b4f0029d4856e45e06766..58ed7193a3ca460fe58a46427306b385a40a2d3e 100644 (file)
@@ -1015,10 +1015,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
        static unsigned ext_csd_bits[] = {
                EXT_CSD_BUS_WIDTH_8,
                EXT_CSD_BUS_WIDTH_4,
+               EXT_CSD_BUS_WIDTH_1,
        };
        static unsigned bus_widths[] = {
                MMC_BUS_WIDTH_8,
                MMC_BUS_WIDTH_4,
+               MMC_BUS_WIDTH_1,
        };
        struct mmc_host *host = card->host;
        unsigned idx, bus_width = 0;
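
With the 1-bit entries in place, the loop that walks these tables gains a last-resort fallback instead of failing card init outright when neither 8- nor 4-bit sticks. Sketch of that loop; try_switch_bus_width() is an invented stand-in for the EXT_CSD switch plus bus test that mmc_select_bus_width() actually performs:

    for (idx = 0; idx < ARRAY_SIZE(bus_widths); idx++) {
            if (try_switch_bus_width(card, ext_csd_bits[idx]) == 0) {
                    bus_width = bus_widths[idx];
                    break;  /* widest width that actually works */
            }
    }
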
index 35067e1e6cd8017b1bb37683f9dda6169af5cbf1..f5da7f9baa52d4b29cd396f0aa88e1ff7891666c 100644 (file)
@@ -225,6 +225,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
        struct scatterlist *sg;
        int i;
 
+       host->dma_in_progress = true;
+
        if (!host->variant->dma_lli || data->sg_len == 1 ||
            idma->use_bounce_buffer) {
                u32 dma_addr;
@@ -263,9 +265,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
        return 0;
 }
 
+static void sdmmc_idma_error(struct mmci_host *host)
+{
+       struct mmc_data *data = host->data;
+       struct sdmmc_idma *idma = host->dma_priv;
+
+       if (!dma_inprogress(host))
+               return;
+
+       writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+       host->dma_in_progress = false;
+       data->host_cookie = 0;
+
+       if (!idma->use_bounce_buffer)
+               dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+                            mmc_get_dma_dir(data));
+}
+
 static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
 {
+       if (!dma_inprogress(host))
+               return;
+
        writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+       host->dma_in_progress = false;
 
        if (!data->host_cookie)
                sdmmc_idma_unprep_data(host, data, 0);
@@ -676,6 +699,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
        .dma_setup = sdmmc_idma_setup,
        .dma_start = sdmmc_idma_start,
        .dma_finalize = sdmmc_idma_finalize,
+       .dma_error = sdmmc_idma_error,
        .set_clkreg = mmci_sdmmc_set_clkreg,
        .set_pwrreg = mmci_sdmmc_set_pwrreg,
        .busy_complete = sdmmc_busy_complete,
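
Both the new .dma_error callback and the reworked finalize open with the same dma_inprogress() guard, which is what makes the pair safe to run in either order after a failure. Condensed to its skeleton (illustrative, mirroring the hunk):

    static void idma_teardown(struct mmci_host *host)
    {
            if (!dma_inprogress(host))      /* already torn down elsewhere */
                    return;

            writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
            host->dma_in_progress = false;
            /* unmap/unprep runs at most once past this point */
    }
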
index 8cf3a375de659a6d98b7dcfc2f4e2be09f7c4a5d..cc9d28b75eb911733d847a1d0c19cf24d9a3f755 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/ktime.h>
+#include <linux/iopoll.h>
 #include <linux/of_address.h>
 
 #include "sdhci-pltfm.h"
 #define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST     (XENON_EMMC_PHY_REG_BASE + 0x18)
 #define XENON_LOGIC_TIMING_VALUE               0x00AA8977
 
+#define XENON_MAX_PHY_TIMEOUT_LOOPS            100
+
 /*
  * List offset of PHY registers and some special register values
  * in eMMC PHY 5.0 or eMMC PHY 5.1
@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
        return 0;
 }
 
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+       u32 reg;
+       int err;
+
+       err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
+                               1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
+       if (err)
+               dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
+
+       return err;
+}
+
 /*
  * eMMC 5.0/5.1 PHY init/re-init.
  * eMMC PHY init should be executed after:
@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
        struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
        struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
 
+       int ret = xenon_check_stability_internal_clk(host);
+
+       if (ret)
+               return ret;
+
        reg = sdhci_readl(host, phy_regs->timing_adj);
        reg |= XENON_PHY_INITIALIZAION;
        sdhci_writel(host, reg, phy_regs->timing_adj);
@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
        /* get the wait time */
        wait /= clock;
        wait++;
-       /* wait for host eMMC PHY init completes */
-       udelay(wait);
 
-       reg = sdhci_readl(host, phy_regs->timing_adj);
-       reg &= XENON_PHY_INITIALIZAION;
-       if (reg) {
+       /*
+        * AC5X spec says bit must be polled until zero.
+        * We see cases in which timeout can take longer
+        * than the standard calculation on AC5X, which is
+        * expected following the spec comment above.
+        * According to the spec, we must wait as long as
+        * it takes for that bit to toggle on AC5X.
+        * Cap that with 100 delay loops so we won't get
+        * stuck here forever:
+        */
+
+       ret = read_poll_timeout(sdhci_readl, reg,
+                               !(reg & XENON_PHY_INITIALIZAION),
+                               wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
+                               false, host, phy_regs->timing_adj);
+       if (ret)
                dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
-                       wait);
-               return -ETIMEDOUT;
-       }
+                       wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
 
-       return 0;
+       return ret;
 }
 
 #define ARMADA_3700_SOC_PAD_1_8V       0x1
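
Both conversions in this file rely on the <linux/iopoll.h> helper, whose argument order is worth annotating once (a restatement of the hunk above):

    ret = read_poll_timeout(sdhci_readl,    /* op: how to read              */
                    reg,                    /* val: where each read lands   */
                    !(reg & XENON_PHY_INITIALIZAION),  /* exit condition    */
                    wait,                   /* delay between reads, in us   */
                    XENON_MAX_PHY_TIMEOUT_LOOPS * wait, /* total budget, us */
                    false,                  /* no sleep before first read   */
                    host, phy_regs->timing_adj);        /* op's arguments   */
    /* returns 0 once the condition holds, -ETIMEDOUT otherwise; reg keeps
     * the last value read either way */
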
index e451b28840d58b2b0e6b5fdd4d50fe809dd29de4..5887feb347a4e42aa1dcc779bc7f5b252402b16e 100644 (file)
@@ -621,6 +621,7 @@ static void mtd_check_of_node(struct mtd_info *mtd)
                if (plen == mtd_name_len &&
                    !strncmp(mtd->name, pname + offset, plen)) {
                        mtd_set_of_node(mtd, mtd_dn);
+                       of_node_put(mtd_dn);
                        break;
                }
        }
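
The added of_node_put() follows the standard refcount rule for OF child iteration, sketched here with invented helpers (is_wanted(), record()):

    struct device_node *child;

    for_each_child_of_node(parent, child) {
            /* the iterator holds a reference on 'child'; breaking out
             * early transfers that reference to us */
            if (is_wanted(child)) {
                    record(child);          /* takes its own reference */
                    of_node_put(child);     /* so drop the iterator's one */
                    break;
            }
    }
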
index a466987448502e0b576b612f42139f878d4014ff..5b0f5a9cef81b5fbc1494cb9f00b123ea06f0d35 100644 (file)
@@ -290,16 +290,13 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
        MARVELL_LAYOUT( 2048,   512,  4,  1,  1, 2048, 32, 30,  0,  0,  0),
        MARVELL_LAYOUT( 2048,   512,  8,  2,  1, 1024,  0, 30,1024,32, 30),
        MARVELL_LAYOUT( 2048,   512,  8,  2,  1, 1024,  0, 30,1024,64, 30),
-       MARVELL_LAYOUT( 2048,   512,  12, 3,  2, 704,   0, 30,640,  0, 30),
-       MARVELL_LAYOUT( 2048,   512,  16, 5,  4, 512,   0, 30,  0, 32, 30),
+       MARVELL_LAYOUT( 2048,   512,  16, 4,  4, 512,   0, 30,  0, 32, 30),
        MARVELL_LAYOUT( 4096,   512,  4,  2,  2, 2048, 32, 30,  0,  0,  0),
-       MARVELL_LAYOUT( 4096,   512,  8,  5,  4, 1024,  0, 30,  0, 64, 30),
-       MARVELL_LAYOUT( 4096,   512,  12, 6,  5, 704,   0, 30,576, 32, 30),
-       MARVELL_LAYOUT( 4096,   512,  16, 9,  8, 512,   0, 30,  0, 32, 30),
+       MARVELL_LAYOUT( 4096,   512,  8,  4,  4, 1024,  0, 30,  0, 64, 30),
+       MARVELL_LAYOUT( 4096,   512,  16, 8,  8, 512,   0, 30,  0, 32, 30),
        MARVELL_LAYOUT( 8192,   512,  4,  4,  4, 2048,  0, 30,  0,  0,  0),
-       MARVELL_LAYOUT( 8192,   512,  8,  9,  8, 1024,  0, 30,  0, 160, 30),
-       MARVELL_LAYOUT( 8192,   512,  12, 12, 11, 704,  0, 30,448,  64, 30),
-       MARVELL_LAYOUT( 8192,   512,  16, 17, 16, 512,  0, 30,  0,  32, 30),
+       MARVELL_LAYOUT( 8192,   512,  8,  8,  8, 1024,  0, 30,  0, 160, 30),
+       MARVELL_LAYOUT( 8192,   512,  16, 16, 16, 512,  0, 30,  0,  32, 30),
 };
 
 /**
index 987710e09441adefbf238948e759fec4d049b126..6023cba748bb858373a54dd58c5808057d9841fc 100644 (file)
@@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
 {
        u8 status2;
        struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
-                                                     &status2);
+                                                     spinand->scratchbuf);
        int ret;
 
        switch (status & STATUS_ECC_MASK) {
@@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
                 * report the maximum of 4 in this case
                 */
                /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+               status2 = *(spinand->scratchbuf);
                return ((status & STATUS_ECC_MASK) >> 2) |
                        ((status2 & STATUS_ECC_MASK) >> 4);
 
@@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
 {
        u8 status2;
        struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
-                                                     &status2);
+                                                     spinand->scratchbuf);
        int ret;
 
        switch (status & STATUS_ECC_MASK) {
@@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
                 * 1 ... 4 bits are flipped (and corrected)
                 */
                /* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+               status2 = *(spinand->scratchbuf);
                return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
 
        case STATUS_ECC_UNCOR_ERROR:
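
Both hunks in this file swap a stack variable for spinand->scratchbuf, per the spi-mem rule that data buffers may be DMA-mapped and so must not live on the stack. The resulting shape, condensed (spi_mem_exec_op() is the transport call the SPI-NAND core uses):

    struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
                                                  spinand->scratchbuf);
    u8 status2;
    int ret;

    ret = spi_mem_exec_op(spinand->spimem, &op);
    if (ret)
            return ret;
    status2 = *spinand->scratchbuf; /* copy out only after the op ran */
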
index a11748b8d69b435cf97971cec21c0340365ed6d1..cd0683bcca038d59336ee548b4aaa6acdb2c21a5 100644 (file)
@@ -1811,7 +1811,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
 
        ASSERT_RTNL();
 
-       if (!bond_xdp_check(bond)) {
+       if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
                xdp_clear_features_flag(bond_dev);
                return;
        }
index 61b71bcfe39625b271e0d932f92ee4564d9d87e2..c3da97abce2027a1ed9173a98b4d08fc755dc1d0 100644 (file)
@@ -49,9 +49,9 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
        mutex_lock(&dev->alu_mutex);
 
        ctrl_addr = IND_ACC_TABLE(table) | addr;
-       ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+       ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
        if (!ret)
-               ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+               ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
 
        mutex_unlock(&dev->alu_mutex);
 
index da3bdd3025022c3dd7286c3b7873e1a108767025..760a9a60bc15c1849f6b70e7d3f5b99c58667523 100644 (file)
@@ -21,6 +21,7 @@ config ADIN1110
        tristate "Analog Devices ADIN1110 MAC-PHY"
        depends on SPI && NET_SWITCHDEV
        select CRC8
+       select PHYLIB
        help
          Say yes here to build support for Analog Devices ADIN1110
          Low Power 10BASE-T1L Ethernet MAC-PHY.
index 11c23a7f3172d39a200b85baad84c3cb3cc0db07..fd1a5149c00319d3c0c32a784f1d819869c85b44 100644 (file)
@@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
        if (err < 0) {
                dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
                         name, ERR_PTR(err));
-               goto err_out;
+               kfree(padev);
+               return ERR_PTR(err);
        }
 
        err = auxiliary_device_add(aux_dev);
        if (err) {
                dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
                         name, ERR_PTR(err));
-               goto err_out_uninit;
+               auxiliary_device_uninit(aux_dev);
+               return ERR_PTR(err);
        }
 
        return padev;
-
-err_out_uninit:
-       auxiliary_device_uninit(aux_dev);
-err_out:
-       kfree(padev);
-       return ERR_PTR(err);
 }
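
The rewritten unwind fixes an ownership hand-off: assuming, as is usual for auxiliary devices, that the release callback installed before auxiliary_device_init() frees the containing structure, the old path double-freed padev by calling kfree() after auxiliary_device_uninit(). The rule in miniature:

    err = auxiliary_device_init(aux_dev);
    if (err) {
            kfree(padev);   /* init failed: we still own the memory */
            return ERR_PTR(err);
    }
    /* from here on the device refcount owns padev... */
    err = auxiliary_device_add(aux_dev);
    if (err) {
            auxiliary_device_uninit(aux_dev); /* release callback frees it */
            return ERR_PTR(err);              /* ...so no kfree() here     */
    }
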
 
 int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
index 29b04a274d077375d9658ea91c16df1ccd963969..80245c65cc904defdec4637eb66a9c1edd6eb03f 100644 (file)
@@ -535,9 +535,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
        int j = 0, i;
 
        for (i = 0; i < NUM_NET_FILTERS; i++) {
-               if (j == *rule_cnt)
-                       return -EMSGSIZE;
-
                if (!priv->net_filters[i].claimed ||
                    priv->net_filters[i].port != intf->port)
                        continue;
@@ -547,6 +544,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
                    priv->net_filters[i - 1].wake_filter)
                        continue;
 
+               if (j == *rule_cnt)
+                       return -EMSGSIZE;
+
                rule_locs[j++] = priv->net_filters[i].fs.location;
        }
 
index f59557b0cd51523896890ffe6121ffbac54f5f70..6ad1366270f79cba0579bac6088743b1645203ef 100644 (file)
@@ -1050,6 +1050,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
                        netdev_err(dev, "could not attach to PHY\n");
                        goto err_phy_disable;
                }
+
+               /* Indicate that the MAC is responsible for PHY PM */
+               phydev->mac_managed_pm = true;
        } else if (!intf->wolopts) {
                ret = phy_resume(dev->phydev);
                if (ret)
index 20fcb20b42edee5129fcf6e5edde9f3a0ecd2764..66b57783533897e6399ec12505441004646b8ebc 100644 (file)
@@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
 
        tlv->type = htons(type);
        tlv->length = htons(length);
-       memcpy(tlv->value, value, length);
+       unsafe_memcpy(tlv->value, value, length,
+                     /* Flexible array of flexible arrays */);
 
        vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
        vp->length = htonl(ntohl(vp->length) +
index 9ba15d3183d75726fd88fa6b27a6efaf1fc30790..758535adc9ff5bb0a043683e1875ff1f3c9c2005 100644 (file)
@@ -1073,6 +1073,14 @@ int memac_initialization(struct mac_device *mac_dev,
        unsigned long            capabilities;
        unsigned long           *supported;
 
+       /* The internal connection to the serdes is XGMII, but this isn't
+        * really correct for the phy mode (which is the external connection).
+        * However, this is how all older device trees say that they want
+        * 10GBASE-R (aka XFI), so just convert it for them.
+        */
+       if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+               mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
        mac_dev->phylink_ops            = &memac_mac_ops;
        mac_dev->set_promisc            = memac_set_promiscuous;
        mac_dev->change_addr            = memac_modify_mac_address;
@@ -1139,7 +1147,7 @@ int memac_initialization(struct mac_device *mac_dev,
         * (and therefore that xfi_pcs cannot be set). If we are defaulting to
         * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
         */
-       if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+       if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER)
                memac->xfi_pcs = pcs;
        else
                memac->sgmii_pcs = pcs;
@@ -1153,14 +1161,6 @@ int memac_initialization(struct mac_device *mac_dev,
                goto _return_fm_mac_free;
        }
 
-       /* The internal connection to the serdes is XGMII, but this isn't
-        * really correct for the phy mode (which is the external connection).
-        * However, this is how all older device trees say that they want
-        * 10GBASE-R (aka XFI), so just convert it for them.
-        */
-       if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
-               mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
-
        /* TODO: The following interface modes are supported by (some) hardware
         * but not by this driver:
         * - 1000BASE-KX
index a2788fd5f8bb857510e6c4b4af362188be996e68..19e450a5bd314ff67676843a767ec1c5c2bd84d4 100644 (file)
@@ -2559,7 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
                hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
                                           (u16)(mac_reg & 0xFFFF));
                hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
-                                          FIELD_GET(E1000_RAH_AV, mac_reg));
+                                          (u16)((mac_reg & E1000_RAH_AV) >> 16));
        }
 
        e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
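
The revert is a reminder of what FIELD_GET() actually computes: it shifts the masked field down to bit 0, which differs from the open-coded expression whenever the consumer expects the bit somewhere else. With E1000_RAH_AV being bit 31:

    #include <linux/bitfield.h>

    u32 mac_reg = 0x80000000;                      /* RAH with AV set */

    u16 a = FIELD_GET(E1000_RAH_AV, mac_reg);      /* == 0x0001 */
    u16 b = (u16)((mac_reg & E1000_RAH_AV) >> 16); /* == 0x8000, the value
                                                      BM_RAR_CTRL expects */
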
index 54eb55464e3151f00665f9b780c65a1861f8ca07..89a3401d20ab4b1b429802930345c74e823f53e1 100644 (file)
@@ -13560,9 +13560,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
                return err;
 
        i40e_queue_pair_disable_irq(vsi, queue_pair);
+       i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
        err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
        i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
-       i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
        i40e_queue_pair_clean_rings(vsi, queue_pair);
        i40e_queue_pair_reset_stats(vsi, queue_pair);
 
index af42693305815ca8711bb80d25b7221ca8981e16..ce1f11b8ad65c213bc0090e64dabe6e7295d736c 100644 (file)
@@ -567,8 +567,7 @@ static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
  **/
 static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
 {
-       return (hw->aq.fw_maj_ver > maj ||
-               (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min));
+       return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
 }
 
 #endif /* _I40E_PROTOTYPE_H_ */
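
A concrete case shows why the old predicate was wrong: with firmware 6.2 on the adapter, i40e_is_fw_ver_eq(hw, 4, 0) evaluated to true via the (fw_maj_ver > maj) arm, so "equal" behaved like "at least". Worked out:

    /* old: (fw_maj > maj) || (fw_maj == maj && fw_min == min)
     *      fw 6.2 vs (4, 0) -> (6 > 4) -> true, despite the "eq" name
     * new:  fw_maj == maj && fw_min == min
     *      fw 6.2 vs (4, 0) -> false; only fw 4.0 matches */
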
index 7ac847718882e29b38071ca6b8adb47ca063f1d7..c979192e44d108b370ad132ec900c19d8452db32 100644 (file)
@@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
        q_vector = vsi->q_vectors[v_idx];
 
        ice_for_each_tx_ring(tx_ring, q_vector->tx) {
-               if (vsi->netdev)
-                       netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
-                                            NETDEV_QUEUE_TYPE_TX, NULL);
+               ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+                                  NULL);
                tx_ring->q_vector = NULL;
        }
        ice_for_each_rx_ring(rx_ring, q_vector->rx) {
-               if (vsi->netdev)
-                       netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
-                                            NETDEV_QUEUE_TYPE_RX, NULL);
+               ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+                                  NULL);
                rx_ring->q_vector = NULL;
        }
 
index b9c5eced6326f8fe3958c446f3e8b8bb0c517f90..bd9b1fed74ab86d3da3d9941f4139bbd46f3115e 100644 (file)
@@ -30,6 +30,26 @@ static const char * const pin_type_name[] = {
        [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
 };
 
+/**
+ * ice_dpll_is_reset - check if reset is in progress
+ * @pf: private board structure
+ * @extack: error reporting
+ *
+ * If reset is in progress, fill extack with error.
+ *
+ * Return:
+ * * false - no reset in progress
+ * * true - reset in progress
+ */
+static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+       if (ice_is_reset_in_progress(pf->state)) {
+               NL_SET_ERR_MSG(extack, "PF reset in progress");
+               return true;
+       }
+       return false;
+}
+
 /**
  * ice_dpll_pin_freq_set - set pin's frequency
  * @pf: private board structure
@@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
        struct ice_pf *pf = d->pf;
        int ret;
 
+       if (ice_dpll_is_reset(pf, extack))
+               return -EBUSY;
+
        mutex_lock(&pf->dplls.lock);
        ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
        mutex_unlock(&pf->dplls.lock);
@@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
  * ice_dpll_pin_enable - enable a pin on dplls
  * @hw: board private hw structure
  * @pin: pointer to a pin
+ * @dpll_idx: dpll index to connect to output pin
  * @pin_type: type of pin being enabled
  * @extack: error reporting
  *
@@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
  */
 static int
 ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
-                   enum ice_dpll_pin_type pin_type,
+                   u8 dpll_idx, enum ice_dpll_pin_type pin_type,
                    struct netlink_ext_ack *extack)
 {
        u8 flags = 0;
@@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
                ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
                break;
        case ICE_DPLL_PIN_TYPE_OUTPUT:
+               flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL;
                if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
                        flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
                flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
-               ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+               ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx,
+                                               0, 0);
                break;
        default:
                return -EINVAL;
@@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
        case ICE_DPLL_PIN_TYPE_INPUT:
                ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
                                               NULL, &pin->flags[0],
-                                              &pin->freq, NULL);
+                                              &pin->freq, &pin->phase_adjust);
                if (ret)
                        goto err;
                if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
@@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
                break;
        case ICE_DPLL_PIN_TYPE_OUTPUT:
                ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
-                                               &pin->flags[0], NULL,
+                                               &pin->flags[0], &parent,
                                                &pin->freq, NULL);
                if (ret)
                        goto err;
-               if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
-                       pin->state[0] = DPLL_PIN_STATE_CONNECTED;
-               else
-                       pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+
+               parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
+               if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+                       pin->state[pf->dplls.eec.dpll_idx] =
+                               parent == pf->dplls.eec.dpll_idx ?
+                               DPLL_PIN_STATE_CONNECTED :
+                               DPLL_PIN_STATE_DISCONNECTED;
+                       pin->state[pf->dplls.pps.dpll_idx] =
+                               parent == pf->dplls.pps.dpll_idx ?
+                               DPLL_PIN_STATE_CONNECTED :
+                               DPLL_PIN_STATE_DISCONNECTED;
+               } else {
+                       pin->state[pf->dplls.eec.dpll_idx] =
+                               DPLL_PIN_STATE_DISCONNECTED;
+                       pin->state[pf->dplls.pps.dpll_idx] =
+                               DPLL_PIN_STATE_DISCONNECTED;
+               }
                break;
        case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
                for (parent = 0; parent < pf->dplls.rclk.num_parents;
@@ -568,9 +607,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
        struct ice_pf *pf = d->pf;
        int ret;
 
+       if (ice_dpll_is_reset(pf, extack))
+               return -EBUSY;
+
        mutex_lock(&pf->dplls.lock);
        if (enable)
-               ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+               ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type,
+                                         extack);
        else
                ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
        if (!ret)
@@ -603,6 +646,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
                          struct netlink_ext_ack *extack)
 {
        bool enable = state == DPLL_PIN_STATE_CONNECTED;
+       struct ice_dpll_pin *p = pin_priv;
+       struct ice_dpll *d = dpll_priv;
+
+       if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
+               return 0;
 
        return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
                                      extack, ICE_DPLL_PIN_TYPE_OUTPUT);
@@ -665,14 +713,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
        struct ice_pf *pf = d->pf;
        int ret;
 
+       if (ice_dpll_is_reset(pf, extack))
+               return -EBUSY;
+
        mutex_lock(&pf->dplls.lock);
        ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
        if (ret)
                goto unlock;
-       if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+       if (pin_type == ICE_DPLL_PIN_TYPE_INPUT ||
+           pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
                *state = p->state[d->dpll_idx];
-       else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
-               *state = p->state[0];
        ret = 0;
 unlock:
        mutex_unlock(&pf->dplls.lock);
@@ -790,6 +840,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
        struct ice_pf *pf = d->pf;
        int ret;
 
+       if (ice_dpll_is_reset(pf, extack))
+               return -EBUSY;
+
        mutex_lock(&pf->dplls.lock);
        ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
        mutex_unlock(&pf->dplls.lock);
@@ -910,6 +963,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
        u8 flag, flags_en = 0;
        int ret;
 
+       if (ice_dpll_is_reset(pf, extack))
+               return -EBUSY;
+
        mutex_lock(&pf->dplls.lock);
        switch (type) {
        case ICE_DPLL_PIN_TYPE_INPUT:
@@ -1069,6 +1125,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
        int ret = -EINVAL;
        u32 hw_idx;
 
+       if (ice_dpll_is_reset(pf, extack))
+               return -EBUSY;
+
        mutex_lock(&pf->dplls.lock);
        hw_idx = parent->idx - pf->dplls.base_rclk_idx;
        if (hw_idx >= pf->dplls.num_inputs)
@@ -1123,6 +1182,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
        int ret = -EINVAL;
        u32 hw_idx;
 
+       if (ice_dpll_is_reset(pf, extack))
+               return -EBUSY;
+
        mutex_lock(&pf->dplls.lock);
        hw_idx = parent->idx - pf->dplls.base_rclk_idx;
        if (hw_idx >= pf->dplls.num_inputs)
@@ -1305,8 +1367,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
        struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
        struct ice_dpll *de = &pf->dplls.eec;
        struct ice_dpll *dp = &pf->dplls.pps;
-       int ret;
+       int ret = 0;
 
+       if (ice_is_reset_in_progress(pf->state))
+               goto resched;
        mutex_lock(&pf->dplls.lock);
        ret = ice_dpll_update_state(pf, de, false);
        if (!ret)
@@ -1326,6 +1390,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
        ice_dpll_notify_changes(de);
        ice_dpll_notify_changes(dp);
 
+resched:
        /* Run twice a second or reschedule if update failed */
        kthread_queue_delayed_work(d->kworker, &d->work,
                                   ret ? msecs_to_jiffies(10) :
@@ -1532,7 +1597,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
        }
        if (WARN_ON_ONCE(!vsi || !vsi->netdev))
                return;
-       netdev_dpll_pin_clear(vsi->netdev);
+       dpll_netdev_pin_clear(vsi->netdev);
        dpll_pin_put(rclk->pin);
 }
 
@@ -1576,7 +1641,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
        }
        if (WARN_ON((!vsi || !vsi->netdev)))
                return -EINVAL;
-       netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+       dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
 
        return 0;
 
@@ -2055,6 +2120,7 @@ void ice_dpll_init(struct ice_pf *pf)
        struct ice_dplls *d = &pf->dplls;
        int err = 0;
 
+       mutex_init(&d->lock);
        err = ice_dpll_init_info(pf, cgu);
        if (err)
                goto err_exit;
@@ -2067,7 +2133,6 @@ void ice_dpll_init(struct ice_pf *pf)
        err = ice_dpll_init_pins(pf, cgu);
        if (err)
                goto deinit_pps;
-       mutex_init(&d->lock);
        if (cgu) {
                err = ice_dpll_init_worker(pf);
                if (err)
index 9be724291ef82ac7e05c198d9febe029b4946a5e..fc23dbe302b46fa35e97ea425add87711abf66ee 100644 (file)
@@ -2426,7 +2426,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                ice_vsi_map_rings_to_vectors(vsi);
 
                /* Associate q_vector rings to napi */
-               ice_vsi_set_napi_queues(vsi, true);
+               ice_vsi_set_napi_queues(vsi);
 
                vsi->stat_offsets_loaded = false;
 
@@ -2904,19 +2904,19 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
 }
 
 /**
- * ice_queue_set_napi - Set the napi instance for the queue
+ * __ice_queue_set_napi - Set the napi instance for the queue
  * @dev: device to which NAPI and queue belong
  * @queue_index: Index of queue
  * @type: queue type as RX or TX
  * @napi: NAPI context
  * @locked: is the rtnl_lock already held
  *
- * Set the napi instance for the queue
+ * Set the napi instance for the queue. Caller indicates the lock status.
  */
 static void
-ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
-                  enum netdev_queue_type type, struct napi_struct *napi,
-                  bool locked)
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+                    enum netdev_queue_type type, struct napi_struct *napi,
+                    bool locked)
 {
        if (!locked)
                rtnl_lock();
@@ -2926,26 +2926,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
 }
 
 /**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+                  enum netdev_queue_type type, struct napi_struct *napi)
+{
+       struct ice_pf *pf = vsi->back;
+
+       if (!vsi->netdev)
+               return;
+
+       if (current_work() == &pf->serv_task ||
+           test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+           test_bit(ICE_DOWN, pf->state) ||
+           test_bit(ICE_SUSPENDED, pf->state))
+               __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+                                    false);
+       else
+               __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+                                    true);
+}
+
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
  * @q_vector: q_vector pointer
  * @locked: is the rtnl_lock already held
  *
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+       struct ice_rx_ring *rx_ring;
+       struct ice_tx_ring *tx_ring;
+
+       ice_for_each_rx_ring(rx_ring, q_vector->rx)
+               __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+                                    NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+                                    locked);
+
+       ice_for_each_tx_ring(tx_ring, q_vector->tx)
+               __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+                                    NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+                                    locked);
+       /* Also set the interrupt number for the NAPI */
+       netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
  * Associate the q_vector napi with all the queue[s] on the vector
  */
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
 {
        struct ice_rx_ring *rx_ring;
        struct ice_tx_ring *tx_ring;
 
        ice_for_each_rx_ring(rx_ring, q_vector->rx)
-               ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
-                                  NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
-                                  locked);
+               ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+                                  NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
 
        ice_for_each_tx_ring(tx_ring, q_vector->tx)
-               ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
-                                  NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
-                                  locked);
+               ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+                                  NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
        /* Also set the interrupt number for the NAPI */
        netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
 }
@@ -2953,11 +3006,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
 /**
  * ice_vsi_set_napi_queues
  * @vsi: VSI pointer
- * @locked: is the rtnl_lock already held
  *
  * Associate queue[s] with napi for all vectors
  */
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
 {
        int i;
 
@@ -2965,7 +3017,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
                return;
 
        ice_for_each_q_vector(vsi, i)
-               ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+               ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
 }
 
 /**
@@ -3140,7 +3192,7 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
                }
        }
 
-       tx_ring_stats = vsi_stat->rx_ring_stats;
+       tx_ring_stats = vsi_stat->tx_ring_stats;
        vsi_stat->tx_ring_stats =
                krealloc_array(vsi_stat->tx_ring_stats, req_txq,
                               sizeof(*vsi_stat->tx_ring_stats),
index 71bd27244941d549d9253af900629ccb36278072..bfcfc582a4c04ff143390e394d0b65a1d0970391 100644 (file)
@@ -91,9 +91,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
 
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+                  enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
 
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
 
 int ice_vsi_release(struct ice_vsi *vsi);
 
index dd4a9bc0dfdc661b2d2f3c48a2df5b773e4f75bb..df6a68ab747eeea289595765bc033473cef37165 100644 (file)
@@ -3495,7 +3495,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
        ice_for_each_q_vector(vsi, v_idx) {
                netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
                               ice_napi_poll);
-               ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+               __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
        }
 }
 
@@ -5447,6 +5447,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
                if (ret)
                        goto err_reinit;
                ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+               ice_vsi_set_napi_queues(pf->vsi[v]);
        }
 
        ret = ice_req_irq_msix_misc(pf);
@@ -8012,6 +8013,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
        pf_sw = pf->first_sw;
        /* find the attribute in the netlink message */
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
                __u16 mode;
index a94a1c48c3de50db27c4373fef33632c5ae40f70..b0f78c2f2790949c4ed891d2bac789c9d2f2646d 100644 (file)
@@ -1068,6 +1068,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
        struct ice_pf *pf = pci_get_drvdata(pdev);
        u16 prev_msix, prev_queues, queues;
        bool needs_rebuild = false;
+       struct ice_vsi *vsi;
        struct ice_vf *vf;
        int id;
 
@@ -1102,6 +1103,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
        if (!vf)
                return -ENOENT;
 
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi)
+               return -ENOENT;
+
        prev_msix = vf->num_msix;
        prev_queues = vf->num_vf_qs;
 
@@ -1122,7 +1127,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
        if (vf->first_vector_idx < 0)
                goto unroll;
 
-       if (ice_vf_reconfig_vsi(vf)) {
+       if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
                /* Try to rebuild with previous values */
                needs_rebuild = true;
                goto unroll;
@@ -1148,8 +1153,10 @@ unroll:
        if (vf->first_vector_idx < 0)
                return -EINVAL;
 
-       if (needs_rebuild)
+       if (needs_rebuild) {
                ice_vf_reconfig_vsi(vf);
+               ice_vf_init_host_cfg(vf, vsi);
+       }
 
        ice_ena_vf_mappings(vf);
        ice_put_vf(vf);
index c925813ec9caf06d11199ca066a5096e6d034b4e..6f2328a049bf10e7604f3f33a91ce943944f0e37 100644 (file)
@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
                vf->driver_caps = *(u32 *)msg;
        else
                vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
-                                 VIRTCHNL_VF_OFFLOAD_RSS_REG |
                                  VIRTCHNL_VF_OFFLOAD_VLAN;
 
        vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
        vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
                                                    vf->driver_caps);
 
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
-       } else {
-               if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
-                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
-               else
-                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
-       }
 
        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
index 5e19d48a05b45939f3bf72882bb7c9234312a230..d796dbd2a440cd550a9147956c6968286ae9cfa2 100644 (file)
@@ -13,8 +13,6 @@
  * - opcodes needed by VF when caps are activated
  *
  * Caps that don't use new opcodes (no opcodes should be allowed):
- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
  * - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
  * - VIRTCHNL_VF_OFFLOAD_CRC
  * - VIRTCHNL_VF_OFFLOAD_RX_POLLING
index 8b81a16770459373026f2436099c0280e29f9022..2eecd0f39aa696e1c24083f03bf9a1908c121fdd 100644 (file)
@@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
                        return -EBUSY;
                usleep_range(1000, 2000);
        }
+
+       ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+       ice_qvec_toggle_napi(vsi, q_vector, false);
+
        netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
 
        ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
@@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
                if (err)
                        return err;
        }
-       ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
        err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
        if (err)
                return err;
 
-       ice_qvec_toggle_napi(vsi, q_vector, false);
        ice_qp_clean_rings(vsi, q_idx);
        ice_qp_reset_stats(vsi, q_idx);
 
@@ -259,11 +260,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
        if (err)
                return err;
 
-       clear_bit(ICE_CFG_BUSY, vsi->state);
        ice_qvec_toggle_napi(vsi, q_vector, true);
        ice_qvec_ena_irq(vsi, q_vector);
 
        netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+       clear_bit(ICE_CFG_BUSY, vsi->state);
 
        return 0;
 }
index d0cdd63b3d5b24108ae4832a5e09f4b417e82dfe..390977a76de25a42766c314ab94c42e6528ab4c9 100644 (file)
@@ -2087,8 +2087,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
                set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
 
        /* schedule the napi to receive all the marker packets */
+       local_bh_disable();
        for (i = 0; i < vport->num_q_vectors; i++)
                napi_schedule(&vport->q_vectors[i].napi);
+       local_bh_enable();
 
        return idpf_wait_for_marker_event(vport);
 }
index 319c544b9f04ce5e9ef6f09a9fa3e3f641583f47..f9457055612004c10f74379122063e8136fe7d76 100644 (file)
@@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 
        igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
        /* adjust timestamp for the TX latency based on link speed */
-       if (adapter->hw.mac.type == e1000_i210) {
+       if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
                switch (adapter->link_speed) {
                case SPEED_10:
                        adjust = IGB_I210_TX_LATENCY_10;
@@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
                        ktime_t *timestamp)
 {
        struct igb_adapter *adapter = q_vector->adapter;
+       struct e1000_hw *hw = &adapter->hw;
        struct skb_shared_hwtstamps ts;
        __le64 *regval = (__le64 *)va;
        int adjust = 0;
@@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
        igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
 
        /* adjust timestamp for the RX latency based on link speed */
-       if (adapter->hw.mac.type == e1000_i210) {
+       if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
                switch (adapter->link_speed) {
                case SPEED_10:
                        adjust = IGB_I210_RX_LATENCY_10;
index ba8d3fe186aedacd5a7959e6fd9da3408fe71843..81c21a893ede9c0c432d26e03f3baecc3293b27f 100644 (file)
@@ -6487,7 +6487,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
        int cpu = smp_processor_id();
        struct netdev_queue *nq;
        struct igc_ring *ring;
-       int i, drops;
+       int i, nxmit;
 
        if (unlikely(!netif_carrier_ok(dev)))
                return -ENETDOWN;
@@ -6503,16 +6503,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
        /* Avoid transmit queue timeout since we share it with the slow path */
        txq_trans_cond_update(nq);
 
-       drops = 0;
+       nxmit = 0;
        for (i = 0; i < num_frames; i++) {
                int err;
                struct xdp_frame *xdpf = frames[i];
 
                err = igc_xdp_init_tx_descriptor(ring, xdpf);
-               if (err) {
-                       xdp_return_frame_rx_napi(xdpf);
-                       drops++;
-               }
+               if (err)
+                       break;
+               nxmit++;
        }
 
        if (flags & XDP_XMIT_FLUSH)
@@ -6520,7 +6519,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 
        __netif_tx_unlock(nq);
 
-       return num_frames - drops;
+       return nxmit;
 }
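
The drops-to-nxmit rewrite aligns igc with the current ndo_xdp_xmit contract: stop at the first frame that cannot be queued and report how many were consumed; the XDP core frees the remainder, so the old xdp_return_frame_rx_napi() call would now risk a double free. Skeleton of the contract (queue_one() is an invented stand-in for igc_xdp_init_tx_descriptor()):

    nxmit = 0;
    for (i = 0; i < num_frames; i++) {
            if (queue_one(ring, frames[i]))
                    break;          /* leave frames[i..] to the caller */
            nxmit++;
    }
    return nxmit;                   /* frames consumed, not frames dropped */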
 
 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
index bd541527c8c74d6922e8683e2f4493d9b361f67b..99876b765b08bc94e9ba1673205f56e9d140a49d 100644 (file)
@@ -2939,8 +2939,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
 {
-       u32 mask;
        struct ixgbe_hw *hw = &adapter->hw;
+       u32 mask;
 
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
@@ -10524,6 +10524,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
        memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
 }
 
+/**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64 qmask = BIT_ULL(ring);
+       u32 mask;
+
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
+               mask = (qmask & 0xFFFFFFFF);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+               mask = (qmask >> 32);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+               break;
+       default:
+               break;
+       }
+       IXGBE_WRITE_FLUSH(&adapter->hw);
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+               synchronize_irq(adapter->msix_entries[ring].vector);
+       else
+               synchronize_irq(adapter->pdev->irq);
+}
+
 /**
  * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
  * @adapter: adapter structure
@@ -10540,6 +10578,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
        tx_ring = adapter->tx_ring[ring];
        xdp_ring = adapter->xdp_ring[ring];
 
+       ixgbe_irq_disable_single(adapter, ring);
+
+       /* Rx/Tx/XDP Tx share the same napi context. */
+       napi_disable(&rx_ring->q_vector->napi);
+
        ixgbe_disable_txr(adapter, tx_ring);
        if (xdp_ring)
                ixgbe_disable_txr(adapter, xdp_ring);
@@ -10548,9 +10591,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
        if (xdp_ring)
                synchronize_rcu();
 
-       /* Rx/Tx/XDP Tx share the same napi context. */
-       napi_disable(&rx_ring->q_vector->napi);
-
        ixgbe_clean_tx_ring(tx_ring);
        if (xdp_ring)
                ixgbe_clean_tx_ring(xdp_ring);
@@ -10578,9 +10618,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
        tx_ring = adapter->tx_ring[ring];
        xdp_ring = adapter->xdp_ring[ring];
 
-       /* Rx/Tx/XDP Tx share the same napi context. */
-       napi_enable(&rx_ring->q_vector->napi);
-
        ixgbe_configure_tx_ring(adapter, tx_ring);
        if (xdp_ring)
                ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10589,6 +10626,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
        clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
        if (xdp_ring)
                clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+       /* Rx/Tx/XDP Tx share the same napi context. */
+       napi_enable(&rx_ring->q_vector->napi);
+       ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+       IXGBE_WRITE_FLUSH(&adapter->hw);
 }
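
ixgbe_irq_disable_single() masks one vector by splitting a 64-bit queue mask across the two 32-bit EIMS_EX banks before flushing and synchronizing the IRQ, and the reordering after it brings NAPI down before the rings are drained. A standalone sketch of the mask split (write_reg() is a made-up stand-in for IXGBE_WRITE_REG()):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Stand-in for IXGBE_WRITE_REG(); just records the write. */
static void write_reg(int bank, uint32_t val)
{
        printf("EIMS_EX(%d) <= 0x%08x\n", bank, val);
}

static void irq_mask_single(unsigned int ring)
{
        uint64_t qmask = BIT_ULL(ring);
        uint32_t mask;

        mask = (uint32_t)(qmask & 0xFFFFFFFF);  /* queues 0..31 */
        if (mask)
                write_reg(0, mask);
        mask = (uint32_t)(qmask >> 32);         /* queues 32..63 */
        if (mask)
                write_reg(1, mask);
}

int main(void)
{
        irq_mask_single(5);     /* lands in bank 0 */
        irq_mask_single(40);    /* lands in bank 1 */
        return 0;
}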
 
 /**
index e5d6156655ba48ee607c531e7c8951aa32d6e117..516adb50f9f6b2b8d4c43f12b51d83da30aae904 100644 (file)
@@ -415,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
                        return;
        }
 
+       /* AF modifies the given action only if the PF/VF has requested it */
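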
+       if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
+               return;
+
        /* copy VF default entry action to the VF mcam entry */
        rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
                                                 target_func);
index 3e064234f6fe950273e16a80f31b365e3cad4865..98d4306929f3edf3782d573b5f207a04d64fecdb 100644 (file)
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
                return -EOPNOTSUPP;
        }
 
+       if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+           !dev->priv.fw_reset) {
+               NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+               return -EOPNOTSUPP;
+       }
+
        if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
                NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
 
index 928bf24d4b123945afc9df29ea5d758792d269cb..d74a5aaf426863681eac1be35a8ac64054087828 100644 (file)
@@ -261,7 +261,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
 {
        if (mdpll->tracking_netdev)
                return;
-       netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+       dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
        mdpll->tracking_netdev = netdev;
 }
 
@@ -269,7 +269,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
 {
        if (!mdpll->tracking_netdev)
                return;
-       netdev_dpll_pin_clear(mdpll->tracking_netdev);
+       dpll_netdev_pin_clear(mdpll->tracking_netdev);
        mdpll->tracking_netdev = NULL;
 }
 
index 078f56a3cbb2b389499c0b609908972af691a41c..ca05b3252a1b0bd395d07ec37f02edfaa40f477c 100644 (file)
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
 
        WARN_ON_ONCE(tracker->inuse);
        tracker->inuse = true;
-       spin_lock(&list->tracker_list_lock);
+       spin_lock_bh(&list->tracker_list_lock);
        list_add_tail(&tracker->entry, &list->tracker_list_head);
-       spin_unlock(&list->tracker_list_lock);
+       spin_unlock_bh(&list->tracker_list_lock);
 }
 
 static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
 
        WARN_ON_ONCE(!tracker->inuse);
        tracker->inuse = false;
-       spin_lock(&list->tracker_list_lock);
+       spin_lock_bh(&list->tracker_list_lock);
        list_del(&tracker->entry);
-       spin_unlock(&list->tracker_list_lock);
+       spin_unlock_bh(&list->tracker_list_lock);
 }
 
 void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
        struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
        struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
 
-       spin_lock(&cqe_list->tracker_list_lock);
+       spin_lock_bh(&cqe_list->tracker_list_lock);
        list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
                struct sk_buff *skb =
                        mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
                pos->inuse = false;
                list_del(&pos->entry);
        }
-       spin_unlock(&cqe_list->tracker_list_lock);
+       spin_unlock_bh(&cqe_list->tracker_list_lock);
 }
 
 #define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
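
The conversion to spin_lock_bh() above suggests the tracker list is also touched from bottom-half context (the PTP CQE completion path), so process-context users must mask BHs or risk deadlocking against a softirq on the same CPU. A kernel-context sketch of the pattern, not a standalone program:

#include <linux/spinlock.h>
#include <linux/list.h>

struct tracker { struct list_head entry; };

static LIST_HEAD(tracker_list);
static DEFINE_SPINLOCK(tracker_lock);

/* Process-context writer: BHs must be masked because the CQE
 * completion path (softirq) takes the same lock.
 */
static void tracker_add(struct tracker *t)
{
        spin_lock_bh(&tracker_lock);
        list_add_tail(&t->entry, &tracker_list);
        spin_unlock_bh(&tracker_lock);
}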
index 86bf007fd05b7327a79918b5de9beea9353b70e1..b500cc2c9689d1973d8736b7fa4b421e92d4c5ea 100644 (file)
@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 
        if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
                if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
-                       mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+                       mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
                err = -EOPNOTSUPP;
                goto err_check;
        }
index d4ebd8743114573e6da44b6eb2418653ab1c4922..b2cabd6ab86cb9044f8d0dc404fa8a052d31938c 100644 (file)
@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
        mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
 }
 
-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
-                                   struct mlx5e_macsec_sa *sa,
-                                   bool is_tx, struct net_device *netdev, u32 fs_id)
+static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
+                                      struct mlx5e_macsec_sa *sa, bool is_tx,
+                                      struct net_device *netdev, u32 fs_id)
 {
        int action =  (is_tx) ?  MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
                                 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
 
        mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
                                fs_id);
-       mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
        sa->macsec_rule = NULL;
 }
 
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+                                   struct mlx5e_macsec_sa *sa, bool is_tx,
+                                   struct net_device *netdev, u32 fs_id)
+{
+       mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
+       mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+}
+
+static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+                                  struct mlx5e_macsec_sa *sa, bool encrypt,
+                                  bool is_tx, u32 *fs_id)
+{
+       struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+       struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+       struct mlx5_macsec_rule_attrs rule_attrs;
+       union mlx5_macsec_rule *macsec_rule;
+
+       rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+       rule_attrs.sci = sa->sci;
+       rule_attrs.assoc_num = sa->assoc_num;
+       rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+                                     MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+       macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
+       if (!macsec_rule)
+               return -ENOMEM;
+
+       sa->macsec_rule = macsec_rule;
+
+       return 0;
+}
+
 static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
                                struct mlx5e_macsec_sa *sa,
                                bool encrypt, bool is_tx, u32 *fs_id)
 {
        struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
        struct mlx5e_macsec *macsec = priv->macsec;
-       struct mlx5_macsec_rule_attrs rule_attrs;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_macsec_obj_attrs obj_attrs;
-       union mlx5_macsec_rule *macsec_rule;
        int err;
 
        obj_attrs.next_pn = sa->next_pn;
@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
        if (err)
                return err;
 
-       rule_attrs.macsec_obj_id = sa->macsec_obj_id;
-       rule_attrs.sci = sa->sci;
-       rule_attrs.assoc_num = sa->assoc_num;
-       rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
-                                     MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
-
-       macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
-       if (!macsec_rule) {
-               err = -ENOMEM;
-               goto destroy_macsec_object;
+       if (sa->active) {
+               err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
+               if (err)
+                       goto destroy_macsec_object;
        }
 
-       sa->macsec_rule = macsec_rule;
-
        return 0;
 
 destroy_macsec_object:
@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
                goto destroy_sa;
 
        macsec_device->tx_sa[assoc_num] = tx_sa;
-       if (!secy->operational ||
-           assoc_num != tx_sc->encoding_sa ||
-           !tx_sa->active)
+       if (!secy->operational)
                goto out;
 
        err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
                goto out;
 
        if (ctx_tx_sa->active) {
-               err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+               err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
                if (err)
                        goto out;
        } else {
@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
                        goto out;
                }
 
-               mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+               mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
        }
 out:
        mutex_unlock(&macsec->lock);
@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
                goto out;
        }
 
-       mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
-                               rx_sc->sc_xarray_element->fs_id);
+       if (rx_sa->active)
+               mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+                                       rx_sc->sc_xarray_element->fs_id);
        mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
        kfree(rx_sa);
        rx_sc->rx_sa[assoc_num] = NULL;
@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
                        if (!rx_sa || !rx_sa->macsec_rule)
                                continue;
 
-                       mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
-                                               rx_sc->sc_xarray_element->fs_id);
+                       mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
+                                                  rx_sc->sc_xarray_element->fs_id);
                }
        }
 
@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
                                continue;
 
                        if (rx_sa->active) {
-                               err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
-                                                          &rx_sc->sc_xarray_element->fs_id);
+                               err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
+                                                             &rx_sc->sc_xarray_element->fs_id);
                                if (err)
                                        goto out;
                        }
@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
                if (!tx_sa)
                        continue;
 
-               mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+               mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
        }
 
        for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
                        continue;
 
                if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
-                       err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+                       err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
                        if (err)
                                goto out;
                }
index 5c166d9d2dca62a8db671c7cb62476781a8d1b97..2fa076b23fbead06bceb6697e0ebb0238bb5be7e 100644 (file)
@@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                mlx5e_skb_cb_hwtstamp_init(skb);
                mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
                                           metadata_index);
+               /* ensure skb is put on metadata_map before tracking the index */
+               wmb();
                mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
                if (!netif_tx_queue_stopped(sq->txq) &&
                    mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
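
The wmb() added above orders the metadata-map store before the tracker-list insert, so a completion that finds the tracked index is guaranteed to also see the skb in the map. A userspace analogue of that publish ordering using C11 release/acquire:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static void *metadata_map[64];
static _Atomic int published_idx = -1;

/* Producer: make the map entry visible before the index that
 * advertises it (release pairs with the consumer's acquire,
 * playing the role of the driver's wmb()).
 */
static void publish(int idx, void *skb)
{
        metadata_map[idx] = skb;
        atomic_store_explicit(&published_idx, idx, memory_order_release);
}

/* Consumer: the acquire load guarantees the map entry is visible. */
static void *consume(void)
{
        int idx = atomic_load_explicit(&published_idx, memory_order_acquire);

        return idx >= 0 ? metadata_map[idx] : NULL;
}

int main(void)
{
        int skb = 42;

        publish(7, &skb);
        printf("%d\n", *(int *)consume());
        return 0;
}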
index 190f10aba17028211fc6c34abaa7b35d44310ba2..5a0047bdcb5105ae4992578003007d21dd4fa1b5 100644 (file)
@@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
 
        xa_for_each(&esw->offloads.vport_reps, i, rep) {
                rpriv = rep->rep_data[REP_ETH].priv;
-               if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
+               if (!rpriv || !rpriv->netdev)
                        continue;
 
                rhashtable_walk_enter(&rpriv->tc_ht, &iter);
index b0455134c98eff62c82b3d35bc8c600dc059d960..baaae628b0a0f6510e2c350cbab0b6309b32da52 100644 (file)
@@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
 }
 
 static bool
-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
 {
-       bool vf_dest = false, pf_dest = false;
+       bool internal_dest = false, external_dest = false;
        int i;
 
        for (i = 0; i < max_dest; i++) {
-               if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+               if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+                   dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
                        continue;
 
-               if (dests[i].vport.num == MLX5_VPORT_UPLINK)
-                       pf_dest = true;
+               /* Uplink dest is external, but it is treated as internal
+                * when a reformat is present, because firmware uses LB+hairpin to support it.
+                */
+               if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
+                   !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
+                       external_dest = true;
                else
-                       vf_dest = true;
+                       internal_dest = true;
 
-               if (vf_dest && pf_dest)
+               if (internal_dest && external_dest)
                        return true;
        }
 
@@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 
                /* Header rewrite with combined wire+loopback in FDB is not allowed */
                if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
-                   esw_dests_to_vf_pf_vports(dest, i)) {
+                   esw_dests_to_int_external(dest, i)) {
                        esw_warn(esw->dev,
-                                "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+                                "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
                        rule = ERR_PTR(-EINVAL);
                        goto err_esw_get;
                }
@@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
        return 0;
 }
 
-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
-{
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
-       struct net *devl_net, *netdev_net;
-       bool ret = false;
-
-       mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
-       if (dev->mlx5e_res.uplink_netdev) {
-               netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
-               devl_net = devlink_net(devlink);
-               ret = net_eq(devl_net, netdev_net);
-       }
-       mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
-       return ret;
-}
-
 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
 {
        struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;
 
-       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
-           !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
-               return -EPERM;
-       }
-
        mlx5_lag_disable_change(esw->dev);
        err = mlx5_esw_try_lock(esw);
        if (err < 0) {
index f27eab6e49299059ed26ed5edc0cf767b997a08f..2911aa34a5be3f9738b07635a421ba996e4f749a 100644 (file)
@@ -703,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
 {
        struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
+       if (!fw_reset)
+               return;
+
        MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
        mlx5_eq_notifier_register(dev, &fw_reset->nb);
 }
 
 void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
 {
-       mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+       struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+       if (!fw_reset)
+               return;
+
+       mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
 }
 
 void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
 {
        struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
+       if (!fw_reset)
+               return;
+
        set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
        cancel_work_sync(&fw_reset->fw_live_patch_work);
        cancel_work_sync(&fw_reset->reset_request_work);
@@ -733,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
 
 int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
 {
-       struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+       struct mlx5_fw_reset *fw_reset;
        int err;
 
+       if (!MLX5_CAP_MCAM_REG(dev, mfrl))
+               return 0;
+
+       fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
        if (!fw_reset)
                return -ENOMEM;
        fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
@@ -771,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
+       if (!fw_reset)
+               return;
+
        devl_params_unregister(priv_to_devlink(dev),
                               mlx5_fw_reset_devlink_params,
                               ARRAY_SIZE(mlx5_fw_reset_devlink_params));
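
The fw_reset changes above make the whole subsystem optional: init returns 0 rather than an error when the MFRL capability is absent, leaving the private pointer NULL, and every entry point bails out on that NULL. A standalone sketch of the optional-subsystem pattern (names are illustrative):

#include <stdlib.h>

struct fw_reset_state { int reset_flags; };

struct device_state {
        int has_mfrl;                   /* capability bit, illustrative */
        struct fw_reset_state *fw_reset;
};

/* Returns 0 both on success and when the feature is absent. */
static int fw_reset_init(struct device_state *d)
{
        if (!d->has_mfrl)
                return 0;               /* unsupported: fw_reset stays NULL */

        d->fw_reset = calloc(1, sizeof(*d->fw_reset));
        return d->fw_reset ? 0 : -1;
}

static void fw_reset_events_start(struct device_state *d)
{
        if (!d->fw_reset)               /* every op guards the NULL case */
                return;
        /* ... register the notifier ... */
}

int main(void)
{
        struct device_state d = { 0 };

        if (fw_reset_init(&d))
                return 1;
        fw_reset_events_start(&d);      /* safe no-op without the feature */
        free(d.fw_reset);
        return 0;
}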
index 8ff6dc9bc8033e74d20c2c5423e4a87d64d05b78..b5c709bba1553e1811767a82d07470f4648b0ca5 100644 (file)
@@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
        struct health_buffer __iomem *h = health->health;
        u8 synd = ioread8(&h->synd);
 
+       devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
        if (!synd)
                return 0;
 
-       devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
        devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
 
        return 0;
index 4af285918ea2a45cae16ca01ec1ab8fd3f5370c9..75868b3f548ec40549c02ad1ac2a3c5c388eade4 100644 (file)
@@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5,
                                 list) {
                if ((vid == 0 || mact_entry->vid == vid) &&
                    ether_addr_equal(addr, mact_entry->mac)) {
+                       sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+
                        list_del(&mact_entry->list);
                        devm_kfree(sparx5->dev, mact_entry);
-
-                       sparx5_mact_forget(sparx5, addr, mact_entry->vid);
                }
        }
        mutex_unlock(&sparx5->mact_lock);
index d1f7fc8b1b71ab68775f40ad592ebc8aa0e57211..3c066b62e68947cf81fc34c1169cc2edd2991d5a 100644 (file)
@@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sparx5);
        sparx5->pdev = pdev;
        sparx5->dev = &pdev->dev;
+       spin_lock_init(&sparx5->tx_lock);
 
        /* Do switch core reset if available */
        reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
index 6f565c0c0c3dcd3d3889abb1bf8eac72899037fc..316fed5f27355207146875ee80b3636420ca4945 100644 (file)
@@ -280,6 +280,7 @@ struct sparx5 {
        int xtr_irq;
        /* Frame DMA */
        int fdma_irq;
+       spinlock_t tx_lock; /* lock for frame transmission */
        struct sparx5_rx rx;
        struct sparx5_tx tx;
        /* PTP */
index 6db6ac6a3bbc26db972e2f611ddd7c72fac29c16..ac7e1cffbcecf0ccc4f89e394730d90ec2ada2f8 100644 (file)
@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
        }
 
        skb_tx_timestamp(skb);
+       spin_lock(&sparx5->tx_lock);
        if (sparx5->fdma_irq > 0)
                ret = sparx5_fdma_xmit(sparx5, ifh, skb);
        else
                ret = sparx5_inject(sparx5, ifh, skb, dev);
+       spin_unlock(&sparx5->tx_lock);
 
        if (ret == -EBUSY)
                goto busy;
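
The new sparx5 tx_lock serializes the two injection paths (FDMA and register-based), which share per-port state; it is initialized in probe, before the netdev can transmit. A kernel-context sketch of the shape, not a standalone program:

#include <linux/spinlock.h>

struct port_state {
        spinlock_t tx_lock;     /* guards both injection paths */
        int fdma_irq;
};

static void port_probe_init(struct port_state *s)
{
        spin_lock_init(&s->tx_lock);    /* before any transmit can run */
}

static void port_xmit(struct port_state *s)
{
        spin_lock(&s->tx_lock);
        /* ... sparx5_fdma_xmit() or sparx5_inject(), never concurrently ... */
        spin_unlock(&s->tx_lock);
}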
index c49aa358e42444de33b3a3dd08832bf8f56af394..6ba8d4aca0a038b88e7f3ae3a8299ea0a55bd7e0 100644 (file)
@@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic)
                        bars[i].len = 0;
                }
        }
+       ionic->num_bars = 0;
 }
 
 void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
@@ -215,15 +216,17 @@ out:
 
 static void ionic_clear_pci(struct ionic *ionic)
 {
-       ionic->idev.dev_info_regs = NULL;
-       ionic->idev.dev_cmd_regs = NULL;
-       ionic->idev.intr_status = NULL;
-       ionic->idev.intr_ctrl = NULL;
-
-       ionic_unmap_bars(ionic);
-       pci_release_regions(ionic->pdev);
+       if (ionic->num_bars) {
+               ionic->idev.dev_info_regs = NULL;
+               ionic->idev.dev_cmd_regs = NULL;
+               ionic->idev.intr_status = NULL;
+               ionic->idev.intr_ctrl = NULL;
+
+               ionic_unmap_bars(ionic);
+               pci_release_regions(ionic->pdev);
+       }
 
-       if (atomic_read(&ionic->pdev->enable_cnt) > 0)
+       if (pci_is_enabled(ionic->pdev))
                pci_disable_device(ionic->pdev);
 }
 
index 1e7c71f7f081b159e83271eeeb47eb35ac401d69..746072b4dbd0e0d37352bc771aa0c7e963eaa26f 100644 (file)
@@ -319,22 +319,32 @@ do_check_time:
 
 u8 ionic_dev_cmd_status(struct ionic_dev *idev)
 {
+       if (!idev->dev_cmd_regs)
+               return (u8)PCI_ERROR_RESPONSE;
        return ioread8(&idev->dev_cmd_regs->comp.comp.status);
 }
 
 bool ionic_dev_cmd_done(struct ionic_dev *idev)
 {
+       if (!idev->dev_cmd_regs)
+               return false;
        return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
 }
 
 void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
 {
+       if (!idev->dev_cmd_regs)
+               return;
        memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
 }
 
 void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
 {
        idev->opcode = cmd->cmd.opcode;
+
+       if (!idev->dev_cmd_regs)
+               return;
+
        memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
        iowrite32(0, &idev->dev_cmd_regs->done);
        iowrite32(1, &idev->dev_cmd_regs->doorbell);
index cd3c0b01402e64360c9104a069f0a9bd5b23b65f..0ffc9c4904ac80320cc9c26f51ea6e52abf60784 100644 (file)
@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
                           void *p)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
+       struct ionic_dev *idev;
        unsigned int offset;
        unsigned int size;
 
        regs->version = IONIC_DEV_CMD_REG_VERSION;
 
+       idev = &lif->ionic->idev;
+       if (!idev->dev_info_regs)
+               return;
+
        offset = 0;
        size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
        memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
 
        offset += size;
        size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
-       memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
+       memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
 }
 
 static void ionic_get_link_ext_stats(struct net_device *netdev,
index 5f40324cd243fe2f2f79b924920951304d25df45..3c209c1a23373339b8455387105128f2dd9057be 100644 (file)
@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
        dl = priv_to_devlink(ionic);
        devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
 
+       if (!idev->dev_cmd_regs) {
+               err = -ENXIO;
+               goto err_out;
+       }
+
        buf_sz = sizeof(idev->dev_cmd_regs->data);
 
        netdev_dbg(netdev,
index cf2d5ad7b68cc85195e516697d82238c6a7f5924..fcb44ceeb6aa51d944a12b411d904f2715a43be7 100644 (file)
@@ -3559,7 +3559,10 @@ int ionic_lif_init(struct ionic_lif *lif)
                        goto err_out_notifyq_deinit;
        }
 
-       err = ionic_init_nic_features(lif);
+       if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+               err = ionic_set_nic_features(lif, lif->netdev->features);
+       else
+               err = ionic_init_nic_features(lif);
        if (err)
                goto err_out_notifyq_deinit;
 
index 165ab08ad2dda8ea15cca7aba88f586b0010c3f2..2f479de329fec5ef039c5e4ebaa3ea79d88a04a5 100644 (file)
@@ -416,6 +416,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic)
 {
        struct ionic_dev *idev = &ionic->idev;
 
+       if (!idev->dev_cmd_regs)
+               return;
+
        iowrite32(0, &idev->dev_cmd_regs->doorbell);
        memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
 }
index 1bd34b2a47e81494eeddf72814b585e47d0b8c60..29367105df548271d3aa22cfad80a40dece256c1 100644 (file)
@@ -224,7 +224,7 @@ static const struct stmmac_hwif_entry {
                .regs = {
                        .ptp_off = PTP_GMAC4_OFFSET,
                        .mmc_off = MMC_GMAC4_OFFSET,
-                       .est_off = EST_XGMAC_OFFSET,
+                       .est_off = EST_GMAC4_OFFSET,
                },
                .desc = &dwmac4_desc_ops,
                .dma = &dwmac410_dma_ops,
index 75d02970450321be0e8f8e3bc2b0e0330f17e4af..7c6aef033a456455e4334466bf276755f33dbd47 100644 (file)
@@ -2672,7 +2672,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
                        }
                        if (skb) {
                                stmmac_get_tx_hwtstamp(priv, p, skb);
-                       } else {
+                       } else if (tx_q->xsk_pool &&
+                                  xp_tx_metadata_enabled(tx_q->xsk_pool)) {
                                struct stmmac_xsk_tx_complete tx_compl = {
                                        .priv = priv,
                                        .desc = p,
@@ -4005,8 +4006,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
 {
        set_bit(__FPE_REMOVING, &priv->fpe_task_state);
 
-       if (priv->fpe_wq)
+       if (priv->fpe_wq) {
                destroy_workqueue(priv->fpe_wq);
+               priv->fpe_wq = NULL;
+       }
 
        netdev_info(priv->dev, "FPE workqueue stop");
 }
@@ -6059,11 +6062,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
        struct net_device *dev = (struct net_device *)dev_id;
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       if (unlikely(!dev)) {
-               netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-               return IRQ_NONE;
-       }
-
        /* Check if adapter is up */
        if (test_bit(STMMAC_DOWN, &priv->state))
                return IRQ_HANDLED;
@@ -6079,11 +6077,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
        struct net_device *dev = (struct net_device *)dev_id;
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       if (unlikely(!dev)) {
-               netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-               return IRQ_NONE;
-       }
-
        /* Check if adapter is up */
        if (test_bit(STMMAC_DOWN, &priv->state))
                return IRQ_HANDLED;
@@ -6105,11 +6098,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
        dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
        priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
 
-       if (unlikely(!data)) {
-               netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-               return IRQ_NONE;
-       }
-
        /* Check if adapter is up */
        if (test_bit(STMMAC_DOWN, &priv->state))
                return IRQ_HANDLED;
@@ -6136,11 +6124,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
        dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
        priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
 
-       if (unlikely(!data)) {
-               netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-               return IRQ_NONE;
-       }
-
        /* Check if adapter is up */
        if (test_bit(STMMAC_DOWN, &priv->state))
                return IRQ_HANDLED;
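
The !dev/!data checks deleted above were dead code: priv had already been derived from the very pointer being tested, and the error path itself dereferenced that bogus priv via priv->dev. A standalone sketch of why such a deref-before-check guard can never fire usefully (types are illustrative):

#include <stdio.h>

struct net_device;
struct priv { struct net_device *dev; };
struct net_device { struct priv p; };

static struct priv *netdev_priv(struct net_device *dev)
{
        return &dev->p;                 /* pointer arithmetic on dev */
}

static int isr(void *dev_id)
{
        struct net_device *dev = dev_id;
        struct priv *priv = netdev_priv(dev);

        if (!dev) {
                /* Dead: dev_id is never NULL for a registered handler,
                 * and this path would use the bogus priv->dev anyway.
                 */
                fprintf(stderr, "invalid dev pointer\n");
                return 0;
        }
        return priv != NULL;
}

int main(void)
{
        struct net_device d = { { &d } };

        return !isr(&d);
}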
index 9d2f4ac783e43502586b27283a4db73351ca0583..2939a21ca74f3cf0f627981df74a949e9c61011e 100644 (file)
@@ -294,7 +294,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
                   txqueue,
                   netif_tx_queue_stopped(netif_txq),
                   jiffies_to_msecs(jiffies - trans_start),
-                  dql_avail(&netif_txq->dql),
+                  netdev_queue_dql_avail(netif_txq),
                   k3_cppi_desc_pool_avail(tx_chn->desc_pool));
 
        if (netif_tx_queue_stopped(netif_txq)) {
index d5b75af163d35e6b257e9d3dcb48ada80f8a0f20..c1b0d35c8d05207b351b9313f6ae24b986ff3ca1 100644 (file)
@@ -384,18 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
        if (gelic_descr_get_status(descr) !=  GELIC_DESCR_DMA_NOT_IN_USE)
                dev_info(ctodev(card), "%s: ERROR status\n", __func__);
 
-       descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
-       if (!descr->skb) {
-               descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
-               return -ENOMEM;
-       }
        descr->hw_regs.dmac_cmd_status = 0;
        descr->hw_regs.result_size = 0;
        descr->hw_regs.valid_size = 0;
        descr->hw_regs.data_error = 0;
        descr->hw_regs.payload.dev_addr = 0;
        descr->hw_regs.payload.size = 0;
-       descr->skb = NULL;
+
+       descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
+       if (!descr->skb) {
+               descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
+               return -ENOMEM;
+       }
 
        offset = ((unsigned long)descr->skb->data) &
                (GELIC_NET_RXBUF_ALIGN - 1);
index 32c51c244153bd760b9f58001906c04c8b0f37ff..c4ed36c71897439fc8f6c11d069c88996e2a2a3c 100644 (file)
@@ -221,7 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
        struct genevehdr *gnvh = geneve_hdr(skb);
        struct metadata_dst *tun_dst = NULL;
        unsigned int len;
-       int err = 0;
+       int nh, err = 0;
        void *oiph;
 
        if (ip_tunnel_collect_metadata() || gs->collect_md) {
@@ -272,9 +272,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                skb->pkt_type = PACKET_HOST;
        }
 
-       oiph = skb_network_header(skb);
+       /* Save offset of outer header relative to skb->head,
+        * because we are going to reset the network header to the inner header
+        * and might change skb->head.
+        */
+       nh = skb_network_header(skb) - skb->head;
+
        skb_reset_network_header(skb);
 
+       if (!pskb_inet_may_pull(skb)) {
+               DEV_STATS_INC(geneve->dev, rx_length_errors);
+               DEV_STATS_INC(geneve->dev, rx_errors);
+               goto drop;
+       }
+
+       /* Get the outer header. */
+       oiph = skb->head + nh;
+
        if (geneve_get_sk_family(gs) == AF_INET)
                err = IP_ECN_decapsulate(oiph, skb);
 #if IS_ENABLED(CONFIG_IPV6)
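
pskb_inet_may_pull() can reallocate skb->head, which is why the hunk above caches the outer header as an offset first and rebuilds the pointer afterwards. A userspace analogue with realloc():

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *head = malloc(32);

        if (!head)
                return 1;
        strcpy(head, "IP|GENEVE|inner");

        char *outer = head;             /* raw pointer into the buffer */
        ptrdiff_t nh = outer - head;    /* an offset survives a realloc */

        head = realloc(head, 65536);    /* may move the buffer entirely */
        if (!head)
                return 1;

        outer = head + nh;              /* rebuild the pointer afterwards */
        printf("%.2s\n", outer);        /* still prints "IP" */
        free(head);
        return 0;
}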
index b1919278e931f4e9fb6b2d2ec2feb2193b2cda61..2b5357d94ff5683049510c71c932be05abe0f211 100644 (file)
@@ -1903,26 +1903,26 @@ static int __init gtp_init(void)
 
        get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
 
-       err = rtnl_link_register(&gtp_link_ops);
+       err = register_pernet_subsys(&gtp_net_ops);
        if (err < 0)
                goto error_out;
 
-       err = genl_register_family(&gtp_genl_family);
+       err = rtnl_link_register(&gtp_link_ops);
        if (err < 0)
-               goto unreg_rtnl_link;
+               goto unreg_pernet_subsys;
 
-       err = register_pernet_subsys(&gtp_net_ops);
+       err = genl_register_family(&gtp_genl_family);
        if (err < 0)
-               goto unreg_genl_family;
+               goto unreg_rtnl_link;
 
        pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
                sizeof(struct pdp_ctx));
        return 0;
 
-unreg_genl_family:
-       genl_unregister_family(&gtp_genl_family);
 unreg_rtnl_link:
        rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+       unregister_pernet_subsys(&gtp_net_ops);
 error_out:
        pr_err("error loading GTP module loaded\n");
        return err;
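
The gtp_init() reorder registers the pernet subsystem before the rtnl and genl families, since their handlers can run as soon as registration succeeds and expect the per-netns state to exist; the error labels are reordered so teardown unwinds in reverse. A standalone sketch of the goto-ladder unwind (registration functions are simulated):

#include <stdio.h>

static int reg_pernet(void) { return 0; }
static int reg_rtnl(void)   { return 0; }
static int reg_genl(void)   { return -1; }      /* simulate a failure */
static void unreg_pernet(void) { puts("unreg pernet"); }
static void unreg_rtnl(void)   { puts("unreg rtnl"); }

static int module_init_like(void)
{
        int err;

        err = reg_pernet();             /* dependencies first... */
        if (err)
                goto out;
        err = reg_rtnl();
        if (err)
                goto err_pernet;
        err = reg_genl();
        if (err)
                goto err_rtnl;
        return 0;

err_rtnl:                               /* ...unwound in reverse order */
        unreg_rtnl();
err_pernet:
        unreg_pernet();
out:
        return err;
}

int main(void)
{
        return module_init_like() ? 1 : 0;
}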
index 4bc05948f772d8b009e692a62fec564c7380aae3..a78c692f2d3c5dde24879254cab725bc97072634 100644 (file)
@@ -212,7 +212,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
        u32 unit_count;
        u32 unit;
 
-       unit_count = roundup(ipa->endpoint_count, 32);
+       unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
        for (unit = 0; unit < unit_count; unit++) {
                const struct reg *reg;
                u32 val;
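
roundup() rounds a value up to a multiple of its second argument, while DIV_ROUND_UP() yields the number of such units, which is what the loop bound above needs. A worked demonstration:

#include <stdio.h>

#define roundup(x, y)      ((((x) + (y) - 1) / (y)) * (y))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int endpoints = 20;

        /* units of 32 endpoints: 1 register to walk, not 32 */
        printf("roundup      = %u\n", roundup(endpoints, 32));      /* 32 */
        printf("DIV_ROUND_UP = %u\n", DIV_ROUND_UP(endpoints, 32)); /* 1  */
        return 0;
}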
index 894172a3e15fe8a6a86e38b64246ebefcb65362b..337899c69738ec46c2b585db76e11fa25738560e 100644 (file)
@@ -421,9 +421,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
                                ERR_PTR(ret));
                        return ret;
                }
+
+               return genphy_soft_reset(phydev);
        }
 
-       return genphy_soft_reset(phydev);
+       return 0;
 }
 
 static int rtl821x_suspend(struct phy_device *phydev)
index 4a4f8c8e79fa12dc84a8c83cefbf964dd40e1aa2..8f95a562b8d0c471c44591629e04809f7faef9b2 100644 (file)
@@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                                   tun->tfiles[tun->numqueues - 1]);
                ntfile = rtnl_dereference(tun->tfiles[index]);
                ntfile->queue_index = index;
+               ntfile->xdp_rxq.queue_index = index;
                rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
                                   NULL);
 
index 99ec1d4a972db8c1232ce8ee8eb8d97385a9b5f0..8b6d6a1b3c2eca086e77915e26428c1110127f4d 100644 (file)
@@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
        err = dm_read_shared_word(dev, 1, loc, &res);
        if (err < 0) {
                netdev_err(dev->net, "MDIO read error: %d\n", err);
-               return err;
+               return 0;
        }
 
        netdev_dbg(dev->net,
index a6d653ff552a261ca50d331dd7d7aa875ca3c362..d2aa2c5b1989da8a7e099dfdef88c087da3cf37b 100644 (file)
@@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 
                lan78xx_rx_urb_submit_all(dev);
 
+               local_bh_disable();
                napi_schedule(&dev->napi);
+               local_bh_enable();
        }
 
        return 0;
@@ -3033,7 +3035,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        if (dev->chipid == ID_REV_CHIP_ID_7801_)
                buf &= ~MAC_CR_GMII_EN_;
 
-       if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+       if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
+           dev->chipid == ID_REV_CHIP_ID_7850_) {
                ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
                if (!ret && sig != EEPROM_INDICATOR) {
                        /* Implies there is no external eeprom. Set mac speed */
@@ -3132,7 +3135,8 @@ static int lan78xx_open(struct net_device *net)
 done:
        mutex_unlock(&dev->dev_mutex);
 
-       usb_autopm_put_interface(dev->intf);
+       if (ret < 0)
+               usb_autopm_put_interface(dev->intf);
 
        return ret;
 }
index a530f20ee257550141e5ec7c17b5fba0087db248..2fa46baa589e5e87e12e145fe46268bdaf9fc219 100644 (file)
@@ -2104,6 +2104,11 @@ static const struct usb_device_id products[] = {
                USB_DEVICE(0x0424, 0x9E08),
                .driver_info = (unsigned long) &smsc95xx_info,
        },
+       {
+               /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
+               USB_DEVICE(0x0878, 0x1400),
+               .driver_info = (unsigned long)&smsc95xx_info,
+       },
        {
                /* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
                USB_DEVICE(0x184F, 0x0051),
index 578e36ea1589c11f1ca26b6e05a84b455d22999e..cd4a6fe458f95d7bbc3c468ae8585d06cf0ac097 100644 (file)
@@ -1208,14 +1208,6 @@ static int veth_enable_xdp(struct net_device *dev)
                                veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
                                return err;
                        }
-
-                       if (!veth_gro_requested(dev)) {
-                               /* user-space did not require GRO, but adding XDP
-                                * is supposed to get GRO working
-                                */
-                               dev->features |= NETIF_F_GRO;
-                               netdev_features_change(dev);
-                       }
                }
        }
 
@@ -1235,18 +1227,9 @@ static void veth_disable_xdp(struct net_device *dev)
        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
 
-       if (!netif_running(dev) || !veth_gro_requested(dev)) {
+       if (!netif_running(dev) || !veth_gro_requested(dev))
                veth_napi_del(dev);
 
-               /* if user-space did not require GRO, since adding XDP
-                * enabled it, clear it now
-                */
-               if (!veth_gro_requested(dev) && netif_running(dev)) {
-                       dev->features &= ~NETIF_F_GRO;
-                       netdev_features_change(dev);
-               }
-       }
-
        veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
 }
 
@@ -1478,7 +1461,8 @@ static int veth_alloc_queues(struct net_device *dev)
        struct veth_priv *priv = netdev_priv(dev);
        int i;
 
-       priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
+       priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
+                           GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
        if (!priv->rq)
                return -ENOMEM;
 
@@ -1494,7 +1478,7 @@ static void veth_free_queues(struct net_device *dev)
 {
        struct veth_priv *priv = netdev_priv(dev);
 
-       kfree(priv->rq);
+       kvfree(priv->rq);
 }
 
 static int veth_dev_init(struct net_device *dev)
@@ -1654,6 +1638,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                }
 
                if (!old_prog) {
+                       if (!veth_gro_requested(dev)) {
+                               /* user-space did not require GRO, but adding
+                                * XDP is supposed to get GRO working
+                                */
+                               dev->features |= NETIF_F_GRO;
+                               netdev_features_change(dev);
+                       }
+
                        peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
                        peer->max_mtu = max_mtu;
                }
@@ -1669,6 +1661,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                        if (dev->flags & IFF_UP)
                                veth_disable_xdp(dev);
 
+                       /* if user-space did not require GRO, since adding XDP
+                        * enabled it, clear it now
+                        */
+                       if (!veth_gro_requested(dev)) {
+                               dev->features &= ~NETIF_F_GRO;
+                               netdev_features_change(dev);
+                       }
+
                        if (peer) {
                                peer->hw_features |= NETIF_F_GSO_SOFTWARE;
                                peer->max_mtu = ETH_MAX_MTU;
index 9c69d3674384609b8a7c376900e07a04441c24b0..e6c0f928a6bbf338ca240214635313c05c4e8751 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -66,6 +66,16 @@ enum iwl_gen2_tx_fifo {
        IWL_GEN2_TRIG_TX_FIFO_VO,
 };
 
+enum iwl_bz_tx_fifo {
+       IWL_BZ_EDCA_TX_FIFO_BK,
+       IWL_BZ_EDCA_TX_FIFO_BE,
+       IWL_BZ_EDCA_TX_FIFO_VI,
+       IWL_BZ_EDCA_TX_FIFO_VO,
+       IWL_BZ_TRIG_TX_FIFO_BK,
+       IWL_BZ_TRIG_TX_FIFO_BE,
+       IWL_BZ_TRIG_TX_FIFO_VI,
+       IWL_BZ_TRIG_TX_FIFO_VO,
+};
 /**
  * enum iwl_tx_queue_cfg_actions - TXQ config options
  * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
index 4582afb149d720d077f30c0e7bb1814e5106d453..05b64176859e809986082c002f91eef247e83add 100644 (file)
@@ -1279,7 +1279,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
                mvm->net_detect = true;
        } else {
-               struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+               struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+                       .offloading_tid = 0,
+               };
 
                wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
 
@@ -1291,6 +1293,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                        goto out_noreset;
                }
 
+               ret = iwl_mvm_sta_ensure_queue(
+                       mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+               if (ret)
+                       goto out_noreset;
+
                ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
                                                vif, mvmvif, ap_sta);
                if (ret)
index c4f96125cf33af0eb066c3950e6dba18d505c4f4..25a5a31e63c2a33a0fc0bbe7317f65df62b9e8de 100644 (file)
@@ -31,6 +31,17 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
        IWL_GEN2_TRIG_TX_FIFO_BK,
 };
 
+const u8 iwl_mvm_ac_to_bz_tx_fifo[] = {
+       IWL_BZ_EDCA_TX_FIFO_VO,
+       IWL_BZ_EDCA_TX_FIFO_VI,
+       IWL_BZ_EDCA_TX_FIFO_BE,
+       IWL_BZ_EDCA_TX_FIFO_BK,
+       IWL_BZ_TRIG_TX_FIFO_VO,
+       IWL_BZ_TRIG_TX_FIFO_VI,
+       IWL_BZ_TRIG_TX_FIFO_BE,
+       IWL_BZ_TRIG_TX_FIFO_BK,
+};
+
 struct iwl_mvm_mac_iface_iterator_data {
        struct iwl_mvm *mvm;
        struct ieee80211_vif *vif;
index 40627961b834a2ee860445b4557cf19498f4e166..81dbef6947f5578dd50e12f157124fa48fcdb728 100644 (file)
@@ -1581,12 +1581,16 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
 
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
 
 static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
                                           enum ieee80211_ac_numbers ac)
 {
-       return iwl_mvm_has_new_tx_api(mvm) ?
-               iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+       if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+               return iwl_mvm_ac_to_bz_tx_fifo[ac];
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return iwl_mvm_ac_to_gen2_tx_fifo[ac];
+       return iwl_mvm_ac_to_tx_fifo[ac];
 }
 
 struct iwl_rate_info {
index 2a3ca97859749749fff954e097a9d62ae86f5d24..c2e0cff740e9281ee7f73a2a9db4d0add160fee1 100644 (file)
@@ -1502,6 +1502,34 @@ out_err:
        return ret;
 }
 
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+                            struct ieee80211_txq *txq)
+{
+       struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+       int ret = -EINVAL;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+           !txq->sta) {
+               return 0;
+       }
+
+       if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+               set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               ret = 0;
+       }
+
+       local_bh_disable();
+       spin_lock(&mvm->add_stream_lock);
+       if (!list_empty(&mvmtxq->list))
+               list_del_init(&mvmtxq->list);
+       spin_unlock(&mvm->add_stream_lock);
+       local_bh_enable();
+
+       return ret;
+}
+
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 {
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
index b33a0ce096d46c2f92eb127d8942062b42f39345..3cf8a70274ce888833014b4492348c233be0a4c0 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2016 Intel Deutschland GmbH
  */
@@ -571,6 +571,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
                                       bool disable);
 
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                         struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
index b71267c6667cf80309532c4ca2602354ccfc34a2..fa8cd33be1312dc57f075cf6557270794dcc2939 100644 (file)
@@ -1304,7 +1304,7 @@ static struct device_node *parse_remote_endpoint(struct device_node *np,
                                                 int index)
 {
        /* Return NULL for index > 0 to signify end of remote-endpoints. */
-       if (!index || strcmp(prop_name, "remote-endpoint"))
+       if (index > 0 || strcmp(prop_name, "remote-endpoint"))
                return NULL;
 
        return of_graph_get_remote_port_parent(np);
index c8be056c248ded75cae622f1d8cd82bcc81e5500..cfd84a899c82d881f9ed5c446aed0c204bfd3cd4 100644 (file)
@@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
 
        return (irq_hw_number_t)desc->msi_index |
                pci_dev_id(dev) << 11 |
-               (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+               ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
 }
 
 static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
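
Without the cast above, the masked domain number is a 32-bit value, so the left shift by 27 discards its high bits before the result is widened to irq_hw_number_t; widening first preserves them. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t domain = 0x40;         /* any domain with bits >= 5 set */

        uint64_t bad  = domain << 27;            /* shifted as 32-bit: wraps */
        uint64_t good = (uint64_t)domain << 27;  /* widened first: correct */

        printf("bad  = 0x%llx\n", (unsigned long long)bad);  /* 0x0         */
        printf("good = 0x%llx\n", (unsigned long long)good); /* 0x200000000 */
        return 0;
}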
index bc0d414a6aff94ee8980821666eb0c18bca4954f..308c9969642e1f149cdebd9f8aed7812adbc5f1f 100644 (file)
@@ -59,7 +59,7 @@
 #define   CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK     GENMASK_ULL(63, 59)
 
 #define CXL_PMU_FILTER_CFG_REG(n, f)   (0x400 + 4 * ((f) + (n) * 8))
-#define   CXL_PMU_FILTER_CFG_VALUE_MSK                 GENMASK(15, 0)
+#define   CXL_PMU_FILTER_CFG_VALUE_MSK                 GENMASK(31, 0)
 
 #define CXL_PMU_COUNTER_REG(n)         (0xc00 + 8 * (n))
 
@@ -314,9 +314,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event)
 }
 
 /*
- * CPMU specification allows for 8 filters, each with a 16 bit value...
- * So we need to find 8x16bits to store it in.
- * As the value used for disable is 0xffff, a separate enable switch
- * CPMU specification allows for 8 filters, each with a 32-bit value...
- * So we need to find 8x32 bits to store them in.
+ * As the value used for disable is 0xffff_ffff, a separate enable switch
  * is needed.
  */
 
@@ -642,7 +642,7 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags)
                if (cxl_pmu_config1_hdm_filter_en(event))
                        cfg = cxl_pmu_config2_get_hdm_decoder(event);
                else
-                       cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
+                       cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
                writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
        }
 
index 0dda70e1ef90a19017c902689f970dea684b4f4c..c78a6fd6c57f612221749d44673d47845911231f 100644 (file)
@@ -150,19 +150,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
 
-       if (!rvpmu->ctr_get_width)
-       /**
-        * If the pmu driver doesn't support counter width, set it to default
-        * maximum allowed by the specification.
-        */
-               cwidth = 63;
-       else {
-               if (hwc->idx == -1)
-                       /* Handle init case where idx is not initialized yet */
-                       cwidth = rvpmu->ctr_get_width(0);
-               else
-                       cwidth = rvpmu->ctr_get_width(hwc->idx);
-       }
+       if (hwc->idx == -1)
+               /* Handle init case where idx is not initialized yet */
+               cwidth = rvpmu->ctr_get_width(0);
+       else
+               cwidth = rvpmu->ctr_get_width(hwc->idx);
 
        return GENMASK_ULL(cwidth, 0);
 }
index 79fdd667922e812612aae1f597714bbefa0d4899..fa0bccf4edf2ea6172c7ee72d577cb0904073ea7 100644 (file)
@@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
        return pmu_legacy_ctr_get_idx(event);
 }
 
+/* cycle & instret are always 64 bit, one bit less according to SBI spec */
+static int pmu_legacy_ctr_get_width(int idx)
+{
+       return 63;
+}
+
 static u64 pmu_legacy_read_ctr(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -111,12 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
        pmu->ctr_stop = NULL;
        pmu->event_map = pmu_legacy_event_map;
        pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
-       pmu->ctr_get_width = NULL;
+       pmu->ctr_get_width = pmu_legacy_ctr_get_width;
        pmu->ctr_clear_idx = NULL;
        pmu->ctr_read = pmu_legacy_read_ctr;
        pmu->event_mapped = pmu_legacy_event_mapped;
        pmu->event_unmapped = pmu_legacy_event_unmapped;
        pmu->csr_index = pmu_legacy_csr_index;
+       pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+       pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
 
        perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
 }
index 16acd4dcdb96c75e07b45a3745a71842f2d7d2b8..452aab49db1e8ccc35a6bb0b76661ca7cb6fb71f 100644 (file)
@@ -512,7 +512,7 @@ static void pmu_sbi_set_scounteren(void *arg)
 
        if (event->hw.idx != -1)
                csr_write(CSR_SCOUNTEREN,
-                         csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+                         csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
 }
 
 static void pmu_sbi_reset_scounteren(void *arg)
@@ -521,7 +521,7 @@ static void pmu_sbi_reset_scounteren(void *arg)
 
        if (event->hw.idx != -1)
                csr_write(CSR_SCOUNTEREN,
-                         csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+                         csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
 }
 
 static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -731,14 +731,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
                /* compute hardware counter index */
                hidx = info->csr - CSR_CYCLE;
                /* check if the corresponding bit is set in sscountovf */
-               if (!(overflow & (1 << hidx)))
+               if (!(overflow & BIT(hidx)))
                        continue;
 
                /*
                 * Keep a track of overflowed counters so that they can be started
                 * with updated initial value.
                 */
-               overflowed_ctrs |= 1 << lidx;
+               overflowed_ctrs |= BIT(lidx);
                hw_evt = &event->hw;
                riscv_pmu_event_update(event);
                perf_sample_data_init(&data, 0, hw_evt->last_period);
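
The BIT() conversions above replace `1 << hidx`, a signed 32-bit shift that is undefined once the bit index reaches 31 and can never express bits 32 and up; BIT() shifts an unsigned long instead. A standalone demonstration (BIT() is redefined locally to match the kernel's):

#include <stdio.h>

#define BIT(nr) (1UL << (nr))   /* unsigned long, as in the kernel */

int main(void)
{
        unsigned long overflowed_ctrs = 0;
        int hidx = 31;

        /* (1 << 31) would overflow a signed int: undefined behaviour.
         * BIT(31) is a well-defined unsigned shift.
         */
        overflowed_ctrs |= BIT(hidx);

        printf("mask = 0x%lx\n", overflowed_ctrs);      /* 0x80000000 */
        return 0;
}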
index e625b32889bfceaef9846db42e594e971cccb54d..0928a526e2ab3692eaeb1e4abaa45e23eee4cf5b 100644 (file)
@@ -706,7 +706,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
                        return ret;
                }
 
-               priv->id = of_alias_get_id(np, "mipi_dphy");
+               priv->id = of_alias_get_id(np, "mipi-dphy");
                if (priv->id < 0) {
                        dev_err(dev, "Failed to get phy node alias id: %d\n",
                                priv->id);
index a623f092b11f642bd3d35655e162a94a454bb14f..a43e20abb10d54a2ff2bbe29907f5c4597d6871d 100644 (file)
 #define EUSB2_TUNE_EUSB_EQU            0x5A
 #define EUSB2_TUNE_EUSB_HS_COMP_CUR    0x5B
 
-#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v)     \
-       {                                       \
-               .reg = r,                       \
-               .val = v,                       \
-       }
-
-enum reg_fields {
-       F_TUNE_EUSB_HS_COMP_CUR,
-       F_TUNE_EUSB_EQU,
-       F_TUNE_EUSB_SLEW,
-       F_TUNE_USB2_HS_COMP_CUR,
-       F_TUNE_USB2_PREEM,
-       F_TUNE_USB2_EQU,
-       F_TUNE_USB2_SLEW,
-       F_TUNE_SQUELCH_U,
-       F_TUNE_HSDISC,
-       F_TUNE_RES_FSDIF,
-       F_TUNE_IUSB2,
-       F_TUNE_USB2_CROSSOVER,
-       F_NUM_TUNE_FIELDS,
-
-       F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
-       F_FORCE_EN_5,
-
-       F_EN_CTL1,
-
-       F_RPTR_STATUS,
-       F_NUM_FIELDS,
-};
-
-static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
-       [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
-       [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
-       [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
-       [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
-       [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
-       [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
-       [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
-       [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
-       [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
-       [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
-       [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
-       [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
-
-       [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
-       [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
-
-       [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
-
-       [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+enum eusb2_reg_layout {
+       TUNE_EUSB_HS_COMP_CUR,
+       TUNE_EUSB_EQU,
+       TUNE_EUSB_SLEW,
+       TUNE_USB2_HS_COMP_CUR,
+       TUNE_USB2_PREEM,
+       TUNE_USB2_EQU,
+       TUNE_USB2_SLEW,
+       TUNE_SQUELCH_U,
+       TUNE_HSDISC,
+       TUNE_RES_FSDIF,
+       TUNE_IUSB2,
+       TUNE_USB2_CROSSOVER,
+       NUM_TUNE_FIELDS,
+
+       FORCE_VAL_5 = NUM_TUNE_FIELDS,
+       FORCE_EN_5,
+
+       EN_CTL1,
+
+       RPTR_STATUS,
+       LAYOUT_SIZE,
 };
 
 struct eusb2_repeater_cfg {
@@ -98,10 +70,11 @@ struct eusb2_repeater_cfg {
 
 struct eusb2_repeater {
        struct device *dev;
-       struct regmap_field *regs[F_NUM_FIELDS];
+       struct regmap *regmap;
        struct phy *phy;
        struct regulator_bulk_data *vregs;
        const struct eusb2_repeater_cfg *cfg;
+       u32 base;
        enum phy_mode mode;
 };
 
@@ -109,10 +82,10 @@ static const char * const pm8550b_vreg_l[] = {
        "vdd18", "vdd3",
 };
 
-static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
-       [F_TUNE_IUSB2] = 0x8,
-       [F_TUNE_SQUELCH_U] = 0x3,
-       [F_TUNE_USB2_PREEM] = 0x5,
+static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
+       [TUNE_IUSB2] = 0x8,
+       [TUNE_SQUELCH_U] = 0x3,
+       [TUNE_USB2_PREEM] = 0x5,
 };
 
 static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
@@ -140,47 +113,42 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
 
 static int eusb2_repeater_init(struct phy *phy)
 {
-       struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
        struct eusb2_repeater *rptr = phy_get_drvdata(phy);
        struct device_node *np = rptr->dev->of_node;
-       u32 init_tbl[F_NUM_TUNE_FIELDS] = { 0 };
-       u8 override;
+       struct regmap *regmap = rptr->regmap;
+       const u32 *init_tbl = rptr->cfg->init_tbl;
+       u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
+       u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
+       u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
+       u32 base = rptr->base;
        u32 val;
        int ret;
-       int i;
+
+       of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
+       of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
+       of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
 
        ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
        if (ret)
                return ret;
 
-       regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+       regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
 
-       for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
-               if (init_tbl[i]) {
-                       regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
-               } else {
-                       /* Write 0 if there's no value set */
-                       u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
-
-                       regmap_field_update_bits(rptr->regs[i], mask, 0);
-               }
-       }
-       memcpy(init_tbl, rptr->cfg->init_tbl, sizeof(init_tbl));
+       regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
+       regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
+       regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
+       regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
+       regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
+       regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
+       regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
+       regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
+       regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
 
-       if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &override))
-               init_tbl[F_TUNE_IUSB2] = override;
+       regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
+       regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
+       regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
 
-       if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &override))
-               init_tbl[F_TUNE_HSDISC] = override;
-
-       if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &override))
-               init_tbl[F_TUNE_USB2_PREEM] = override;
-
-       for (i = 0; i < F_NUM_TUNE_FIELDS; i++)
-               regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
-
-       ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
-                                            val, val & RPTR_OK, 10, 5);
+       ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
        if (ret)
                dev_err(rptr->dev, "initialization timed out\n");
 
@@ -191,6 +159,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
                                   enum phy_mode mode, int submode)
 {
        struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+       struct regmap *regmap = rptr->regmap;
+       u32 base = rptr->base;
 
        switch (mode) {
        case PHY_MODE_USB_HOST:
@@ -199,10 +169,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
                 * per the eUSB 1.2 spec. Implement a software workaround below
                 * until the underlying PHY and controller issue is fixed.
                 */
-               regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
-                                        F_CLK_19P2M_EN, F_CLK_19P2M_EN);
-               regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
-                                        V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+               regmap_write(regmap, base + EUSB2_FORCE_EN_5, F_CLK_19P2M_EN);
+               regmap_write(regmap, base + EUSB2_FORCE_VAL_5, V_CLK_19P2M_EN);
                break;
        case PHY_MODE_USB_DEVICE:
                /*
@@ -211,10 +179,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
                 * repeater doesn't clear previous value due to shared
                 * regulators (say host <-> device mode switch).
                 */
-               regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
-                                        F_CLK_19P2M_EN, 0);
-               regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
-                                        V_CLK_19P2M_EN, 0);
+               regmap_write(regmap, base + EUSB2_FORCE_EN_5, 0);
+               regmap_write(regmap, base + EUSB2_FORCE_VAL_5, 0);
                break;
        default:
                return -EINVAL;
@@ -243,9 +209,8 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct phy_provider *phy_provider;
        struct device_node *np = dev->of_node;
-       struct regmap *regmap;
-       int i, ret;
        u32 res;
+       int ret;
 
        rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
        if (!rptr)
@@ -258,22 +223,15 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
        if (!rptr->cfg)
                return -EINVAL;
 
-       regmap = dev_get_regmap(dev->parent, NULL);
-       if (!regmap)
+       rptr->regmap = dev_get_regmap(dev->parent, NULL);
+       if (!rptr->regmap)
                return -ENODEV;
 
        ret = of_property_read_u32(np, "reg", &res);
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < F_NUM_FIELDS; i++)
-               eusb2_repeater_tune_reg_fields[i].reg += res;
-
-       ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
-                                          eusb2_repeater_tune_reg_fields,
-                                          F_NUM_FIELDS);
-       if (ret)
-               return ret;
+       rptr->base = res;
 
        ret = eusb2_repeater_init_vregs(rptr);
        if (ret < 0) {
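
Taken together, this file's hunks drop the regmap_field indirection: the old probe path mutated the shared eusb2_repeater_tune_reg_fields[] table in place ("regfields[i].reg += res"), which corrupts the offsets as soon as a second repeater instance probes. The rewrite keeps the DT "reg" offset in the private struct and issues plain regmap_write() calls against fixed register offsets. A hedged sketch of the resulting accessor, condensed from the diff:

    /* Per-instance base from the DT "reg" property, no shared mutable state. */
    static int eusb2_write(struct eusb2_repeater *rptr, u32 offset, u32 val)
    {
            return regmap_write(rptr->regmap, rptr->base + offset, val);
    }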
index c2590579190a935d76abc9cde99964c9958d3d07..03fb0d4b75d744492e4646af65287f61e7927f1b 100644 (file)
@@ -299,7 +299,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
 
        qphy->vreg = devm_regulator_get(dev, "vdda-phy");
        if (IS_ERR(qphy->vreg))
-               return dev_err_probe(dev, PTR_ERR(qphy->phy),
+               return dev_err_probe(dev, PTR_ERR(qphy->vreg),
                                     "failed to get vreg\n");
 
        phy_set_drvdata(qphy->phy, qphy);
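
The one-character fix above corrects a classic copy-paste slip: dev_err_probe(dev, err, fmt...) returns the error it is handed (and downgrades the log for -EPROBE_DEFER), so passing PTR_ERR(qphy->phy) here would have logged and returned a nonsense value derived from a valid pointer instead of the regulator lookup's error. The intended pattern, sketched:

    vreg = devm_regulator_get(dev, "vdda-phy");
    if (IS_ERR(vreg))
            /* log and return the error from the object that failed */
            return dev_err_probe(dev, PTR_ERR(vreg),
                                 "failed to get vreg\n");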
index 1ad10110dd2544b77ae38a1459497ae6e2905b84..17c4ad7553a5edd0960e8ff7e64e97dd7f22b5f5 100644 (file)
@@ -3562,14 +3562,6 @@ static int qmp_combo_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       ret = qmp_combo_typec_switch_register(qmp);
-       if (ret)
-               return ret;
-
-       ret = drm_aux_bridge_register(dev);
-       if (ret)
-               return ret;
-
        /* Check for legacy binding with child nodes. */
        usb_np = of_get_child_by_name(dev->of_node, "usb3-phy");
        if (usb_np) {
@@ -3589,6 +3581,14 @@ static int qmp_combo_probe(struct platform_device *pdev)
        if (ret)
                goto err_node_put;
 
+       ret = qmp_combo_typec_switch_register(qmp);
+       if (ret)
+               goto err_node_put;
+
+       ret = drm_aux_bridge_register(dev);
+       if (ret)
+               goto err_node_put;
+
        pm_runtime_set_active(dev);
        ret = devm_pm_runtime_enable(dev);
        if (ret)
index 6621246e4ddf0c567f58abdb6fb6799a08ff594e..5c003988c35d38cead7cc6b3e1e2af04a07bdb28 100644 (file)
@@ -1556,7 +1556,7 @@ static const char * const qmp_phy_vreg_l[] = {
        "vdda-phy", "vdda-pll",
 };
 
-static const struct qmp_usb_offsets qmp_usb_offsets_ipq8074 = {
+static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
        .serdes         = 0,
        .pcs            = 0x800,
        .pcs_misc       = 0x600,
@@ -1572,7 +1572,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
        .rx             = 0x400,
 };
 
-static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
+static const struct qmp_usb_offsets qmp_usb_offsets_v3_msm8996 = {
        .serdes         = 0,
        .pcs            = 0x600,
        .tx             = 0x200,
@@ -1624,7 +1624,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v7 = {
 static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
        .lanes                  = 1,
 
-       .offsets                = &qmp_usb_offsets_ipq8074,
+       .offsets                = &qmp_usb_offsets_v3,
 
        .serdes_tbl             = ipq9574_usb3_serdes_tbl,
        .serdes_tbl_num         = ARRAY_SIZE(ipq9574_usb3_serdes_tbl),
@@ -1642,7 +1642,7 @@ static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
 static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
        .lanes                  = 1,
 
-       .offsets                = &qmp_usb_offsets_ipq8074,
+       .offsets                = &qmp_usb_offsets_v3,
 
        .serdes_tbl             = ipq8074_usb3_serdes_tbl,
        .serdes_tbl_num         = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
@@ -1678,7 +1678,7 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
 static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
        .lanes                  = 1,
 
-       .offsets                = &qmp_usb_offsets_v3,
+       .offsets                = &qmp_usb_offsets_v3_msm8996,
 
        .serdes_tbl             = msm8996_usb3_serdes_tbl,
        .serdes_tbl_num         = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
index ee56856cb80c33e4733f2b7f2a43fb681c89fc61..bbcdece83bf422948983a814b7a6221b79cad6a3 100644 (file)
@@ -1644,7 +1644,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
        const struct pinctrl_ops *ops = pctldev->desc->pctlops;
        unsigned int i, pin;
 #ifdef CONFIG_GPIOLIB
-       struct gpio_device *gdev __free(gpio_device_put) = NULL;
+       struct gpio_device *gdev = NULL;
        struct pinctrl_gpio_range *range;
        int gpio_num;
 #endif
index 73f091cd827e69edae7e2c0f4743e54c6db14b40..23aebd4695e99fbebffaf308724484f14aeb7984 100644 (file)
@@ -2562,7 +2562,7 @@ static const struct of_device_id stm32mp257_pctrl_match[] = {
 };
 
 static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = {
-        SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume)
+        SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume)
 };
 
 static struct platform_driver stm32mp257_pinctrl_driver = {
index feaa09f5b35a125c9c704a432f82c7672b7bc139..4f734e049f4a46b60b139cf38ec7c7a2e193a4f6 100644 (file)
@@ -296,7 +296,8 @@ static int amd_pmf_suspend_handler(struct device *dev)
 {
        struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
 
-       kfree(pdev->buf);
+       if (pdev->smart_pc_enabled)
+               cancel_delayed_work_sync(&pdev->pb_work);
 
        return 0;
 }
@@ -312,6 +313,9 @@ static int amd_pmf_resume_handler(struct device *dev)
                        return ret;
        }
 
+       if (pdev->smart_pc_enabled)
+               schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
+
        return 0;
 }
 
@@ -330,9 +334,14 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
                dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
        }
 
-       if (!amd_pmf_init_smart_pc(dev)) {
+       amd_pmf_init_smart_pc(dev);
+       if (dev->smart_pc_enabled) {
                dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
-       } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+               /* If Smart PC is enabled, no need to check for other features */
+               return;
+       }
+
+       if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
                amd_pmf_init_auto_mode(dev);
                dev_dbg(dev->dev, "Auto Mode Init done\n");
        } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
@@ -351,7 +360,7 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
                amd_pmf_deinit_sps(dev);
        }
 
-       if (!dev->smart_pc_enabled) {
+       if (dev->smart_pc_enabled) {
                amd_pmf_deinit_smart_pc(dev);
        } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
                amd_pmf_deinit_auto_mode(dev);
index 16999c5b334fd44537404c56ab325aff00ede667..66cae1cca73cc16b73210e49af1c836c3da4d260 100644 (file)
@@ -441,11 +441,6 @@ struct apmf_dyn_slider_output {
        struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
 } __packed;
 
-enum smart_pc_status {
-       PMF_SMART_PC_ENABLED,
-       PMF_SMART_PC_DISABLED,
-};
-
 /* Smart PC - TA internals */
 enum system_state {
        SYSTEM_STATE_S0i3,
index f8c0177afb0dae60d4f67f2876ba98c6100d1ceb..dcbe8f85e122947be014778c14478a7374698b49 100644 (file)
@@ -252,15 +252,17 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
        cookie = readl(dev->policy_buf + POLICY_COOKIE_OFFSET);
        length = readl(dev->policy_buf + POLICY_COOKIE_LEN);
 
-       if (cookie != POLICY_SIGN_COOKIE || !length)
+       if (cookie != POLICY_SIGN_COOKIE || !length) {
+               dev_dbg(dev->dev, "cookie doesn't match\n");
                return -EINVAL;
+       }
 
        /* Update the actual length */
        dev->policy_sz = length + 512;
        res = amd_pmf_invoke_cmd_init(dev);
        if (res == TA_PMF_TYPE_SUCCESS) {
                /* Now it's safe to announce that Smart PC is enabled */
-               dev->smart_pc_enabled = PMF_SMART_PC_ENABLED;
+               dev->smart_pc_enabled = true;
                /*
                 * Start collecting data from the TA firmware after a small
                 * delay; otherwise we might end up reading stale values.
@@ -268,7 +270,7 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
                schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
        } else {
                dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
-               dev->smart_pc_enabled = PMF_SMART_PC_DISABLED;
+               dev->smart_pc_enabled = false;
                return res;
        }
 
@@ -336,25 +338,6 @@ static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {}
 static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {}
 #endif
 
-static int amd_pmf_get_bios_buffer(struct amd_pmf_dev *dev)
-{
-       dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
-       if (!dev->policy_buf)
-               return -ENOMEM;
-
-       dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
-       if (!dev->policy_base)
-               return -ENOMEM;
-
-       memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
-
-       amd_pmf_hex_dump_pb(dev);
-       if (pb_side_load)
-               amd_pmf_open_pb(dev, dev->dbgfs_dir);
-
-       return amd_pmf_start_policy_engine(dev);
-}
-
 static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data)
 {
        return ver->impl_id == TEE_IMPL_ID_AMDTEE;
@@ -453,22 +436,59 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
                return ret;
 
        INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
-       amd_pmf_set_dram_addr(dev, true);
-       amd_pmf_get_bios_buffer(dev);
+
+       ret = amd_pmf_set_dram_addr(dev, true);
+       if (ret)
+               goto error;
+
+       dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
+       if (!dev->policy_base) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
+       if (!dev->policy_buf) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
+
+       amd_pmf_hex_dump_pb(dev);
+
        dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
-       if (!dev->prev_data)
-               return -ENOMEM;
+       if (!dev->prev_data) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       ret = amd_pmf_start_policy_engine(dev);
+       if (ret)
+               goto error;
+
+       if (pb_side_load)
+               amd_pmf_open_pb(dev, dev->dbgfs_dir);
+
+       return 0;
 
-       return dev->smart_pc_enabled;
+error:
+       amd_pmf_deinit_smart_pc(dev);
+
+       return ret;
 }
 
 void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
 {
-       if (pb_side_load)
+       if (pb_side_load && dev->esbin)
                amd_pmf_remove_pb(dev);
 
+       cancel_delayed_work_sync(&dev->pb_work);
        kfree(dev->prev_data);
+       dev->prev_data = NULL;
        kfree(dev->policy_buf);
-       cancel_delayed_work_sync(&dev->pb_work);
+       dev->policy_buf = NULL;
+       kfree(dev->buf);
+       dev->buf = NULL;
        amd_pmf_tee_deinit(dev);
 }
index b6708bab7c53d5afae8b4cd5d4fb07450a1c92ed..527d8fbc7cc1108da998e86d0d8dd970d9c5b179 100644 (file)
@@ -196,7 +196,7 @@ static int int0002_probe(struct platform_device *pdev)
         * IRQs into gpiolib.
         */
        ret = devm_request_irq(dev, irq, int0002_irq,
-                              IRQF_SHARED, "INT0002", chip);
+                              IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip);
        if (ret) {
                dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
                return ret;
index 210b0a81b7ecbe3ec28499c3c8dbd52cbbf1c3fb..084c355c86f5fa9050ccb881a7efa6682b538773 100644 (file)
@@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
        autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
 
        sparse_keymap_report_event(input_dev, event, val, autorelease);
-
-       /* Some devices need this to report further events */
-       acpi_evaluate_object(handle, "VBDL", NULL, NULL);
 }
 
 /*
index 6bd14d0132dbd73b1ea497679d4dd8297671859e..3d66e1d4eb1f52dad69c8b2f94d5089ccd0b0c25 100644 (file)
 #define P2SBC_HIDE             BIT(8)
 
 #define P2SB_DEVFN_DEFAULT     PCI_DEVFN(31, 1)
+#define P2SB_DEVFN_GOLDMONT    PCI_DEVFN(13, 0)
+#define SPI_DEVFN_GOLDMONT     PCI_DEVFN(13, 2)
 
 static const struct x86_cpu_id p2sb_cpu_ids[] = {
-       X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,       PCI_DEVFN(13, 0)),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
        {}
 };
 
@@ -98,21 +100,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
 
 static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
 {
-       unsigned int slot, fn;
-
-       if (PCI_FUNC(devfn) == 0) {
-               /*
-                * When function number of the P2SB device is zero, scan it and
-                * other function numbers, and if devices are available, cache
-                * their BAR0s.
-                */
-               slot = PCI_SLOT(devfn);
-               for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
-                       p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
-       } else {
-               /* Scan the P2SB device and cache its BAR0 */
-               p2sb_scan_and_cache_devfn(bus, devfn);
-       }
+       /* Scan the P2SB device and cache its BAR0 */
+       p2sb_scan_and_cache_devfn(bus, devfn);
+
+       /* On Goldmont p2sb_bar() also gets called for the SPI controller */
+       if (devfn == P2SB_DEVFN_GOLDMONT)
+               p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
 
        if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
                return -ENOENT;
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
new file mode 100644 (file)
index 0000000..bcf3a0c
--- /dev/null
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * In some cases, UART-attached devices which require an in-kernel driver,
+ * e.g. UART-attached Bluetooth HCIs, are described in the ACPI tables
+ * by an ACPI device with a broken or missing UartSerialBusV2() resource.
+ *
+ * This causes the kernel to create a /dev/ttyS# char-device for the UART
+ * instead of creating an in kernel serdev-controller + serdev-device pair
+ * for the in kernel driver.
+ *
+ * The quirk handling in acpi_quirk_skip_serdev_enumeration() makes the kernel
+ * create a serdev-controller device for these UARTs instead of a /dev/ttyS#.
+ *
+ * Instantiating the actual serdev-device to bind to is up to pdx86 code;
+ * this header provides a helper for getting the serdev-controller device.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/printk.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+                     const char *serial_ctrl_uid,
+                     int serial_ctrl_port,
+                     const char *serdev_ctrl_name)
+{
+       struct device *ctrl_dev, *child;
+       struct acpi_device *ctrl_adev;
+       char name[32];
+       int i;
+
+       ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+       if (!ctrl_adev) {
+               pr_err("error could not get %s/%s serial-ctrl adev\n",
+                      serial_ctrl_hid, serial_ctrl_uid);
+               return ERR_PTR(-ENODEV);
+       }
+
+       /* get_first_physical_node() returns a weak ref */
+       ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
+       if (!ctrl_dev) {
+               pr_err("error could not get %s/%s serial-ctrl physical node\n",
+                      serial_ctrl_hid, serial_ctrl_uid);
+               ctrl_dev = ERR_PTR(-ENODEV);
+               goto put_ctrl_adev;
+       }
+
+       /* Walk host -> uart-ctrl -> port -> serdev-ctrl */
+       for (i = 0; i < 3; i++) {
+               switch (i) {
+               case 0:
+                       snprintf(name, sizeof(name), "%s:0", dev_name(ctrl_dev));
+                       break;
+               case 1:
+                       snprintf(name, sizeof(name), "%s.%d",
+                                dev_name(ctrl_dev), serial_ctrl_port);
+                       break;
+               case 2:
+                       strscpy(name, serdev_ctrl_name, sizeof(name));
+                       break;
+               }
+
+               child = device_find_child_by_name(ctrl_dev, name);
+               put_device(ctrl_dev);
+               if (!child) {
+                       pr_err("error could not find '%s' device\n", name);
+                       ctrl_dev = ERR_PTR(-ENODEV);
+                       goto put_ctrl_adev;
+               }
+
+               ctrl_dev = child;
+       }
+
+put_ctrl_adev:
+       acpi_dev_put(ctrl_adev);
+       return ctrl_dev;
+}
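
A hedged usage sketch for the helper above; the HID, UID, port, and controller name are invented placeholders, not values from this patch. Note that the helper returns the serdev-controller struct device with a reference held (device_find_child_by_name() takes one), so callers must drop it with put_device(), as the x86-android-tablets hunk below does:

    struct device *ctrl_dev;

    ctrl_dev = get_serdev_controller("EXAM0001", "1", 0, "serial0");
    if (IS_ERR(ctrl_dev))
            return PTR_ERR(ctrl_dev);

    /* ... attach a serdev device to to_serdev_controller(ctrl_dev) ... */

    put_device(ctrl_dev);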
index 3a396b763c4963d1f965e1d635967bd3f3d60f18..ce3e08815a8e647f2bf5578d0383dd4621d8526f 100644 (file)
@@ -1009,7 +1009,16 @@ static ssize_t current_value_store(struct kobject *kobj,
                 * Note - this sets the variable and then the password as separate
                 * WMI calls. Function tlmi_save_bios_settings will error if the
                 * password is incorrect.
+                * Workstations require the opcode to be set before changing the
+                * attribute.
                 */
+               if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+                       ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
+                                                 tlmi_priv.pwd_admin->password);
+                       if (ret)
+                               goto out;
+               }
+
                set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
                                    new_setting);
                if (!set_str) {
@@ -1021,17 +1030,10 @@ static ssize_t current_value_store(struct kobject *kobj,
                if (ret)
                        goto out;
 
-               if (tlmi_priv.save_mode == TLMI_SAVE_BULK) {
+               if (tlmi_priv.save_mode == TLMI_SAVE_BULK)
                        tlmi_priv.save_required = true;
-               } else {
-                       if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
-                               ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
-                                                         tlmi_priv.pwd_admin->password);
-                               if (ret)
-                                       goto out;
-                       }
+               else
                        ret = tlmi_save_bios_settings("");
-               }
        } else { /* old non-opcode based authentication method (deprecated) */
                if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
                        auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
index c4895e9bc7148ae991a541508a0672a9ae0345bf..5ecd9d33250d78f3a38c7f99b8b6c5c903cdf25d 100644 (file)
@@ -10308,6 +10308,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
                return 0;
        default:
                /* Unknown function */
+               pr_debug("unknown function 0x%x\n", funcmode);
                return -EOPNOTSUPP;
        }
        return 0;
@@ -10493,8 +10494,8 @@ static void dytc_profile_refresh(void)
                return;
 
        perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
-       convert_dytc_to_profile(funcmode, perfmode, &profile);
-       if (profile != dytc_current_profile) {
+       err = convert_dytc_to_profile(funcmode, perfmode, &profile);
+       if (!err && profile != dytc_current_profile) {
                dytc_current_profile = profile;
                platform_profile_notify();
        }
index 7aee5e9ff2b8dd5810f83cc0317ed329b4361d2e..975cf24ae359a882974f35762894108d4a117fb8 100644 (file)
@@ -81,7 +81,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
 };
 
 static const struct ts_dmi_data chuwi_hi8_air_data = {
-       .acpi_name      = "MSSL1680:00",
+       .acpi_name      = "MSSL1680",
        .properties     = chuwi_hi8_air_props,
 };
 
@@ -415,18 +415,13 @@ static const struct property_entry gdix1001_upside_down_props[] = {
        { }
 };
 
-static const struct ts_dmi_data gdix1001_00_upside_down_data = {
-       .acpi_name      = "GDIX1001:00",
-       .properties     = gdix1001_upside_down_props,
-};
-
-static const struct ts_dmi_data gdix1001_01_upside_down_data = {
-       .acpi_name      = "GDIX1001:01",
+static const struct ts_dmi_data gdix1001_upside_down_data = {
+       .acpi_name      = "GDIX1001",
        .properties     = gdix1001_upside_down_props,
 };
 
-static const struct ts_dmi_data gdix1002_00_upside_down_data = {
-       .acpi_name      = "GDIX1002:00",
+static const struct ts_dmi_data gdix1002_upside_down_data = {
+       .acpi_name      = "GDIX1002",
        .properties     = gdix1001_upside_down_props,
 };
 
@@ -1412,7 +1407,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
        },
        {
                /* Juno Tablet */
-               .driver_data = (void *)&gdix1002_00_upside_down_data,
+               .driver_data = (void *)&gdix1002_upside_down_data,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
                        /* Both product- and board-name being "Default string" is somewhat rare */
@@ -1658,7 +1653,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
        },
        {
                /* Teclast X89 (Android version / BIOS) */
-               .driver_data = (void *)&gdix1001_00_upside_down_data,
+               .driver_data = (void *)&gdix1001_upside_down_data,
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
                        DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
@@ -1666,7 +1661,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
        },
        {
                /* Teclast X89 (Windows version / BIOS) */
-               .driver_data = (void *)&gdix1001_01_upside_down_data,
+               .driver_data = (void *)&gdix1001_upside_down_data,
                .matches = {
                        /* tPAD is too generic, also match on bios date */
                        DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
@@ -1684,7 +1679,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
        },
        {
                /* Teclast X98 Pro */
-               .driver_data = (void *)&gdix1001_00_upside_down_data,
+               .driver_data = (void *)&gdix1001_upside_down_data,
                .matches = {
                        /*
                         * Only match BIOS date, because the manufacturers
@@ -1788,7 +1783,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
        },
        {
                /* "WinBook TW100" */
-               .driver_data = (void *)&gdix1001_00_upside_down_data,
+               .driver_data = (void *)&gdix1001_upside_down_data,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
@@ -1796,7 +1791,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
        },
        {
                /* WinBook TW700 */
-               .driver_data = (void *)&gdix1001_00_upside_down_data,
+               .driver_data = (void *)&gdix1001_upside_down_data,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
@@ -1821,7 +1816,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
        int error;
 
        if (has_acpi_companion(dev) &&
-           !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+           strstarts(client->name, ts_data->acpi_name)) {
                error = device_create_managed_software_node(dev, ts_data->properties, NULL);
                if (error)
                        dev_err(dev, "failed to add properties: %d\n", error);
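
The switch from strncmp(..., I2C_NAME_SIZE) to strstarts() above is what lets a single ts_dmi_data entry cover every ACPI instance of a controller: acpi_name now stores just the HID prefix, and any ":NN" instance suffix matches. strstarts() is a trivial prefix test from <linux/string.h>; an illustrative sketch of its behavior:

    #include <linux/string.h>

    /* strstarts(str, prefix) is true iff str begins with prefix */
    static bool matches_hid(const char *client_name)
    {
            return strstarts(client_name, "GDIX1001");
            /* true for "GDIX1001:00" and "GDIX1001:01",
             * false for "GDIX1002:00" */
    }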
index f8221a15575b327c78df4edb191fdc28c52fe2c1..a3415f1c0b5f82a3f54d4a6eb6fed54d98d5e366 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 
 #include "x86-android-tablets.h"
+#include "../serdev_helpers.h"
 
 static struct platform_device *x86_android_tablet_device;
 
@@ -113,6 +114,9 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
                if (irq_type != IRQ_TYPE_NONE && irq_type != irq_get_trigger_type(irq))
                        irq_set_irq_type(irq, irq_type);
 
+               if (data->free_gpio)
+                       devm_gpiod_put(&x86_android_tablet_device->dev, gpiod);
+
                return irq;
        case X86_ACPI_IRQ_TYPE_PMIC:
                status = acpi_get_handle(NULL, data->chip, &handle);
@@ -229,38 +233,20 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
 
 static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
 {
-       struct acpi_device *ctrl_adev, *serdev_adev;
+       struct acpi_device *serdev_adev;
        struct serdev_device *serdev;
        struct device *ctrl_dev;
        int ret = -ENODEV;
 
-       ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1);
-       if (!ctrl_adev) {
-               pr_err("error could not get %s/%s ctrl adev\n",
-                      info->ctrl_hid, info->ctrl_uid);
-               return -ENODEV;
-       }
+       ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
+                                        info->ctrl_devname);
+       if (IS_ERR(ctrl_dev))
+               return PTR_ERR(ctrl_dev);
 
        serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1);
        if (!serdev_adev) {
                pr_err("error could not get %s serdev adev\n", info->serdev_hid);
-               goto put_ctrl_adev;
-       }
-
-       /* get_first_physical_node() returns a weak ref, no need to put() it */
-       ctrl_dev = acpi_get_first_physical_node(ctrl_adev);
-       if (!ctrl_dev)  {
-               pr_err("error could not get %s/%s ctrl physical dev\n",
-                      info->ctrl_hid, info->ctrl_uid);
-               goto put_serdev_adev;
-       }
-
-       /* ctrl_dev now points to the controller's parent, get the controller */
-       ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname);
-       if (!ctrl_dev) {
-               pr_err("error could not get %s/%s %s ctrl dev\n",
-                      info->ctrl_hid, info->ctrl_uid, info->ctrl_devname);
-               goto put_serdev_adev;
+               goto put_ctrl_dev;
        }
 
        serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
@@ -283,8 +269,8 @@ static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int
 
 put_serdev_adev:
        acpi_dev_put(serdev_adev);
-put_ctrl_adev:
-       acpi_dev_put(ctrl_adev);
+put_ctrl_dev:
+       put_device(ctrl_dev);
        return ret;
 }
 
index f1c66a61bfc52786f1a6cd49da8ee88c423adbea..c297391955adbcb9a6b076dfb8f009ae4bce2bcb 100644 (file)
@@ -116,6 +116,7 @@ static const struct x86_i2c_client_info lenovo_yb1_x90_i2c_clients[] __initconst
                        .trigger = ACPI_EDGE_SENSITIVE,
                        .polarity = ACPI_ACTIVE_LOW,
                        .con_id = "goodix_ts_irq",
+                       .free_gpio = true,
                },
        }, {
                /* Wacom Digitizer in keyboard half */
index bc6bbf7ec6ea137101394b59d38fb7471675b00c..278402dcb808c5f2b7e25a894c117177867250d0 100644 (file)
@@ -68,7 +68,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst =
        },
 };
 
-static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
+static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = {
        .dev_id = "i2c-NVT-ts",
        .table = {
                GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
@@ -77,7 +77,7 @@ static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
 };
 
 static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
-       &acer_b1_750_goodix_gpios,
+       &acer_b1_750_nvt_ts_gpios,
        &int3496_reference_gpios,
        NULL
 };
index 49fed9410adbadad39d397a7b541f52b13c03564..468993edfeee25bcb541daedbe6006ccc7fc44bb 100644 (file)
@@ -39,6 +39,7 @@ struct x86_acpi_irq_data {
        int index;
        int trigger;  /* ACPI_EDGE_SENSITIVE / ACPI_LEVEL_SENSITIVE */
        int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
+       bool free_gpio; /* Release GPIO after getting IRQ (for TYPE_GPIOINT) */
        const char *con_id;
 };
 
index 709bbc448fad431d894479146982664002578584..d7ef46ccd9b8a414f8066f7fe2718f9867c89003 100644 (file)
@@ -159,6 +159,9 @@ static void scmi_perf_domain_remove(struct scmi_device *sdev)
        struct genpd_onecell_data *scmi_pd_data = dev_get_drvdata(dev);
        int i;
 
+       if (!scmi_pd_data)
+               return;
+
        of_genpd_del_provider(dev->of_node);
 
        for (i = 0; i < scmi_pd_data->num_domains; i++)
index 3078896b13008865816edc575fe0d769b44c9453..47df910645f6680ab4a17948700f426904007b86 100644 (file)
@@ -692,6 +692,7 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
        unsigned int active_corner, sleep_corner;
        unsigned int this_active_corner = 0, this_sleep_corner = 0;
        unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+       unsigned int peer_enabled_corner;
 
        if (pd->state_synced) {
                to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
@@ -701,9 +702,11 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
                this_sleep_corner = pd->level_count - 1;
        }
 
-       if (peer && peer->enabled)
-               to_active_sleep(peer, peer->corner, &peer_active_corner,
+       if (peer && peer->enabled) {
+               peer_enabled_corner = max(peer->corner, peer->enable_corner);
+               to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
                                &peer_sleep_corner);
+       }
 
        active_corner = max(this_active_corner, peer_active_corner);
 
index f21cb05815ec6391cc5e11c7edc5190b7163aa94..3e31375491d58055b19f1b61b57dcac3d849b363 100644 (file)
@@ -978,6 +978,7 @@ config CHARGER_QCOM_SMB2
 config FUEL_GAUGE_MM8013
        tristate "Mitsumi MM8013 fuel gauge driver"
        depends on I2C
+       select REGMAP_I2C
        help
          Say Y here to enable the Mitsumi MM8013 fuel gauge driver.
          It enables the monitoring of many battery parameters, including
index 3a1798b0c1a79f3ed3a3fd0be4d84f6df390b3b4..9910c600743ebd9b9e01a1cb393c0378ae837807 100644 (file)
@@ -209,7 +209,9 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
 {
        struct bq27xxx_device_info *di = i2c_get_clientdata(client);
 
-       free_irq(client->irq, di);
+       if (client->irq)
+               free_irq(client->irq, di);
+
        bq27xxx_battery_teardown(di);
 
        mutex_lock(&battery_mutex);
index 830a1c4cd705784687fb424f3bfd23fdc7743fcf..8bbcd983a74aa8d8e5db6ae9e9cb7480a9220575 100644 (file)
@@ -29,8 +29,8 @@ struct max5970_regulator {
 };
 
 enum max597x_regulator_id {
-       MAX597X_SW0,
-       MAX597X_SW1,
+       MAX597X_sw0,
+       MAX597X_sw1,
 };
 
 static int max5970_read_adc(struct regmap *regmap, int reg, long *val)
@@ -378,8 +378,8 @@ static int max597x_dt_parse(struct device_node *np,
 }
 
 static const struct regulator_desc regulators[] = {
-       MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"),
-       MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"),
+       MAX597X_SWITCH(sw0, MAX5970_REG_CHXEN, 0, "vss1"),
+       MAX597X_SWITCH(sw1, MAX5970_REG_CHXEN, 1, "vss2"),
 };
 
 static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
index e374fa6e5f2841e4a96fc2ad03907f020284cfc6..d89ae7f16d7a0e1f8d8c0ec19e7ad7565e816858 100644 (file)
@@ -1017,14 +1017,14 @@ static const struct regulator_desc rk805_reg[] = {
 };
 
 static const struct linear_range rk806_buck_voltage_ranges[] = {
-       REGULATOR_LINEAR_RANGE(500000, 0, 160, 6250), /* 500mV ~ 1500mV */
-       REGULATOR_LINEAR_RANGE(1500000, 161, 237, 25000), /* 1500mV ~ 3400mV */
-       REGULATOR_LINEAR_RANGE(3400000, 238, 255, 0),
+       REGULATOR_LINEAR_RANGE(500000, 0, 159, 6250), /* 500mV ~ 1500mV */
+       REGULATOR_LINEAR_RANGE(1500000, 160, 235, 25000), /* 1500mV ~ 3400mV */
+       REGULATOR_LINEAR_RANGE(3400000, 236, 255, 0),
 };
 
 static const struct linear_range rk806_ldo_voltage_ranges[] = {
-       REGULATOR_LINEAR_RANGE(500000, 0, 232, 12500), /* 500mV ~ 3400mV */
-       REGULATOR_LINEAR_RANGE(3400000, 233, 255, 0), /* 500mV ~ 3400mV */
+       REGULATOR_LINEAR_RANGE(500000, 0, 231, 12500), /* 500mV ~ 3400mV */
+       REGULATOR_LINEAR_RANGE(3400000, 232, 255, 0),
 };
 
 static const struct regulator_desc rk806_reg[] = {
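
For the tables above, REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) decodes selector s as min_uV + (s - min_sel) * step_uV. A worked check of the corrected buck ranges, as a comment-only sketch:

    /*
     * voltage(s) = min_uV + (s - min_sel) * step_uV
     *
     *   s = 159:  500000 + 159 * 6250  = 1493750 uV  (top of first range)
     *   s = 160: 1500000 +   0 * 25000 = 1500000 uV  (start of second range)
     *   s = 235: 1500000 +  75 * 25000 = 3375000 uV  (top of second range)
     *   s = 236: 3400000 uV                          (flat 3.4 V range)
     *
     * The old bounds (0..160, 161..237) decoded 1500000 uV twice, at
     * s = 160 and s = 161, so every selector above 160 came out one
     * 25 mV step low relative to the corrected table.
     */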
index c533d1dadc6bbb0f3f388ac62b01049ac99a72c5..a5dba3829769c7954ed2d3ba38800bc768fb0019 100644 (file)
@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
                return -EINVAL;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
-       if (cdev->private->state == DEV_STATE_VERIFY) {
+       if (cdev->private->state == DEV_STATE_VERIFY ||
+           cdev->private->flags.doverify) {
                /* Remember to fake irb when finished. */
                if (!cdev->private->flags.fake_irb) {
                        cdev->private->flags.fake_irb = FAKE_CMD_IRB;
@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
        }
        if (cdev->private->state != DEV_STATE_ONLINE ||
            ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
-            !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
-           cdev->private->flags.doverify)
+            !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
                return -EBUSY;
        ret = cio_set_options (sch, flags);
        if (ret)
index addac7fbe37b9870380cc715acf923344071e6e6..9ce27092729c30a2791b329c117fa9314b268352 100644 (file)
@@ -1270,7 +1270,7 @@ source "drivers/scsi/arm/Kconfig"
 
 config JAZZ_ESP
        bool "MIPS JAZZ FAS216 SCSI support"
-       depends on MACH_JAZZ && SCSI
+       depends on MACH_JAZZ && SCSI=y
        select SCSI_SPI_ATTRS
        help
          This is the driver for the onboard SCSI host adapter of MIPS Magnum
index c0c8ab5869572f77fa11f1c2154e85802ff8a4e5..d32ad46318cb09af970085b3ab00fc376a934e4f 100644 (file)
@@ -1671,7 +1671,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
 void
 mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
 {
-       struct host_port h_port[64];
+       struct host_port *h_port = NULL;
        int i, j, found, host_port_count = 0, port_idx;
        u16 sz, attached_handle, ioc_status;
        struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
@@ -1685,6 +1685,10 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
        sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
        if (!sas_io_unit_pg0)
                return;
+       h_port = kcalloc(64, sizeof(struct host_port), GFP_KERNEL);
+       if (!h_port)
+               goto out;
+
        if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
                ioc_err(mrioc, "failure at %s:%d/%s()!\n",
                    __FILE__, __LINE__, __func__);
@@ -1814,6 +1818,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
                }
        }
 out:
+       kfree(h_port);
        kfree(sas_io_unit_pg0);
 }
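
The motivation for the hunk above: 64 instances of struct host_port on the kernel stack run to kilobytes against the kernel's small fixed-size stacks, so the array moves to the heap and is released on every exit through the existing out: label. The shape of the pattern, sketched as a fragment with the surrounding variables assumed:

    struct mpi3_sas_io_unit_page0 *pg0;
    struct host_port *h_port;

    pg0 = kzalloc(sz, GFP_KERNEL);          /* sz computed earlier */
    if (!pg0)
            return;

    h_port = kcalloc(64, sizeof(*h_port), GFP_KERNEL);  /* zeroed, heap */
    if (!h_port)
            goto out;

    /* ... refresh ports using h_port[0..63] ... */
    out:
    kfree(h_port);                          /* kfree(NULL) is a no-op */
    kfree(pg0);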
 
index 8761bc58d965f0f6eb6776a4272ca856e8724463..b8120ca93c79740d7827ebff1652b4b22b296421 100644 (file)
@@ -7378,7 +7378,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
                return -EFAULT;
        }
 
- issue_diag_reset:
+       return 0;
+
+issue_diag_reset:
        rc = _base_diag_reset(ioc);
        return rc;
 }
index 76d369343c7a9c2457e7d7bc16aa518815cbfc8f..8cad9792a56275b38f70595baf7fb095882a6c1f 100644 (file)
@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
        return result + 4;
 }
 
+enum scsi_vpd_parameters {
+       SCSI_VPD_HEADER_SIZE = 4,
+       SCSI_VPD_LIST_SIZE = 36,
+};
+
 static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
 {
-       unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+       unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
        int result;
 
        if (sdev->no_vpd_size)
                return SCSI_DEFAULT_VPD_LEN;
 
+       /*
+        * Fetch the supported pages VPD and validate that the requested page
+        * number is present.
+        */
+       if (page != 0) {
+               result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
+               if (result < SCSI_VPD_HEADER_SIZE)
+                       return 0;
+
+               result -= SCSI_VPD_HEADER_SIZE;
+               if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+                       return 0;
+       }
        /*
         * Fetch the VPD page header to find out how big the page
         * is. This is done to prevent problems on legacy devices
         * which can not handle allocation lengths as large as
         * potentially requested by the caller.
         */
-       result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+       result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
        if (result < 0)
                return 0;
 
index 0833b3e6aa6e8f35b791d3f75fe208fb0f888914..bdd0acf7fa3cb130e64fac2aacf684aa5a91da8b 100644 (file)
@@ -3407,6 +3407,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
        return true;
 }
 
+static void sd_read_block_zero(struct scsi_disk *sdkp)
+{
+       unsigned int buf_len = sdkp->device->sector_size;
+       char *buffer, cmd[10] = { };
+
+       buffer = kmalloc(buf_len, GFP_KERNEL);
+       if (!buffer)
+               return;
+
+       cmd[0] = READ_10;
+       put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+       put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+
+       scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
+                        SD_TIMEOUT, sdkp->max_retries, NULL);
+       kfree(buffer);
+}
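
The new helper builds a 10-byte READ(10) CDB by hand; for reference, the byte layout produced by the two put_unaligned calls is sketched below (comment only):

    /*
     * READ(10) CDB as filled in by sd_read_block_zero():
     *   cmd[0]    = 0x28 (READ_10 opcode)
     *   cmd[2..5] = 00 00 00 00  <- put_unaligned_be32(0, &cmd[2]), LBA 0
     *   cmd[7..8] = 00 01        <- put_unaligned_be16(1, &cmd[7]), 1 block
     * All other bytes stay zero from the "= { }" initializer.
     */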
+
 /**
  *     sd_revalidate_disk - called the first time a new disk is seen,
  *     performs disk spin up, read_capacity, etc.
@@ -3446,7 +3464,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
         */
        if (sdkp->media_present) {
                sd_read_capacity(sdkp, buffer);
-
+               /*
+                * Some USB/UAS devices return generic values for mode pages
+                * until the media has been accessed. Trigger a READ operation
+                * to force the device to populate mode pages.
+                */
+               if (sdp->read_before_ms)
+                       sd_read_block_zero(sdkp);
                /*
                 * set the default to rotational.  All non-rotational devices
                 * support the block characteristics VPD page, which will
index ceff1ec13f9ea9ea056da947d3939c51f4797522..385180c98be496989dbf469926f52c974609a013 100644 (file)
@@ -6533,8 +6533,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
 {
        struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-       blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+       if (!ctrl_info->disable_managed_interrupts)
+               return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
                              ctrl_info->pci_dev, 0);
+       else
+               return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
 }
 
 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
index 9b0fdd95276e4e017d32012d1f2de3107556e242..19f4b576f822b2e57309308f5294914af27df570 100644 (file)
@@ -1,5 +1,5 @@
 config POLARFIRE_SOC_SYS_CTRL
-       tristate "POLARFIRE_SOC_SYS_CTRL"
+       tristate "Microchip PolarFire SoC (MPFS) system controller support"
        depends on POLARFIRE_SOC_MAILBOX
        depends on MTD
        help
index f4bfd24386f1b5d2defe9aad6ffcd7123035158d..f913e9bd57ed4a7aa6d1b99d27a40552713b2536 100644 (file)
@@ -265,10 +265,17 @@ static int pmic_glink_probe(struct platform_device *pdev)
 
        pg->client_mask = *match_data;
 
+       pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+       if (IS_ERR(pg->pdr)) {
+               ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
+                                   "failed to initialize pdr\n");
+               return ret;
+       }
+
        if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
                ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
                if (ret)
-                       return ret;
+                       goto out_release_pdr_handle;
        }
        if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
                ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
@@ -281,17 +288,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
                        goto out_release_altmode_aux;
        }
 
-       pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
-       if (IS_ERR(pg->pdr)) {
-               ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
-               goto out_release_aux_devices;
-       }
-
        service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
        if (IS_ERR(service)) {
                ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
                                    "failed adding pdr lookup for charger_pd\n");
-               goto out_release_pdr_handle;
+               goto out_release_aux_devices;
        }
 
        mutex_lock(&__pmic_glink_lock);
@@ -300,8 +301,6 @@ static int pmic_glink_probe(struct platform_device *pdev)
 
        return 0;
 
-out_release_pdr_handle:
-       pdr_handle_release(pg->pdr);
 out_release_aux_devices:
        if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
                pmic_glink_del_aux_device(pg, &pg->ps_aux);
@@ -311,6 +310,8 @@ out_release_altmode_aux:
 out_release_ucsi_aux:
        if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
                pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
+out_release_pdr_handle:
+       pdr_handle_release(pg->pdr);
 
        return ret;
 }
index 5fcd0fdd2faa2d087fc03e001dffe4f5016c80a9..b3808fc24c695e89fa10f46b93e0fcfabc3b4d61 100644 (file)
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
 
        struct work_struct work;
 
-       struct device *bridge;
+       struct auxiliary_device *bridge;
 
        enum typec_orientation orientation;
        u16 svid;
@@ -230,7 +230,7 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
        else
                pmic_glink_altmode_enable_usb(altmode, alt_port);
 
-       drm_aux_hpd_bridge_notify(alt_port->bridge,
+       drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
                                  alt_port->hpd_state ?
                                  connector_status_connected :
                                  connector_status_disconnected);
@@ -454,7 +454,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
                alt_port->index = port;
                INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
 
-               alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
+               alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
                if (IS_ERR(alt_port->bridge)) {
                        fwnode_handle_put(fwnode);
                        return PTR_ERR(alt_port->bridge);
@@ -510,6 +510,16 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
                }
        }
 
+       for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
+               alt_port = &altmode->ports[port];
+               if (!alt_port->bridge)
+                       continue;
+
+               ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge);
+               if (ret)
+                       return ret;
+       }
+
        altmode->client = devm_pmic_glink_register_client(dev,
                                                          altmode->owner_id,
                                                          pmic_glink_altmode_callback,
index f94e0d370d466e9742261a84a567593b8073f169..1a8d03958dffbfb77a4cd183d8a18fbd3ed53d63 100644 (file)
@@ -1927,24 +1927,18 @@ static void cqspi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 }
 
-static int cqspi_suspend(struct device *dev)
+static int cqspi_runtime_suspend(struct device *dev)
 {
        struct cqspi_st *cqspi = dev_get_drvdata(dev);
-       struct spi_controller *host = dev_get_drvdata(dev);
-       int ret;
 
-       ret = spi_controller_suspend(host);
        cqspi_controller_enable(cqspi, 0);
-
        clk_disable_unprepare(cqspi->clk);
-
-       return ret;
+       return 0;
 }
 
-static int cqspi_resume(struct device *dev)
+static int cqspi_runtime_resume(struct device *dev)
 {
        struct cqspi_st *cqspi = dev_get_drvdata(dev);
-       struct spi_controller *host = dev_get_drvdata(dev);
 
        clk_prepare_enable(cqspi->clk);
        cqspi_wait_idle(cqspi);
@@ -1952,12 +1946,27 @@ static int cqspi_resume(struct device *dev)
 
        cqspi->current_cs = -1;
        cqspi->sclk = 0;
+       return 0;
+}
+
+static int cqspi_suspend(struct device *dev)
+{
+       struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+       return spi_controller_suspend(cqspi->host);
+}
 
-       return spi_controller_resume(host);
+static int cqspi_resume(struct device *dev)
+{
+       struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+       return spi_controller_resume(cqspi->host);
 }
 
-static DEFINE_RUNTIME_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend,
-                                cqspi_resume, NULL);
+static const struct dev_pm_ops cqspi_dev_pm_ops = {
+       RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
+       SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
+};
 
 static const struct cqspi_driver_platdata cdns_qspi = {
        .quirks = CQSPI_DISABLE_DAC_MODE,
index b24190526ce96420fe885e585b00fb820502bacd..adf19e8c4c8a0d1ef9ede14374e8ab6638073a82 100644 (file)
@@ -148,8 +148,7 @@ static void cs42l43_set_cs(struct spi_device *spi, bool is_high)
 {
        struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller);
 
-       if (spi_get_chipselect(spi, 0) == 0)
-               regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
+       regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
 }
 
 static int cs42l43_prepare_message(struct spi_controller *ctlr, struct spi_message *msg)
index 942c3117ab3a904de67c59e7e13e5d40995dd00a..82d6264841fc7f090a5541235569e40023330483 100644 (file)
@@ -359,22 +359,22 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
 
        /* Setup the state for the bitbang driver */
        bbp = &hw->bitbang;
-       bbp->ctlr = hw->host;
+       bbp->master = hw->host;
        bbp->setup_transfer = spi_ppc4xx_setupxfer;
        bbp->txrx_bufs = spi_ppc4xx_txrx;
        bbp->use_dma = 0;
-       bbp->ctlr->setup = spi_ppc4xx_setup;
-       bbp->ctlr->cleanup = spi_ppc4xx_cleanup;
-       bbp->ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
-       bbp->ctlr->use_gpio_descriptors = true;
+       bbp->master->setup = spi_ppc4xx_setup;
+       bbp->master->cleanup = spi_ppc4xx_cleanup;
+       bbp->master->bits_per_word_mask = SPI_BPW_MASK(8);
+       bbp->master->use_gpio_descriptors = true;
        /*
         * The SPI core will count the number of GPIO descriptors to figure
         * out the number of chip selects available on the platform.
         */
-       bbp->ctlr->num_chipselect = 0;
+       bbp->master->num_chipselect = 0;
 
        /* the spi->mode bits understood by this driver: */
-       bbp->ctlr->mode_bits =
+       bbp->master->mode_bits =
                SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
 
        /* Get the clock for the OPB */
index 41b7489d37ce95e059ec4849ae7039949c6e6ff1..ed4fd22eac6e0412821a11b1914e4f8e43153576 100644 (file)
@@ -907,12 +907,15 @@ new_bio:
 
        return 0;
 fail:
-       if (bio)
-               bio_put(bio);
+       if (bio) {
+               bio_uninit(bio);
+               kfree(bio);
+       }
        while (req->bio) {
                bio = req->bio;
                req->bio = bio->bi_next;
-               bio_put(bio);
+               bio_uninit(bio);
+               kfree(bio);
        }
        req->biotail = NULL;
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
index 4b10921276942ed13a43e531b723e746b76dd6fa..1892e49a8e6a68b5c0f8e719042b34b4d2856b42 100644 (file)
@@ -90,13 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid, u32 func)
        if (rc) {
                pr_err("device registration failed, err: %d\n", rc);
                put_device(&optee_device->dev);
+               return rc;
        }
 
        if (func == PTA_CMD_GET_DEVICES_SUPP)
                device_create_file(&optee_device->dev,
                                   &dev_attr_need_supplicant);
 
-       return rc;
+       return 0;
 }
 
 static int __optee_enumerate_devices(u32 func)
index 900114ba4371b10fd941e0add6ade8210c74268f..fad40c4bc710341f27b7f98f6d4317aef6627ae0 100644 (file)
@@ -1249,6 +1249,9 @@ int tb_port_update_credits(struct tb_port *port)
        ret = tb_port_do_update_credits(port);
        if (ret)
                return ret;
+
+       if (!port->dual_link_port)
+               return 0;
        return tb_port_do_update_credits(port->dual_link_port);
 }
 
index 6e05c5c7bca1ad258502eaf158b94534c1cdd23d..c2a4e88b328f35888cb44c0fe1ca5f57f2040e66 100644 (file)
@@ -108,13 +108,15 @@ config HVC_DCC_SERIALIZE_SMP
 
 config HVC_RISCV_SBI
        bool "RISC-V SBI console support"
-       depends on RISCV_SBI
+       depends on RISCV_SBI && NONPORTABLE
        select HVC_DRIVER
        help
          This enables support for console output via RISC-V SBI calls, which
-         is normally used only during boot to output printk.
+         is normally used only during boot to output printk messages.  This driver
+         conflicts with real console drivers and should not be enabled on
+         systems that directly access the console.
 
-         If you don't know what do to here, say Y.
+         If you don't know what to do here, say N.
 
 config HVCS
        tristate "IBM Hypervisor Virtual Console Server support"
index 2d1f350a4bea2a86103d707cc322ded0f5941abb..c1d43f040c43abc517c4ef6b48a5423e936e97f2 100644 (file)
@@ -357,9 +357,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
        long rate;
        int ret;
 
-       clk_disable_unprepare(d->clk);
        rate = clk_round_rate(d->clk, newrate);
-       if (rate > 0) {
+       if (rate > 0 && p->uartclk != rate) {
+               clk_disable_unprepare(d->clk);
                /*
                 * Note that any clock-notifier worker will block in
                 * serial8250_update_uartclk() until we are done.
@@ -367,8 +367,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
                ret = clk_set_rate(d->clk, newrate);
                if (!ret)
                        p->uartclk = rate;
+               clk_prepare_enable(d->clk);
        }
-       clk_prepare_enable(d->clk);
 
        dw8250_do_set_termios(p, termios, old);
 }
index fccec1698a54104c1487ea65536dce7729123c61..cf2c890a560f05204e249b931668deca04b3cb27 100644 (file)
@@ -1339,11 +1339,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
        }
 }
 
+static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+{
+       struct uart_port *port = &uap->port;
+       u32 cr;
+
+       /* Enable transmitter */
+       cr = pl011_read(uap, REG_CR);
+       cr |= UART011_CR_TXE;
+
+       /* Disable receiver if half-duplex */
+       if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+               cr &= ~UART011_CR_RXE;
+
+       if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+               cr &= ~UART011_CR_RTS;
+       else
+               cr |= UART011_CR_RTS;
+
+       pl011_write(cr, uap, REG_CR);
+
+       if (port->rs485.delay_rts_before_send)
+               mdelay(port->rs485.delay_rts_before_send);
+
+       uap->rs485_tx_started = true;
+}
+
 static void pl011_start_tx(struct uart_port *port)
 {
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
 
+       if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+           !uap->rs485_tx_started)
+               pl011_rs485_tx_start(uap);
+
        if (!pl011_dma_tx_start(uap))
                pl011_start_tx_pio(uap);
 }
@@ -1424,42 +1454,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
        return true;
 }
 
-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
-{
-       struct uart_port *port = &uap->port;
-       u32 cr;
-
-       /* Enable transmitter */
-       cr = pl011_read(uap, REG_CR);
-       cr |= UART011_CR_TXE;
-
-       /* Disable receiver if half-duplex */
-       if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
-               cr &= ~UART011_CR_RXE;
-
-       if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
-               cr &= ~UART011_CR_RTS;
-       else
-               cr |= UART011_CR_RTS;
-
-       pl011_write(cr, uap, REG_CR);
-
-       if (port->rs485.delay_rts_before_send)
-               mdelay(port->rs485.delay_rts_before_send);
-
-       uap->rs485_tx_started = true;
-}
-
 /* Returns true if tx interrupts have to be (kept) enabled  */
 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
 {
        struct circ_buf *xmit = &uap->port.state->xmit;
        int count = uap->fifosize >> 1;
 
-       if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
-           !uap->rs485_tx_started)
-               pl011_rs485_tx_start(uap);
-
        if (uap->port.x_char) {
                if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
                        return true;
index 5ddf110aedbe513b522d10e691cada5563fec4df..bbcbc91482af0bbd04db16242b4d955d21a9753a 100644 (file)
@@ -2345,9 +2345,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
 
        lpuart32_write(&sport->port, bd, UARTBAUD);
        lpuart32_serial_setbrg(sport, baud);
-       lpuart32_write(&sport->port, modem, UARTMODIR);
-       lpuart32_write(&sport->port, ctrl, UARTCTRL);
+       /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
+       lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
        /* restore control register */
+       lpuart32_write(&sport->port, ctrl, UARTCTRL);
+       /* re-enable the CTS if needed */
+       lpuart32_write(&sport->port, modem, UARTMODIR);
 
        if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
                sport->is_cs7 = true;
index 4aa72d5aeafbf081ac37241853c49cdb18e46ae5..e14813250616118e5ecfcfdafc1a4c1033f2bf79 100644 (file)
@@ -462,8 +462,7 @@ static void imx_uart_stop_tx(struct uart_port *port)
        }
 }
 
-/* called with port.lock taken and irqs off */
-static void imx_uart_stop_rx(struct uart_port *port)
+static void imx_uart_stop_rx_with_loopback_ctrl(struct uart_port *port, bool loopback)
 {
        struct imx_port *sport = (struct imx_port *)port;
        u32 ucr1, ucr2, ucr4, uts;
@@ -485,7 +484,7 @@ static void imx_uart_stop_rx(struct uart_port *port)
        /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
        if (port->rs485.flags & SER_RS485_ENABLED &&
            port->rs485.flags & SER_RS485_RTS_ON_SEND &&
-           sport->have_rtscts && !sport->have_rtsgpio) {
+           sport->have_rtscts && !sport->have_rtsgpio && loopback) {
                uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
                uts |= UTS_LOOP;
                imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
@@ -497,6 +496,16 @@ static void imx_uart_stop_rx(struct uart_port *port)
        imx_uart_writel(sport, ucr2, UCR2);
 }
 
+/* called with port.lock taken and irqs off */
+static void imx_uart_stop_rx(struct uart_port *port)
+{
+       /*
+        * Stop RX and enable loopback in order to make sure RS485 bus
+        * is not blocked. See the comment in imx_uart_probe().
+        */
+       imx_uart_stop_rx_with_loopback_ctrl(port, true);
+}
+
 /* called with port.lock taken and irqs off */
 static void imx_uart_enable_ms(struct uart_port *port)
 {
@@ -682,9 +691,14 @@ static void imx_uart_start_tx(struct uart_port *port)
                                imx_uart_rts_inactive(sport, &ucr2);
                        imx_uart_writel(sport, ucr2, UCR2);
 
+                       /*
+                        * Since we are about to transmit, we cannot stop RX
+                        * with loopback enabled: that would simply loop our
+                        * transmitted data back to RX.
+                        */
                        if (!(port->rs485.flags & SER_RS485_RX_DURING_TX) &&
                            !port->rs485_rx_during_tx_gpio)
-                               imx_uart_stop_rx(port);
+                               imx_uart_stop_rx_with_loopback_ctrl(port, false);
 
                        sport->tx_state = WAIT_AFTER_RTS;
 
index e63a8fbe63bdb22b70fd9362fae5d557c4c59b75..99e08737f293c6868e56d2de80f31bf0e3345ca3 100644 (file)
@@ -851,19 +851,21 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
 }
 
 static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport,
-                                            unsigned int remaining)
+                                            unsigned int chunk)
 {
        struct qcom_geni_serial_port *port = to_dev_port(uport);
        struct circ_buf *xmit = &uport->state->xmit;
-       unsigned int tx_bytes;
+       unsigned int tx_bytes, c, remaining = chunk;
        u8 buf[BYTES_PER_FIFO_WORD];
 
        while (remaining) {
                memset(buf, 0, sizeof(buf));
                tx_bytes = min(remaining, BYTES_PER_FIFO_WORD);
 
-               memcpy(buf, &xmit->buf[xmit->tail], tx_bytes);
-               uart_xmit_advance(uport, tx_bytes);
+               for (c = 0; c < tx_bytes; c++) {
+                       buf[c] = xmit->buf[xmit->tail];
+                       uart_xmit_advance(uport, 1);
+               }
 
                iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
 
index 88975a4df3060599b233abb49fe07ca53ba15b3a..72b6f4f326e2b04953875062f8ebc6e56f324e45 100644 (file)
@@ -46,8 +46,31 @@ out:
        return 0;
 }
 
+static int serial_port_runtime_suspend(struct device *dev)
+{
+       struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+       struct uart_port *port = port_dev->port;
+       unsigned long flags;
+       bool busy;
+
+       if (port->flags & UPF_DEAD)
+               return 0;
+
+       uart_port_lock_irqsave(port, &flags);
+       busy = __serial_port_busy(port);
+       if (busy)
+               port->ops->start_tx(port);
+       uart_port_unlock_irqrestore(port, flags);
+
+       if (busy)
+               pm_runtime_mark_last_busy(dev);
+
+       return busy ? -EBUSY : 0;
+}
+
 static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
-                                NULL, serial_port_runtime_resume, NULL);
+                                serial_port_runtime_suspend,
+                                serial_port_runtime_resume, NULL);
 
 static int serial_port_probe(struct device *dev)
 {
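
Returning -EBUSY from a runtime-suspend callback rejects the transition, and pm_runtime_mark_last_busy() re-arms the autosuspend timer so the PM core retries later instead of spinning. A minimal sketch of that veto pattern, with a hypothetical foo_is_busy() predicate standing in for __serial_port_busy():

static int foo_runtime_suspend(struct device *dev)
{
        struct foo_port *port = dev_get_drvdata(dev);

        if (foo_is_busy(port)) {                /* hypothetical busy check */
                pm_runtime_mark_last_busy(dev); /* re-arm the autosuspend timer */
                return -EBUSY;                  /* veto; the core retries later */
        }

        return 0;
}
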
index 794b7751274034848c65a7e3374b694bcf61c42d..693e932d6feb5842467d1408e04c8d574342cb1f 100644 (file)
@@ -251,7 +251,9 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
                writel_relaxed(cr3, port->membase + ofs->cr3);
                writel_relaxed(cr1, port->membase + ofs->cr1);
 
-               rs485conf->flags |= SER_RS485_RX_DURING_TX;
+               if (!port->rs485_rx_during_tx_gpio)
+                       rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
        } else {
                stm32_usart_clr_bits(port, ofs->cr3,
                                     USART_CR3_DEM | USART_CR3_DEP);
index 156efda7c80d64b3c512d8cc84f228521aefec11..38a765eadbe2bc81494f1fbd7a63b50b87101f08 100644 (file)
@@ -381,7 +381,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
                u32 *ln = vc->vc_uni_lines[vc->state.y];
                unsigned int x = vc->state.x, cols = vc->vc_cols;
 
-               memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
+               memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
                memset32(&ln[cols - nr], ' ', nr);
        }
 }
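
The one-word fix matters because memcpy() has undefined behavior when source and destination overlap, and shifting the tail of a line left over the deleted cells is exactly that case; memmove() is specified to handle overlap. A standalone demonstration of the same delete-in-place step (chars here instead of the console's u32 cells):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[] = "abcdefgh";
        size_t x = 2, nr = 3, cols = 8; /* delete 3 cells at column 2 */

        /* the regions overlap: memmove() is required, memcpy() would be UB */
        memmove(&line[x], &line[x + nr], cols - x - nr);
        memset(&line[cols - nr], ' ', nr);

        printf("\"%s\"\n", line);       /* "abfgh   " */
        return 0;
}
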
index d77b25b79ae3ec5c949912eb88b2433472180be0..3b89c9d4aa404e48f7f5e95c545652c40ad13127 100644 (file)
@@ -1469,7 +1469,7 @@ static int ufshcd_devfreq_target(struct device *dev,
        int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
        ktime_t start;
-       bool scale_up, sched_clk_scaling_suspend_work = false;
+       bool scale_up = false, sched_clk_scaling_suspend_work = false;
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
        unsigned long irq_flags;
index aeca902ab6cc427b0946cf13ea9b8c725eb3f287..fd1beb10bba726cef258e7438d642f31d6567dfe 100644 (file)
@@ -828,7 +828,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
                        return;
        }
 
-       if (request->complete) {
+       /*
+        * The zlp request is appended by the driver itself, so there is no need
+        * to call usb_gadget_giveback_request() to notify the gadget composite driver.
+        */
+       if (request->complete && request->buf != priv_dev->zlp_buf) {
                spin_unlock(&priv_dev->lock);
                usb_gadget_giveback_request(&priv_ep->endpoint,
                                            request);
@@ -2540,11 +2544,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
 
        while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
                priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+               list_del_init(&priv_req->list);
 
                kfree(priv_req->request.buf);
                cdns3_gadget_ep_free_request(&priv_ep->endpoint,
                                             &priv_req->request);
-               list_del_init(&priv_req->list);
                --priv_ep->wa2_counter;
        }
 
index 33548771a0d3a7212781ff39814fedb7d01f0ab4..465e9267b49c12768ac72ecb818f731fc8787641 100644 (file)
@@ -395,7 +395,6 @@ pm_put:
        return ret;
 }
 
-
 /**
  * cdns_wakeup_irq - interrupt handler for wakeup events
  * @irq: irq number for cdns3/cdnsp core device
index 04b6d12f2b9a39b9bfad76fe1909b22f7c010990..ee917f1b091c893ebccad19bd5a62aea9e65c721 100644 (file)
@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
  */
 static void cdns_otg_disable_irq(struct cdns *cdns)
 {
-       writel(0, &cdns->otg_irq_regs->ien);
+       if (cdns->version)
+               writel(0, &cdns->otg_irq_regs->ien);
 }
 
 /**
@@ -422,15 +423,20 @@ int cdns_drd_init(struct cdns *cdns)
 
                cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
 
-               if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+               state = readl(&cdns->otg_cdnsp_regs->did);
+
+               if (OTG_CDNSP_CHECK_DID(state)) {
                        cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
                                              &cdns->otg_cdnsp_regs->ien;
                        cdns->version  = CDNSP_CONTROLLER_V2;
-               } else {
+               } else if (OTG_CDNS3_CHECK_DID(state)) {
                        cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
                                              &cdns->otg_v1_regs->ien;
                        writel(1, &cdns->otg_v1_regs->simulate);
                        cdns->version  = CDNS3_CONTROLLER_V1;
+               } else {
+                       dev_err(cdns->dev, "unsupported DID=0x%08x\n", state);
+                       return -EINVAL;
                }
 
                dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
@@ -483,7 +489,6 @@ int cdns_drd_exit(struct cdns *cdns)
        return 0;
 }
 
-
 /* Indicate the cdns3 core was power lost before */
 bool cdns_power_is_lost(struct cdns *cdns)
 {
index cbdf94f73ed917bb14baf23a9087b10aca2f7015..d72370c321d3929fc477854585d9e46be6848fef 100644 (file)
@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
        __le32 susp_timing_ctrl;
 };
 
-#define OTG_CDNSP_DID  0x0004034E
+/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
+#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
+
+/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
+#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)
 
 /*
  * Common registers interface for both CDNS3 and CDNSP version of DRD.
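
Masking off the low byte with GENMASK(31, 8) turns the old exact-DID match into a family match: every 0x000403xx device ID is treated as CDNSP and every 0x000402xx one as CDNS3. A standalone illustration (GENMASK() normally comes from linux/bits.h; it is redefined here only to keep the demo self-contained):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
        uint32_t did = 0x0004034E;      /* the one DID the old code accepted */

        /* family match: any 0x000403xx part qualifies, prints 1 */
        printf("%d\n", (did & GENMASK(31, 8)) == 0x00040300);
        return 0;
}
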
index 6164fc4c96a49b60b73f772bdc92b8acf383269c..ceca4d839dfd42b87167f4de3019ab63776fa6c2 100644 (file)
 #include "../host/xhci.h"
 #include "../host/xhci-plat.h"
 
+/*
+ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 registers exist only
+ * in the Cadence USB3 dual-role controller, so they can't be used
+ * with the Cadence CDNSP dual-role controller.
+ */
 #define XECP_PORT_CAP_REG      0x8000
 #define XECP_AUX_CTRL_REG1     0x8120
 
@@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
        .resume_quirk = xhci_cdns3_resume_quirk,
 };
 
+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+
 static int __cdns_host_init(struct cdns *cdns)
 {
        struct platform_device *xhci;
@@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns)
                goto err1;
        }
 
-       cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
-                       sizeof(struct xhci_plat_priv), GFP_KERNEL);
+       if (cdns->version < CDNSP_CONTROLLER_V2)
+               cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+                               sizeof(struct xhci_plat_priv), GFP_KERNEL);
+       else
+               cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
+                               sizeof(struct xhci_plat_priv), GFP_KERNEL);
+
        if (!cdns->xhci_plat_data) {
                ret = -ENOMEM;
                goto err1;
index c628c1abc90711cb9b8e652a0d903a6359c968bc..4d63496f98b6c45074eee270db26c53b4019dac6 100644 (file)
@@ -573,7 +573,7 @@ static int match_location(struct usb_device *peer_hdev, void *p)
        struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev);
        struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent);
 
-       if (!peer_hub)
+       if (!peer_hub || port_dev->connect_type == USB_PORT_NOT_USED)
                return 0;
 
        hcd = bus_to_hcd(hdev->bus);
@@ -584,7 +584,8 @@ static int match_location(struct usb_device *peer_hdev, void *p)
 
        for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) {
                peer = peer_hub->ports[port1 - 1];
-               if (peer && peer->location == port_dev->location) {
+               if (peer && peer->connect_type != USB_PORT_NOT_USED &&
+                   peer->location == port_dev->location) {
                        link_peers_report(port_dev, peer);
                        return 1; /* done */
                }
index 4c8dd67246788d3839c59fa5c284113ac61ffea8..28f49400f3e8b178e23c881120577da461178c35 100644 (file)
@@ -2650,6 +2650,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
        int ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
+       if (!dwc->pullups_connected) {
+               spin_unlock_irqrestore(&dwc->lock, flags);
+               return 0;
+       }
+
        dwc->connected = false;
 
        /*
index ca5d5f5649982a6752b03053421cbf31c7589276..28f4e6552e84592566d261ec3174773650c5d444 100644 (file)
@@ -1338,7 +1338,15 @@ parse_ntb:
             "Parsed NTB with %d frames\n", dgram_counter);
 
        to_process -= block_len;
-       if (to_process != 0) {
+
+       /*
+        * The Windows NCM driver avoids USB ZLPs by adding a 1-byte
+        * zero pad as needed.
+        */
+       if (to_process == 1 &&
+           (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
+               to_process--;
+       } else if ((to_process > 0) && (block_len != 0)) {
                ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
                goto parse_ntb;
        }
index 10c5d7f726a1fdd967d058bcc60302db8d839009..f90eeecf27de110ee4abc9d4cebef8cf73306193 100644 (file)
@@ -2036,7 +2036,8 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
 
 static inline int machine_without_vbus_sense(void)
 {
-       return  machine_is_omap_osk() || machine_is_sx1();
+       return  machine_is_omap_osk() || machine_is_omap_palmte() ||
+               machine_is_sx1();
 }
 
 static int omap_udc_start(struct usb_gadget *g,
index ac3fc597031573199a141e60e2b54432d2a2782e..cfebb833668e4b014633d0919be1aa1777c25140 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
+#include <linux/platform_device.h>
 
 static int uhci_grlib_init(struct usb_hcd *hcd)
 {
index f0d8a607ff214f86ba33b2e9126ccb186e6c1853..4f64b814d4aa20fdd08e349e5c07324b3e105c5e 100644 (file)
@@ -326,7 +326,13 @@ static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhc
        /* how many trbs will be queued past the enqueue segment? */
        trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
 
-       if (trbs_past_seg <= 0)
+       /*
+        * Consider expanding the ring already if num_trbs fills the current
+        * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into
+        * the next segment. This avoids confusing a full ring with the
+        * special empty-ring case below.
+        */
+       if (trbs_past_seg < 0)
                return 0;
 
        /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
index ae41578bd0149900b0a867f71a0cf6080e238566..70165dd86b5de958ab4f5fe0d1573988977be425 100644 (file)
@@ -21,7 +21,9 @@ static const struct class role_class = {
 struct usb_role_switch {
        struct device dev;
        struct mutex lock; /* device lock*/
+       struct module *module; /* the module this device depends on */
        enum usb_role role;
+       bool registered;
 
        /* From descriptor */
        struct device *usb2_port;
@@ -48,6 +50,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
        if (IS_ERR_OR_NULL(sw))
                return 0;
 
+       if (!sw->registered)
+               return -EOPNOTSUPP;
+
        mutex_lock(&sw->lock);
 
        ret = sw->set(sw, role);
@@ -73,7 +78,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
 {
        enum usb_role role;
 
-       if (IS_ERR_OR_NULL(sw))
+       if (IS_ERR_OR_NULL(sw) || !sw->registered)
                return USB_ROLE_NONE;
 
        mutex_lock(&sw->lock);
@@ -135,7 +140,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
                                                  usb_role_switch_match);
 
        if (!IS_ERR_OR_NULL(sw))
-               WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+               WARN_ON(!try_module_get(sw->module));
 
        return sw;
 }
@@ -157,7 +162,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
                sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
                                                  NULL, usb_role_switch_match);
        if (!IS_ERR_OR_NULL(sw))
-               WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+               WARN_ON(!try_module_get(sw->module));
 
        return sw;
 }
@@ -172,7 +177,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
 void usb_role_switch_put(struct usb_role_switch *sw)
 {
        if (!IS_ERR_OR_NULL(sw)) {
-               module_put(sw->dev.parent->driver->owner);
+               module_put(sw->module);
                put_device(&sw->dev);
        }
 }
@@ -189,15 +194,18 @@ struct usb_role_switch *
 usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
 {
        struct device *dev;
+       struct usb_role_switch *sw = NULL;
 
        if (!fwnode)
                return NULL;
 
        dev = class_find_device_by_fwnode(&role_class, fwnode);
-       if (dev)
-               WARN_ON(!try_module_get(dev->parent->driver->owner));
+       if (dev) {
+               sw = to_role_switch(dev);
+               WARN_ON(!try_module_get(sw->module));
+       }
 
-       return dev ? to_role_switch(dev) : NULL;
+       return sw;
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
 
@@ -338,6 +346,7 @@ usb_role_switch_register(struct device *parent,
        sw->set = desc->set;
        sw->get = desc->get;
 
+       sw->module = parent->driver->owner;
        sw->dev.parent = parent;
        sw->dev.fwnode = desc->fwnode;
        sw->dev.class = &role_class;
@@ -352,6 +361,8 @@ usb_role_switch_register(struct device *parent,
                return ERR_PTR(ret);
        }
 
+       sw->registered = true;
+
        /* TODO: Symlinks for the host port and the device controller. */
 
        return sw;
@@ -366,8 +377,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
  */
 void usb_role_switch_unregister(struct usb_role_switch *sw)
 {
-       if (!IS_ERR_OR_NULL(sw))
+       if (!IS_ERR_OR_NULL(sw)) {
+               sw->registered = false;
                device_unregister(&sw->dev);
+       }
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
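
The changes above replace every parent->driver->owner dereference with a struct module pointer captured while the parent was still bound: once the parent driver unbinds, parent->driver may be NULL, so the cached pointer is the only thing that can safely be pinned. A condensed sketch of the pattern, with hypothetical foo_* names:

struct foo_switch {
        struct device dev;
        struct module *module;  /* cached while parent->driver is known valid */
};

static struct foo_switch *foo_switch_get(struct foo_switch *sw)
{
        /* pin the cached owner, never parent->driver->owner */
        if (!IS_ERR_OR_NULL(sw))
                WARN_ON(!try_module_get(sw->module));
        return sw;
}

static void foo_switch_put(struct foo_switch *sw)
{
        if (!IS_ERR_OR_NULL(sw)) {
                module_put(sw->module);
                put_device(&sw->dev);
        }
}
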
 
index 4e0eef1440b7fd7407cf2158adad796b516aeda5..300aeef160e75c9d84fbd4b69b3c3ad35a774f5b 100644 (file)
@@ -1105,7 +1105,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id)
 static int isd200_get_inquiry_data( struct us_data *us )
 {
        struct isd200_info *info = (struct isd200_info *)us->extra;
-       int retStatus = ISD200_GOOD;
+       int retStatus;
        u16 *id = info->id;
 
        usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n");
@@ -1137,6 +1137,13 @@ static int isd200_get_inquiry_data( struct us_data *us )
                                isd200_fix_driveid(id);
                                isd200_dump_driveid(us, id);
 
+                               /* Prevent division by 0 in isd200_scsi_to_ata() */
+                               if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) {
+                                       usb_stor_dbg(us, "   Invalid ATA Identify data\n");
+                                       retStatus = ISD200_ERROR;
+                                       goto Done;
+                               }
+
                                memset(&info->InquiryData, 0, sizeof(info->InquiryData));
 
                                /* Standard IDE interface only supports disks */
@@ -1202,6 +1209,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
                }
        }
 
+ Done:
        usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus);
 
        return(retStatus);
@@ -1481,22 +1489,27 @@ static int isd200_init_info(struct us_data *us)
 
 static int isd200_Initialization(struct us_data *us)
 {
+       int rc = 0;
+
        usb_stor_dbg(us, "ISD200 Initialization...\n");
 
        /* Initialize ISD200 info struct */
 
-       if (isd200_init_info(us) == ISD200_ERROR) {
+       if (isd200_init_info(us) < 0) {
                usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n");
+               rc = -ENOMEM;
        } else {
                /* Get device specific data */
 
-               if (isd200_get_inquiry_data(us) != ISD200_GOOD)
+               if (isd200_get_inquiry_data(us) != ISD200_GOOD) {
                        usb_stor_dbg(us, "ISD200 Initialization Failure\n");
-               else
+                       rc = -EINVAL;
+               } else {
                        usb_stor_dbg(us, "ISD200 Initialization complete\n");
+               }
        }
 
-       return 0;
+       return rc;
 }
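
The new guard exists because isd200_scsi_to_ata() divides by the head and sector counts taken from the IDENTIFY data when translating LBA addresses to CHS, so zeroed geometry from a misbehaving device meant a division by zero. A standalone sketch of the arithmetic being protected, using the conventional LBA-to-CHS formulas with illustrative values (not the driver's own code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t heads = 16, sectors = 63;      /* from ATA IDENTIFY words */
        uint32_t lba = 123456;

        if (heads == 0 || sectors == 0)         /* would divide by zero below */
                return 1;

        uint32_t cyl = lba / (heads * sectors);
        uint32_t head = (lba / sectors) % heads;
        uint32_t sect = (lba % sectors) + 1;

        printf("C/H/S = %u/%u/%u\n", cyl, head, sect);  /* 122/7/40 */
        return 0;
}
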
 
 
index c54e9805da536a0ec139ad789017b79131c88561..12cf9940e5b6759167f9ae7450df8af92a85c63a 100644 (file)
@@ -179,6 +179,13 @@ static int slave_configure(struct scsi_device *sdev)
                 */
                sdev->use_192_bytes_for_3f = 1;
 
+               /*
+                * Some devices report generic values until the media has been
+                * accessed. Force a READ(10) prior to querying device
+                * characteristics.
+                */
+               sdev->read_before_ms = 1;
+
                /*
                 * Some devices don't like MODE SENSE with page=0x3f,
                 * which is the command used for checking if a device
index 9707f53cfda9c08507082ac33b69b5d146c6927f..71ace274761f182f0cbb942676e74d7e2c26d7a1 100644 (file)
@@ -878,6 +878,13 @@ static int uas_slave_configure(struct scsi_device *sdev)
        if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
                sdev->guess_capacity = 1;
 
+       /*
+        * Some devices report generic values until the media has been
+        * accessed. Force a READ(10) prior to querying device
+        * characteristics.
+        */
+       sdev->read_before_ms = 1;
+
        /*
         * Some devices don't like MODE SENSE with page=0x3f,
         * which is the command used for checking if a device
index f81bec0c7b864dc605143078ec9f1cd3e2706379..f8ea3054be54245c4233b48facaabe91f52868ed 100644 (file)
@@ -559,16 +559,21 @@ static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char
 }
 static DEVICE_ATTR_RO(hpd);
 
-static struct attribute *dp_altmode_attrs[] = {
+static struct attribute *displayport_attrs[] = {
        &dev_attr_configuration.attr,
        &dev_attr_pin_assignment.attr,
        &dev_attr_hpd.attr,
        NULL
 };
 
-static const struct attribute_group dp_altmode_group = {
+static const struct attribute_group displayport_group = {
        .name = "displayport",
-       .attrs = dp_altmode_attrs,
+       .attrs = displayport_attrs,
+};
+
+static const struct attribute_group *displayport_groups[] = {
+       &displayport_group,
+       NULL,
 };
 
 int dp_altmode_probe(struct typec_altmode *alt)
@@ -576,7 +581,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
        const struct typec_altmode *port = typec_altmode_get_partner(alt);
        struct fwnode_handle *fwnode;
        struct dp_altmode *dp;
-       int ret;
 
        /* FIXME: Port can only be DFP_U. */
 
@@ -587,10 +591,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
              DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
                return -ENODEV;
 
-       ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
-       if (ret)
-               return ret;
-
        dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
        if (!dp)
                return -ENOMEM;
@@ -624,7 +624,6 @@ void dp_altmode_remove(struct typec_altmode *alt)
 {
        struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
 
-       sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
        cancel_work_sync(&dp->work);
 
        if (dp->connector_fwnode) {
@@ -649,6 +648,7 @@ static struct typec_altmode_driver dp_altmode_driver = {
        .driver = {
                .name = "typec_displayport",
                .owner = THIS_MODULE,
+               .dev_groups = displayport_groups,
        },
 };
 module_typec_altmode_driver(dp_altmode_driver);
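
Handing the attributes to the driver core via .dev_groups, instead of calling sysfs_create_group()/sysfs_remove_group() from probe and remove, means they are created before the device is announced to userspace and are torn down automatically in the right order. A minimal sketch of the wiring, assuming a dev_attr_foo built with DEVICE_ATTR_RO():

static struct attribute *foo_attrs[] = {
        &dev_attr_foo.attr,
        NULL
};

static const struct attribute_group foo_group = {
        .name = "foo",
        .attrs = foo_attrs,
};

static const struct attribute_group *foo_groups[] = {
        &foo_group,
        NULL,
};

/* then, in the (hypothetical) driver definition: */
/* .driver = { .name = "foo", .dev_groups = foo_groups, }, */
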
index f7d7daa60c8dc98b04d29f10b2e1377b02124d61..0965972310275e1c4d82be94051573648175a4fe 100644 (file)
@@ -3743,9 +3743,6 @@ static void tcpm_detach(struct tcpm_port *port)
        if (tcpm_port_is_disconnected(port))
                port->hard_reset_count = 0;
 
-       port->try_src_count = 0;
-       port->try_snk_count = 0;
-
        if (!port->attached)
                return;
 
@@ -4876,7 +4873,11 @@ static void run_state_machine(struct tcpm_port *port)
                break;
        case PORT_RESET:
                tcpm_reset_port(port);
-               tcpm_set_cc(port, TYPEC_CC_OPEN);
+               if (port->self_powered)
+                       tcpm_set_cc(port, TYPEC_CC_OPEN);
+               else
+                       tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
+                                   TYPEC_CC_RD : tcpm_rp_cc(port));
                tcpm_set_state(port, PORT_RESET_WAIT_OFF,
                               PD_T_ERROR_RECOVERY);
                break;
index 53a7ede8556df5688abb2631ce78a7c13dbc8308..faccc942b381be43700f0de95401bef80af500ec 100644 (file)
@@ -301,6 +301,7 @@ static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
        { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
        { .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
        { .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+       { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
        {}
 };
 
index 1183e7a871f8b270a9ff2106cef15e44720184a4..46823c2e2ba1207e327607fa0ca0c757bc0968aa 100644 (file)
@@ -2399,11 +2399,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
        struct fbcon_ops *ops = info->fbcon_par;
        struct fbcon_display *p = &fb_display[vc->vc_num];
        int resize, ret, old_userfont, old_width, old_height, old_charcount;
-       char *old_data = NULL;
+       u8 *old_data = vc->vc_font.data;
 
        resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
-       if (p->userfont)
-               old_data = vc->vc_font.data;
        vc->vc_font.data = (void *)(p->fontdata = data);
        old_userfont = p->userfont;
        if ((p->userfont = userfont))
@@ -2437,13 +2435,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
                update_screen(vc);
        }
 
-       if (old_data && (--REFCOUNT(old_data) == 0))
+       if (old_userfont && (--REFCOUNT(old_data) == 0))
                kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
        return 0;
 
 err_out:
        p->fontdata = old_data;
-       vc->vc_font.data = (void *)old_data;
+       vc->vc_font.data = old_data;
 
        if (userfont) {
                p->userfont = old_userfont;
index c26ee6fd73c9bb0cff77793e2af4ca5407c59aa1..8fdccf033b2d9bf9a05e967fb166bbabe82bfe19 100644 (file)
@@ -1010,8 +1010,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
                        goto getmem_done;
                }
                pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
-       } else {
-               goto err1;
        }
 
        /*
index 60685ec76d983523f15da7c7e97b38634b750c07..2e612834329ac127ae6e63d802a7b31b89558b0c 100644 (file)
@@ -105,6 +105,7 @@ struct affs_sb_info {
        int work_queued;                /* non-zero delayed work is queued */
        struct delayed_work sb_work;    /* superblock flush delayed work */
        spinlock_t work_lock;           /* protects sb_work and work_queued */
+       struct rcu_head rcu;
 };
 
 #define AFFS_MOUNT_SF_INTL             0x0001 /* International filesystem. */
index 58b391446ae1fd97e48891c82ec8d88f32314303..b56a95cf414a44277783e7242c33cba5cb818707 100644 (file)
@@ -640,7 +640,7 @@ static void affs_kill_sb(struct super_block *sb)
                affs_brelse(sbi->s_root_bh);
                kfree(sbi->s_prefix);
                mutex_destroy(&sbi->s_bmlock);
-               kfree(sbi);
+               kfree_rcu(sbi, rcu);
        }
 }
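
kfree_rcu() needs a struct rcu_head embedded in the object, which is what the first affs hunk adds to struct affs_sb_info; the free is then deferred until an RCU grace period has elapsed, so readers that picked up the pointer under rcu_read_lock() never touch freed memory. A minimal sketch of the pairing, with a hypothetical foo_info:

struct foo_info {
        /* ... payload ... */
        struct rcu_head rcu;    /* storage kfree_rcu() uses for deferral */
};

static void foo_teardown(struct foo_info *info)
{
        /*
         * RCU readers may still hold info; the memory is only
         * returned to the allocator after a grace period.
         */
        kfree_rcu(info, rcu);
}
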
 
index b5b8de521f99b26ba6c9b2fd707fb794a62612ae..8a67fc427e748a0840d9e92c1c0e8e4a3d7c4fdc 100644 (file)
@@ -479,8 +479,10 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                    dire->u.name[0] == '.' &&
                    ctx->actor != afs_lookup_filldir &&
                    ctx->actor != afs_lookup_one_filldir &&
-                   memcmp(dire->u.name, ".__afs", 6) == 0)
+                   memcmp(dire->u.name, ".__afs", 6) == 0) {
+                       ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
                        continue;
+               }
 
                /* found the next entry */
                if (!dir_emit(ctx, dire->u.name, nlen,
index 3d33b221d9ca256a3b3d978a835d2db9fff2e284..ef2cc8f565d25b15e086d2fc64c6f565bac7a16b 100644 (file)
@@ -417,13 +417,17 @@ static void afs_add_open_mmap(struct afs_vnode *vnode)
 
 static void afs_drop_open_mmap(struct afs_vnode *vnode)
 {
-       if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+       if (atomic_add_unless(&vnode->cb_nr_mmap, -1, 1))
                return;
 
        down_write(&vnode->volume->open_mmaps_lock);
 
-       if (atomic_read(&vnode->cb_nr_mmap) == 0)
+       read_seqlock_excl(&vnode->cb_lock);
+       // the only place where ->cb_nr_mmap may hit 0
+       // see __afs_break_callback() for the other side...
+       if (atomic_dec_and_test(&vnode->cb_nr_mmap))
                list_del_init(&vnode->cb_mmap_link);
+       read_sequnlock_excl(&vnode->cb_lock);
 
        up_write(&vnode->volume->open_mmaps_lock);
        flush_work(&vnode->cb_work);
index 9c03fcf7ffaa84e9f7604444209bd934b64db466..6ce5a612937c61e2021b32cad1f68a22b7c501ca 100644 (file)
@@ -321,8 +321,7 @@ struct afs_net {
        struct list_head        fs_probe_slow;  /* List of afs_server to probe at 5m intervals */
        struct hlist_head       fs_proc;        /* procfs servers list */
 
-       struct hlist_head       fs_addresses4;  /* afs_server (by lowest IPv4 addr) */
-       struct hlist_head       fs_addresses6;  /* afs_server (by lowest IPv6 addr) */
+       struct hlist_head       fs_addresses;   /* afs_server (by lowest addr) */
        seqlock_t               fs_addr_lock;   /* For fs_addresses */
 
        struct work_struct      fs_manager;
@@ -561,8 +560,7 @@ struct afs_server {
        struct afs_server __rcu *uuid_next;     /* Next server with same UUID */
        struct afs_server       *uuid_prev;     /* Previous server with same UUID */
        struct list_head        probe_link;     /* Link in net->fs_probe_list */
-       struct hlist_node       addr4_link;     /* Link in net->fs_addresses4 */
-       struct hlist_node       addr6_link;     /* Link in net->fs_addresses6 */
+       struct hlist_node       addr_link;      /* Link in net->fs_addresses */
        struct hlist_node       proc_link;      /* Link in net->fs_proc */
        struct list_head        volumes;        /* RCU list of afs_server_entry objects */
        struct afs_server       *gc_next;       /* Next server in manager's list */
index 1b3bd21c168acc223bfaf39fa454d2cb49ae3fbb..a14f6013e316d964bfa6eef3e09befe62d591411 100644 (file)
@@ -90,8 +90,7 @@ static int __net_init afs_net_init(struct net *net_ns)
        INIT_LIST_HEAD(&net->fs_probe_slow);
        INIT_HLIST_HEAD(&net->fs_proc);
 
-       INIT_HLIST_HEAD(&net->fs_addresses4);
-       INIT_HLIST_HEAD(&net->fs_addresses6);
+       INIT_HLIST_HEAD(&net->fs_addresses);
        seqlock_init(&net->fs_addr_lock);
 
        INIT_WORK(&net->fs_manager, afs_manage_servers);
index e169121f603e28d5679a895d0ca0f136270a6f56..038f9d0ae3af8ee1df24dc163c972e826c5d62fb 100644 (file)
@@ -38,7 +38,7 @@ struct afs_server *afs_find_server(struct afs_net *net, const struct rxrpc_peer
                seq++; /* 2 on the 1st/lockless path, otherwise odd */
                read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
 
-               hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
+               hlist_for_each_entry_rcu(server, &net->fs_addresses, addr_link) {
                        estate = rcu_dereference(server->endpoint_state);
                        alist = estate->addresses;
                        for (i = 0; i < alist->nr_addrs; i++)
@@ -177,10 +177,8 @@ added_dup:
         * bit, but anything we might want to do gets messy and memory
         * intensive.
         */
-       if (alist->nr_ipv4 > 0)
-               hlist_add_head_rcu(&server->addr4_link, &net->fs_addresses4);
-       if (alist->nr_addrs > alist->nr_ipv4)
-               hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);
+       if (alist->nr_addrs > 0)
+               hlist_add_head_rcu(&server->addr_link, &net->fs_addresses);
 
        write_sequnlock(&net->fs_addr_lock);
 
@@ -511,10 +509,8 @@ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
 
                        list_del(&server->probe_link);
                        hlist_del_rcu(&server->proc_link);
-                       if (!hlist_unhashed(&server->addr4_link))
-                               hlist_del_rcu(&server->addr4_link);
-                       if (!hlist_unhashed(&server->addr6_link))
-                               hlist_del_rcu(&server->addr6_link);
+                       if (!hlist_unhashed(&server->addr_link))
+                               hlist_del_rcu(&server->addr_link);
                }
                write_sequnlock(&net->fs_lock);
 
index 020ecd45e476214f08b9867412ec4b379889344d..af3a3f57c1b3f9512bcaa08ce37a0f8173e809d0 100644 (file)
@@ -353,7 +353,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
 {
        struct afs_server_list *new, *old, *discard;
        struct afs_vldb_entry *vldb;
-       char idbuf[16];
+       char idbuf[24];
        int ret, idsz;
 
        _enter("");
@@ -361,7 +361,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
        /* We look up an ID by passing it as a decimal string in the
         * operation's name parameter.
         */
-       idsz = sprintf(idbuf, "%llu", volume->vid);
+       idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);
 
        vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
        if (IS_ERR(vldb)) {
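
The buffer bump is simple arithmetic: a volume ID is a u64, whose largest value, 18446744073709551615, takes 20 decimal digits plus a NUL terminator, so the old char idbuf[16] could be overrun; 24 leaves headroom and snprintf() bounds the write either way. A standalone check of the worst case:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        char idbuf[24];
        uint64_t vid = UINT64_MAX;      /* worst case: 20 digits */
        int idsz = snprintf(idbuf, sizeof(idbuf), "%" PRIu64, vid);

        printf("%d: %s\n", idsz, idbuf);        /* 20: 18446744073709551615 */
        return 0;
}
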
index bb2ff48991f35ed59479a004641e1452c7bad3ea..9cdaa2faa5363333627e0cba54a4efe75b45b144 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -589,13 +589,24 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 
 void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
 {
-       struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
-       struct kioctx *ctx = req->ki_ctx;
+       struct aio_kiocb *req;
+       struct kioctx *ctx;
        unsigned long flags;
 
+       /*
+        * The kiocb didn't come from aio, or is neither a read nor a write;
+        * ignore it.
+        */
+       if (!(iocb->ki_flags & IOCB_AIO_RW))
+               return;
+
+       req = container_of(iocb, struct aio_kiocb, rw);
+
        if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
                return;
 
+       ctx = req->ki_ctx;
+
        spin_lock_irqsave(&ctx->ctx_lock, flags);
        list_add_tail(&req->ki_list, &ctx->active_reqs);
        req->ki_cancel = cancel;
@@ -1509,7 +1520,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
        req->ki_complete = aio_complete_rw;
        req->private = NULL;
        req->ki_pos = iocb->aio_offset;
-       req->ki_flags = req->ki_filp->f_iocb_flags;
+       req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
        if (iocb->aio_flags & IOCB_FLAG_RESFD)
                req->ki_flags |= IOCB_EVENTFD;
        if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
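
The flag check works because container_of() is plain pointer arithmetic: applied to a kiocb that is not actually embedded in an aio_kiocb, it produces a pointer into unrelated memory, so IOCB_AIO_RW, set when aio itself prepares the request, must prove the embedding before the cast. A standalone illustration of the flag-then-container_of pattern, with hypothetical stand-in types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define F_FROM_AIO 0x1

struct kiocb_like { int ki_flags; };
struct aio_like { int cookie; struct kiocb_like rw; };

static void set_cancel(struct kiocb_like *iocb)
{
        /* without this check the cast below would be bogus */
        if (!(iocb->ki_flags & F_FROM_AIO))
                return;

        struct aio_like *req = container_of(iocb, struct aio_like, rw);
        printf("cookie=%d\n", req->cookie);
}

int main(void)
{
        struct aio_like a = { .cookie = 42, .rw = { .ki_flags = F_FROM_AIO } };
        struct kiocb_like plain = { .ki_flags = 0 };

        set_cancel(&a.rw);      /* prints cookie=42 */
        set_cancel(&plain);     /* not aio-backed: safely ignored */
        return 0;
}
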
index b4dc319bcb2bc0a5363e74f6d2096d3b5652599d..569b97904da42eec8975e8662dd78895d41d62fe 100644 (file)
@@ -68,9 +68,11 @@ void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer
 
 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
-       prt_str(out, "bucket=");
-       bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
-       prt_str(out, " ");
+       if (bch2_dev_exists2(c, k.k->p.inode)) {
+               prt_str(out, "bucket=");
+               bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
+               prt_str(out, " ");
+       }
 
        bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
 }
index 5467a8635be113102c56bb6f02986209533c35ac..3ef338df82f5e46228f583a85a7cacdba233a64b 100644 (file)
@@ -2156,7 +2156,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
                 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
                 * that's what we check against in extents mode:
                 */
-               if (k.k->p.inode > end.inode)
+               if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+                            ? bkey_gt(k.k->p, end)
+                            : k.k->p.inode > end.inode))
                        goto end;
 
                if (iter->update_path &&
index 73c12e565af50a465260856baaa831eb2a542caa..27710cdd5710ec5bba9ff9a11cad92f7cf14bc09 100644 (file)
@@ -303,18 +303,6 @@ void bch2_readahead(struct readahead_control *ractl)
        darray_exit(&readpages_iter.folios);
 }
 
-static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
-                            subvol_inum inum, struct folio *folio)
-{
-       bch2_folio_create(folio, __GFP_NOFAIL);
-
-       rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
-       rbio->bio.bi_iter.bi_sector = folio_sector(folio);
-       BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
-
-       bch2_trans_run(c, (bchfs_read(trans, rbio, inum, NULL), 0));
-}
-
 static void bch2_read_single_folio_end_io(struct bio *bio)
 {
        complete(bio->bi_private);
@@ -329,6 +317,9 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
        int ret;
        DECLARE_COMPLETION_ONSTACK(done);
 
+       if (!bch2_folio_create(folio, GFP_KERNEL))
+               return -ENOMEM;
+
        bch2_inode_opts_get(&opts, c, &inode->ei_inode);
 
        rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
@@ -336,7 +327,11 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
        rbio->bio.bi_private = &done;
        rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
 
-       __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
+       rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
+       rbio->bio.bi_iter.bi_sector = folio_sector(folio);
+       BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
+
+       bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0));
        wait_for_completion(&done);
 
        ret = blk_status_to_errno(rbio->bio.bi_status);
index e3b219e19e1008ccfe1ff61e966115795f9c1831..33cb6da3a5ad28f2c014c2ef12408937933d49c3 100644 (file)
@@ -88,6 +88,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
                return ret;
 
        shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
+       if (shorten >= iter->count)
+               shorten = 0;
        iter->count -= shorten;
 
        bio = bio_alloc_bioset(NULL,
index 2cf626315652c0054df96ffce536ecc3d11f0969..c33dca641575dffc58b6db8354e71c879ed5cf26 100644 (file)
@@ -892,9 +892,11 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
                                         journal_seq_pin(j, seq)->devs);
                seq++;
 
-               spin_unlock(&j->lock);
-               ret = bch2_mark_replicas(c, &replicas.e);
-               spin_lock(&j->lock);
+               if (replicas.e.nr_devs) {
+                       spin_unlock(&j->lock);
+                       ret = bch2_mark_replicas(c, &replicas.e);
+                       spin_lock(&j->lock);
+               }
        }
        spin_unlock(&j->lock);
 err:
index 45f67e8b29eb67f188e5cfb32aa39e0b1ad1d625..ac6ba04d5521714ece2e2cb00400fff60ec05eb6 100644 (file)
@@ -728,7 +728,7 @@ static int check_snapshot(struct btree_trans *trans,
                return 0;
 
        memset(&s, 0, sizeof(s));
-       memcpy(&s, k.v, bkey_val_bytes(k.k));
+       memcpy(&s, k.v, min(sizeof(s), bkey_val_bytes(k.k)));
 
        id = le32_to_cpu(s.parent);
        if (id) {
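
Clamping the length to min(sizeof(s), bkey_val_bytes(k.k)) protects both directions: an undersized on-disk value no longer makes memcpy() read past the key, and an oversized one can no longer overflow the stack copy, while the preceding memset() zero-fills whatever the short case leaves untouched. A standalone illustration of the bounded-copy idiom:

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        char dst[8] = { 0 };                    /* pre-zeroed, like the memset() */
        const char src[] = "0123456789abcdef";  /* larger than dst */

        /* never copy more than the destination holds (room kept for NUL) */
        memcpy(dst, src, MIN(sizeof(dst) - 1, strlen(src)));

        printf("%s\n", dst);    /* "0123456" */
        return 0;
}
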
index 231003b405efc304a4cefa61a6e4e2f30b4b9466..3a32faa86b5c4a2eee98de32951c18dc73052041 100644 (file)
@@ -289,7 +289,7 @@ int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigne
        do {
                nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, skipnr + 1);
        } while (nr_entries == stack->size &&
-                !(ret = darray_make_room(stack, stack->size * 2)));
+                !(ret = darray_make_room_gfp(stack, stack->size * 2, gfp)));
 
        stack->nr = nr_entries;
        up_read(&task->signal->exec_update_lock);
index ceb5f586a2d55571d53db2de227f4ef0f5ec1c27..1043a8142351b2692587f4a5e6d11147ee7fde99 100644 (file)
@@ -494,7 +494,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
 
        block_rsv = get_block_rsv(trans, root);
 
-       if (unlikely(block_rsv->size == 0))
+       if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
                goto try_reserve;
 again:
        ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
index b0bd12b8652f4f51e467a95b4bfa36ec8d894837..43a9a6b5a79f4622607529393eaced15cb1409ac 100644 (file)
@@ -101,4 +101,36 @@ static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
        return data_race(rsv->full);
 }
 
+/*
+ * Get the reserved amount of a block reserve in a context where a stale
+ * value is acceptable, instead of accessing it directly and triggering a
+ * data race warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
+{
+       u64 ret;
+
+       spin_lock(&rsv->lock);
+       ret = rsv->reserved;
+       spin_unlock(&rsv->lock);
+
+       return ret;
+}
+
+/*
+ * Get the size of a block reserve in a context where a stale value is
+ * acceptable, instead of accessing it directly and triggering a data race
+ * warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
+{
+       u64 ret;
+
+       spin_lock(&rsv->lock);
+       ret = rsv->size;
+       spin_unlock(&rsv->lock);
+
+       return ret;
+}
+
 #endif /* BTRFS_BLOCK_RSV_H */
index c276b136ab63a16d5278a654ab26a670098806c0..5b0b645714183a7adcbe093d48964ee7380c4b24 100644 (file)
@@ -1046,7 +1046,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
                        goto add;
 
                /* Skip too large extent */
-               if (range_len >= extent_thresh)
+               if (em->len >= extent_thresh)
                        goto next;
 
                /*
index 1502d664c89273eb54ba3516528b74eab094f3b3..79c4293ddf373f7d452b2cd05aeec4dd1d9fb5f7 100644 (file)
@@ -725,6 +725,23 @@ leave:
        return ret;
 }
 
+static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
+{
+       if (args->start.srcdevid == 0) {
+               if (memchr(args->start.srcdev_name, 0,
+                          sizeof(args->start.srcdev_name)) == NULL)
+                       return -ENAMETOOLONG;
+       } else {
+               args->start.srcdev_name[0] = 0;
+       }
+
+       if (memchr(args->start.tgtdev_name, 0,
+                  sizeof(args->start.tgtdev_name)) == NULL)
+               return -ENAMETOOLONG;
+
+       return 0;
+}
+
 int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
                            struct btrfs_ioctl_dev_replace_args *args)
 {
@@ -737,10 +754,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
        default:
                return -EINVAL;
        }
-
-       if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
-           args->start.tgtdev_name[0] == '\0')
-               return -EINVAL;
+       ret = btrfs_check_replace_dev_names(args);
+       if (ret < 0)
+               return ret;
 
        ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
                                        args->start.srcdevid,
index e71ef97d0a7cabb236e8cbd073667b2e3d143dca..c843563914cad08e2dd84ef1741e19d933f092ee 100644 (file)
@@ -1307,12 +1307,12 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
  *
  * @objectid:  root id
  * @anon_dev:  preallocated anonymous block device number for new roots,
- *             pass 0 for new allocation.
+ *             pass NULL for a new allocation.
  * @check_ref: whether to check root item references, If true, return -ENOENT
  *             for orphan roots
  */
 static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
-                                            u64 objectid, dev_t anon_dev,
+                                            u64 objectid, dev_t *anon_dev,
                                             bool check_ref)
 {
        struct btrfs_root *root;
@@ -1342,9 +1342,9 @@ again:
                 * that common but still possible.  In that case, we just need
                 * to free the anon_dev.
                 */
-               if (unlikely(anon_dev)) {
-                       free_anon_bdev(anon_dev);
-                       anon_dev = 0;
+               if (unlikely(anon_dev && *anon_dev)) {
+                       free_anon_bdev(*anon_dev);
+                       *anon_dev = 0;
                }
 
                if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
@@ -1366,7 +1366,7 @@ again:
                goto fail;
        }
 
-       ret = btrfs_init_fs_root(root, anon_dev);
+       ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
        if (ret)
                goto fail;
 
@@ -1402,7 +1402,7 @@ fail:
         * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
         * and once again by our caller.
         */
-       if (anon_dev)
+       if (anon_dev && *anon_dev)
                root->anon_dev = 0;
        btrfs_put_root(root);
        return ERR_PTR(ret);
@@ -1418,7 +1418,7 @@ fail:
 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
                                     u64 objectid, bool check_ref)
 {
-       return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
+       return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
 }
 
 /*
@@ -1426,11 +1426,11 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
  * the anonymous block device id
  *
  * @objectid:  tree objectid
- * @anon_dev:  if zero, allocate a new anonymous block device or use the
- *             parameter value
+ * @anon_dev:  if NULL, allocate a new anonymous block device or use the
+ *             parameter value if not NULL
  */
 struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
-                                        u64 objectid, dev_t anon_dev)
+                                        u64 objectid, dev_t *anon_dev)
 {
        return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
 }
index 9413726b329bb123202a66cf341320ca2d99e410..eb3473d1c1ac1b239092a594cf0f788f961b4943 100644 (file)
@@ -61,7 +61,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
                                     u64 objectid, bool check_ref);
 struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
-                                        u64 objectid, dev_t anon_dev);
+                                        u64 objectid, dev_t *anon_dev);
 struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
                                                 struct btrfs_path *path,
                                                 u64 objectid);
index cfd2967f04a293cf3d38956e9e21ce9e6656b498..8b4bef05e22217cfe43af497060889e0e5b02d0a 100644 (file)
@@ -2480,6 +2480,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
                                struct fiemap_cache *cache,
                                u64 offset, u64 phys, u64 len, u32 flags)
 {
+       u64 cache_end;
        int ret = 0;
 
        /* Set at the end of extent_fiemap(). */
@@ -2489,15 +2490,102 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
                goto assign;
 
        /*
-        * Sanity check, extent_fiemap() should have ensured that new
-        * fiemap extent won't overlap with cached one.
-        * Not recoverable.
+        * When iterating the extents of the inode, at extent_fiemap(), we may
+        * find an extent that starts at an offset before the end offset of the
+        * previous extent we processed. This happens if fiemap is called
+        * without FIEMAP_FLAG_SYNC and there are ordered extents completing
+        * while we call btrfs_next_leaf() (through fiemap_next_leaf_item()).
         *
-        * NOTE: Physical address can overlap, due to compression
+        * For example we are in leaf X processing its last item, which is the
+        * file extent item for file range [512K, 1M[, and after
+        * btrfs_next_leaf() releases the path, there's an ordered extent that
+        * completes for the file range [768K, 2M[, and that results in trimming
+        * the file extent item so that it now corresponds to the file range
+        * [512K, 768K[ and a new file extent item is inserted for the file
+        * range [768K, 2M[, which may end up as the last item of leaf X or as
+        * the first item of the next leaf - in either case btrfs_next_leaf()
+        * will leave us with a path pointing to the new extent item, for the
+        * file range [768K, 2M[, since that's the first key that follows the
+        * last one we processed. So in order not to report overlapping extents
+        * to user space, we trim the length of the previously cached extent and
+        * emit it.
+        *
+        * Upon calling btrfs_next_leaf() we may also find an extent with an
+        * offset smaller than or equal to cache->offset, and this happens
+        * when we had a hole or prealloc extent with several delalloc ranges in
+        * it, but after btrfs_next_leaf() released the path, delalloc was
+        * flushed and the resulting ordered extents were completed, so we can
+        * now have found a file extent item for an offset that is smaller than
+        * or equal to what we have in cache->offset. We deal with this as
+        * described below.
         */
-       if (cache->offset + cache->len > offset) {
-               WARN_ON(1);
-               return -EINVAL;
+       cache_end = cache->offset + cache->len;
+       if (cache_end > offset) {
+               if (offset == cache->offset) {
+                       /*
+                        * We cached a delalloc range (found in the io tree) for
+                        * a hole or prealloc extent and we have now found a
+                        * file extent item for the same offset. What we have
+                        * now is more recent and up to date, so discard what
+                        * we had in the cache and use what we have just found.
+                        */
+                       goto assign;
+               } else if (offset > cache->offset) {
+                       /*
+                        * The extent range we previously found ends after the
+                        * offset of the file extent item we found and that
+                        * offset falls somewhere in the middle of that previous
+                        * extent range. So adjust the range we previously found
+                        * to end at the offset of the file extent item we have
+                        * just found, since this extent is more up to date.
+                        * Emit that adjusted range and cache the file extent
+                        * item we have just found. This corresponds to the case
+                        * where a previously found file extent item was split
+                        * due to an ordered extent completing.
+                        */
+                       cache->len = offset - cache->offset;
+                       goto emit;
+               } else {
+                       const u64 range_end = offset + len;
+
+                       /*
+                        * The offset of the file extent item we have just found
+                        * is behind the cached offset. This means we were
+                        * processing a hole or prealloc extent for which we
+                        * have found delalloc ranges (in the io tree), so what
+                        * we have in the cache is the last delalloc range we
+                        * found while the file extent item we found can be
+                        * either for a whole delalloc range we previously
+                        * emitted or only a part of that range.
+                        *
+                        * We have two cases here:
+                        *
+                        * 1) The file extent item's range ends at or behind the
+                        *    cached extent's end. In this case just ignore the
+                        *    current file extent item because we don't want to
+                        *    overlap with previous ranges that may have been
+                        *    emitted already;
+                        *
+                        * 2) The file extent item starts behind the currently
+                        *    cached extent but its end offset goes beyond the
+                        *    end offset of the cached extent. We don't want to
+                        *    overlap with a previous range that may have been
+                        *    emitted already, so we emit the currently cached
+                        *    extent and then partially store the current file
+                        *    extent item's range in the cache, for the subrange
+                        *    going from the cached extent's end to the end of the
+                        *    file extent item.
+                        */
+                       if (range_end <= cache_end)
+                               return 0;
+
+                       if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
+                               phys += cache_end - offset;
+
+                       offset = cache_end;
+                       len = range_end - cache_end;
+                       goto emit;
+               }
        }
 
        /*
@@ -2517,6 +2605,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
                return 0;
        }
 
+emit:
        /* Not mergeable, need to submit cached one */
        ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
                                      cache->len, cache->flags);
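
Using the numbers from the comment above: with [512K, 1M[ cached and a new file extent item appearing at 768K, the middle branch trims the cached entry to [512K, 768K[ and emits it before caching the new item. A compilable toy of just that arithmetic (plain userspace C, not the btrfs code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long cache_off = 512 << 10;   /* cached [512K, 1M[ */
        unsigned long long cache_len = 512 << 10;
        unsigned long long offset    = 768 << 10;   /* new item starts here */
        unsigned long long cache_end = cache_off + cache_len;

        if (cache_end > offset && offset > cache_off) {
            cache_len = offset - cache_off;         /* trim to [512K, 768K[ */
            printf("emit [%llu, %llu[\n", cache_off, cache_off + cache_len);
        }
        return 0;
    }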
@@ -2689,16 +2778,34 @@ static int fiemap_process_hole(struct btrfs_inode *inode,
         * it beyond i_size.
         */
        while (cur_offset < end && cur_offset < i_size) {
+               struct extent_state *cached_state = NULL;
                u64 delalloc_start;
                u64 delalloc_end;
                u64 prealloc_start;
+               u64 lockstart;
+               u64 lockend;
                u64 prealloc_len = 0;
                bool delalloc;
 
+               lockstart = round_down(cur_offset, inode->root->fs_info->sectorsize);
+               lockend = round_up(end, inode->root->fs_info->sectorsize);
+
+               /*
+                * We are only locking for the delalloc range because that's the
+                * only thing that can change here.  With fiemap we have a lock
+                * on the inode, so no buffered or direct writes can happen.
+                *
+                * However mmaps and normal page writeback will cause this to
+                * change arbitrarily.  We have to lock the extent lock here to
+                * make sure that nobody messes with the tree while we're doing
+                * btrfs_find_delalloc_in_range.
+                */
+               lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
                delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
                                                        delalloc_cached_state,
                                                        &delalloc_start,
                                                        &delalloc_end);
+               unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
                if (!delalloc)
                        break;
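
The hunk above narrows locking from the whole fiemap pass (see the extent_fiemap() hunks below, which drop lockstart/lockend) to just the one lookup that races with mmap and page writeback. A pthreads sketch of that scope-narrowing, with lookup_range() as a hypothetical stand-in for btrfs_find_delalloc_in_range():

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hypothetical tree query; pretend it must not race with updaters. */
    static bool lookup_range(unsigned long long start, unsigned long long end,
                             unsigned long long *fstart, unsigned long long *fend)
    {
        *fstart = start;
        *fend = end;
        return true;
    }

    /* Take the lock only around the query, not around the caller's whole
     * iteration: updaters make progress between iterations, and the
     * caller still acts on a consistent snapshot of the range. */
    static bool find_range(unsigned long long start, unsigned long long end,
                           unsigned long long *fstart, unsigned long long *fend)
    {
        bool found;

        pthread_mutex_lock(&tree_lock);
        found = lookup_range(start, end, fstart, fend);
        pthread_mutex_unlock(&tree_lock);
        return found;
    }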
 
@@ -2866,15 +2973,15 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                  u64 start, u64 len)
 {
        const u64 ino = btrfs_ino(inode);
-       struct extent_state *cached_state = NULL;
        struct extent_state *delalloc_cached_state = NULL;
        struct btrfs_path *path;
        struct fiemap_cache cache = { 0 };
        struct btrfs_backref_share_check_ctx *backref_ctx;
        u64 last_extent_end;
        u64 prev_extent_end;
-       u64 lockstart;
-       u64 lockend;
+       u64 range_start;
+       u64 range_end;
+       const u64 sectorsize = inode->root->fs_info->sectorsize;
        bool stopped = false;
        int ret;
 
@@ -2885,22 +2992,19 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                goto out;
        }
 
-       lockstart = round_down(start, inode->root->fs_info->sectorsize);
-       lockend = round_up(start + len, inode->root->fs_info->sectorsize);
-       prev_extent_end = lockstart;
-
-       btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
-       lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+       range_start = round_down(start, sectorsize);
+       range_end = round_up(start + len, sectorsize);
+       prev_extent_end = range_start;
 
        ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
        if (ret < 0)
-               goto out_unlock;
+               goto out;
        btrfs_release_path(path);
 
        path->reada = READA_FORWARD;
-       ret = fiemap_search_slot(inode, path, lockstart);
+       ret = fiemap_search_slot(inode, path, range_start);
        if (ret < 0) {
-               goto out_unlock;
+               goto out;
        } else if (ret > 0) {
                /*
                 * No file extent item found, but we may have delalloc between
@@ -2910,7 +3014,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                goto check_eof_delalloc;
        }
 
-       while (prev_extent_end < lockend) {
+       while (prev_extent_end < range_end) {
                struct extent_buffer *leaf = path->nodes[0];
                struct btrfs_file_extent_item *ei;
                struct btrfs_key key;
@@ -2933,21 +3037,21 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                 * The first iteration can leave us at an extent item that ends
                 * before our range's start. Move to the next item.
                 */
-               if (extent_end <= lockstart)
+               if (extent_end <= range_start)
                        goto next_item;
 
                backref_ctx->curr_leaf_bytenr = leaf->start;
 
                /* We have an implicit hole (NO_HOLES feature enabled). */
                if (prev_extent_end < key.offset) {
-                       const u64 range_end = min(key.offset, lockend) - 1;
+                       const u64 hole_end = min(key.offset, range_end) - 1;
 
                        ret = fiemap_process_hole(inode, fieinfo, &cache,
                                                  &delalloc_cached_state,
                                                  backref_ctx, 0, 0, 0,
-                                                 prev_extent_end, range_end);
+                                                 prev_extent_end, hole_end);
                        if (ret < 0) {
-                               goto out_unlock;
+                               goto out;
                        } else if (ret > 0) {
                                /* fiemap_fill_next_extent() told us to stop. */
                                stopped = true;
@@ -2955,7 +3059,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                        }
 
                        /* We've reached the end of the fiemap range, stop. */
-                       if (key.offset >= lockend) {
+                       if (key.offset >= range_end) {
                                stopped = true;
                                break;
                        }
@@ -3003,7 +3107,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                                                                  extent_gen,
                                                                  backref_ctx);
                                if (ret < 0)
-                                       goto out_unlock;
+                                       goto out;
                                else if (ret > 0)
                                        flags |= FIEMAP_EXTENT_SHARED;
                        }
@@ -3014,7 +3118,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                }
 
                if (ret < 0) {
-                       goto out_unlock;
+                       goto out;
                } else if (ret > 0) {
                        /* fiemap_fill_next_extent() told us to stop. */
                        stopped = true;
@@ -3025,12 +3129,12 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 next_item:
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
-                       goto out_unlock;
+                       goto out;
                }
 
                ret = fiemap_next_leaf_item(inode, path);
                if (ret < 0) {
-                       goto out_unlock;
+                       goto out;
                } else if (ret > 0) {
                        /* No more file extent items for this inode. */
                        break;
@@ -3049,29 +3153,41 @@ check_eof_delalloc:
        btrfs_free_path(path);
        path = NULL;
 
-       if (!stopped && prev_extent_end < lockend) {
+       if (!stopped && prev_extent_end < range_end) {
                ret = fiemap_process_hole(inode, fieinfo, &cache,
                                          &delalloc_cached_state, backref_ctx,
-                                         0, 0, 0, prev_extent_end, lockend - 1);
+                                         0, 0, 0, prev_extent_end, range_end - 1);
                if (ret < 0)
-                       goto out_unlock;
-               prev_extent_end = lockend;
+                       goto out;
+               prev_extent_end = range_end;
        }
 
        if (cache.cached && cache.offset + cache.len >= last_extent_end) {
                const u64 i_size = i_size_read(&inode->vfs_inode);
 
                if (prev_extent_end < i_size) {
+                       struct extent_state *cached_state = NULL;
                        u64 delalloc_start;
                        u64 delalloc_end;
+                       u64 lockstart;
+                       u64 lockend;
                        bool delalloc;
 
+                       lockstart = round_down(prev_extent_end, sectorsize);
+                       lockend = round_up(i_size, sectorsize);
+
+                       /*
+                        * See the comment in fiemap_process_hole as to why
+                        * we're doing the locking here.
+                        */
+                       lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
                        delalloc = btrfs_find_delalloc_in_range(inode,
                                                                prev_extent_end,
                                                                i_size - 1,
                                                                &delalloc_cached_state,
                                                                &delalloc_start,
                                                                &delalloc_end);
+                       unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
                        if (!delalloc)
                                cache.flags |= FIEMAP_EXTENT_LAST;
                } else {
@@ -3080,10 +3196,6 @@ check_eof_delalloc:
        }
 
        ret = emit_last_fiemap_cache(fieinfo, &cache);
-
-out_unlock:
-       unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
-       btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 out:
        free_extent_state(delalloc_cached_state);
        btrfs_free_backref_share_ctx(backref_ctx);
index f88e0ca8331d9b5448e8e07c89e5e66395c782b3..4795738d5785bce730fad21b68a00ff729b97915 100644 (file)
@@ -7835,6 +7835,7 @@ struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        u64 start, u64 len)
 {
+       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        int     ret;
 
        ret = fiemap_prep(inode, fieinfo, start, &len, 0);
@@ -7860,7 +7861,26 @@ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        return ret;
        }
 
-       return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
+       btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);
+
+       /*
+        * We did an initial flush to avoid holding the inode's lock while
+        * triggering writeback and waiting for the completion of IO and ordered
+        * extents. Now after we locked the inode we do it again, because it's
+        * possible a new write may have happened in between those two steps.
+        */
+       if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
+               ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
+               if (ret) {
+                       btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
+                       return ret;
+               }
+       }
+
+       ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
+       btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
+
+       return ret;
 }
 
 static int btrfs_writepages(struct address_space *mapping,
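
The flush/lock/flush ordering above keeps the slow first writeback outside the inode lock, while the second, usually cheap flush catches writes that raced in between. A userspace analogue using fsync() and a shared rwlock (illustrative only, not the btrfs locking):

    #include <pthread.h>
    #include <unistd.h>

    static int sync_then_walk(int fd, pthread_rwlock_t *lock)
    {
        if (fsync(fd) < 0)              /* 1st flush: slow, lock not held */
            return -1;

        pthread_rwlock_rdlock(lock);    /* shared, like BTRFS_ILOCK_SHARED */
        if (fsync(fd) < 0) {            /* 2nd flush: catches racing writes */
            pthread_rwlock_unlock(lock);
            return -1;
        }
        /* ... walk a now-stable view of the file ... */
        pthread_rwlock_unlock(lock);
        return 0;
    }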
index ac3316e0d11c3a42835dc8a2094b00a16019bd64..9d1eac15e09e141212cf0edadffcca873cba58f1 100644 (file)
@@ -721,7 +721,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
        free_extent_buffer(leaf);
        leaf = NULL;
 
-       new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
+       new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev);
        if (IS_ERR(new_root)) {
                ret = PTR_ERR(new_root);
                btrfs_abort_transaction(trans, ret);
index 7902298c1f25bbee1586a97f2223f3c079e3a8fb..e48a063ef0851f9476fd37a00572c1dd6c6fe379 100644 (file)
@@ -6705,11 +6705,20 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
                                if (ret)
                                        goto out;
                        }
-                       if (sctx->cur_inode_last_extent <
-                           sctx->cur_inode_size) {
-                               ret = send_hole(sctx, sctx->cur_inode_size);
-                               if (ret)
+                       if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
+                               ret = range_is_hole_in_parent(sctx,
+                                                     sctx->cur_inode_last_extent,
+                                                     sctx->cur_inode_size);
+                               if (ret < 0) {
                                        goto out;
+                               } else if (ret == 0) {
+                                       ret = send_hole(sctx, sctx->cur_inode_size);
+                                       if (ret < 0)
+                                               goto out;
+                               } else {
+                                       /* Range is already a hole, skip. */
+                                       ret = 0;
+                               }
                        }
                }
                if (need_truncate) {
index 571bb13587d5e7aabc1c40feab093af5257a6782..3b54eb5834746be51807d3e13023fba3f2701981 100644 (file)
@@ -856,7 +856,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *space_info)
 {
-       u64 global_rsv_size = fs_info->global_block_rsv.reserved;
+       const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
        u64 ordered, delalloc;
        u64 thresh;
        u64 used;
@@ -956,8 +956,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
        ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
        delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
        if (ordered >= delalloc)
-               used += fs_info->delayed_refs_rsv.reserved +
-                       fs_info->delayed_block_rsv.reserved;
+               used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
+                       btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
        else
                used += space_info->bytes_may_use - global_rsv_size;
 
@@ -1173,7 +1173,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
                enum btrfs_flush_state flush;
                u64 delalloc_size = 0;
                u64 to_reclaim, block_rsv_size;
-               u64 global_rsv_size = global_rsv->reserved;
+               const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
 
                loops++;
 
@@ -1185,9 +1185,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
                 * assume it's tied up in delalloc reservations.
                 */
                block_rsv_size = global_rsv_size +
-                       delayed_block_rsv->reserved +
-                       delayed_refs_rsv->reserved +
-                       trans_rsv->reserved;
+                       btrfs_block_rsv_reserved(delayed_block_rsv) +
+                       btrfs_block_rsv_reserved(delayed_refs_rsv) +
+                       btrfs_block_rsv_reserved(trans_rsv);
                if (block_rsv_size < space_info->bytes_may_use)
                        delalloc_size = space_info->bytes_may_use - block_rsv_size;
 
@@ -1207,16 +1207,16 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
                        to_reclaim = delalloc_size;
                        flush = FLUSH_DELALLOC;
                } else if (space_info->bytes_pinned >
-                          (delayed_block_rsv->reserved +
-                           delayed_refs_rsv->reserved)) {
+                          (btrfs_block_rsv_reserved(delayed_block_rsv) +
+                           btrfs_block_rsv_reserved(delayed_refs_rsv))) {
                        to_reclaim = space_info->bytes_pinned;
                        flush = COMMIT_TRANS;
-               } else if (delayed_block_rsv->reserved >
-                          delayed_refs_rsv->reserved) {
-                       to_reclaim = delayed_block_rsv->reserved;
+               } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
+                          btrfs_block_rsv_reserved(delayed_refs_rsv)) {
+                       to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
                        flush = FLUSH_DELAYED_ITEMS_NR;
                } else {
-                       to_reclaim = delayed_refs_rsv->reserved;
+                       to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
                        flush = FLUSH_DELAYED_REFS_NR;
                }
 
index c52807d97efa553b0b5e4765e11606a8ce644161..bf8e64c766b63b4c8b424f4437791eaea12f24a2 100644 (file)
@@ -1834,7 +1834,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        }
 
        key.offset = (u64)-1;
-       pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
+       pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                pending->snap = NULL;
index 3a5d69ff25fc221f20c1e37a9854021eff246bad..5f750fa53a2b2a88c81e9ec05c9a7ab22d44a674 100644 (file)
@@ -1639,6 +1639,15 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
        }
 
 out:
+       /* Reject non-SINGLE data profiles without RST */
+       if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
+           (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+           !fs_info->stripe_root) {
+               btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+                         btrfs_bg_type_to_raid_name(map->type));
+               return -EINVAL;
+       }
+
        if (cache->alloc_offset > cache->zone_capacity) {
                btrfs_err(fs_info,
 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
index 7077f72e6f4747c2a1f1bd6c898221d4bdaab380..f449f7340aad0811ae2cea3134731e1a2111f5ff 100644 (file)
@@ -168,6 +168,8 @@ error_unsupported:
        dput(root);
 error_open_root:
        cachefiles_end_secure(cache, saved_cred);
+       put_cred(cache->cache_cred);
+       cache->cache_cred = NULL;
 error_getsec:
        fscache_relinquish_cache(cache_cookie);
        cache->cache = NULL;
index 3f24905f40661302936f08122394947d55e3d5f3..6465e257423091d5183a6bf4c7963a2e8e900766 100644 (file)
@@ -816,6 +816,7 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
        cachefiles_put_directory(cache->graveyard);
        cachefiles_put_directory(cache->store);
        mntput(cache->mnt);
+       put_cred(cache->cache_cred);
 
        kfree(cache->rootdirname);
        kfree(cache->secctx);
index fae97c25ce58d5b268b7e3d73c5d4c94def4946d..8109aba66e023eb0d3dd5cdf06f3060c5cbf4b1a 100644 (file)
@@ -380,10 +380,11 @@ struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
                ceph_decode_skip_8(p, end, bad_ext);
                /* required_client_features */
                ceph_decode_skip_set(p, end, 64, bad_ext);
+               /* bal_rank_mask */
+               ceph_decode_skip_string(p, end, bad_ext);
+       }
+       if (mdsmap_ev >= 18) {
                ceph_decode_64_safe(p, end, m->m_max_xattr_size, bad_ext);
-       } else {
-               /* This forces the usage of the (sync) SETXATTR Op */
-               m->m_max_xattr_size = 0;
        }
 bad_ext:
        doutc(cl, "m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
index 89f1931f1ba6c9643a4098b1255c240e00f0c38e..1f2171dd01bfa34a404eef00113646bdcb978980 100644 (file)
@@ -27,7 +27,11 @@ struct ceph_mdsmap {
        u32 m_session_timeout;          /* seconds */
        u32 m_session_autoclose;        /* seconds */
        u64 m_max_file_size;
-       u64 m_max_xattr_size;           /* maximum size for xattrs blob */
+       /*
+        * maximum size for xattrs blob.
+        * Zeroed by default to force the usage of the (sync) SETXATTR Op.
+        */
+       u64 m_max_xattr_size;
        u32 m_max_mds;                  /* expected up:active mds number */
        u32 m_num_active_mds;           /* actual up:active mds number */
        u32 possible_max_rank;          /* possible max rank index */
index f258c17c18411284b9725ed88110de32fadc8993..be6403b4b14b6a26e611398f0903244d1af96343 100644 (file)
@@ -872,6 +872,9 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
        loff_t pos;
        ssize_t n;
 
+       if (!page)
+               return 0;
+
        if (cprm->to_skip) {
                if (!__dump_skip(cprm, cprm->to_skip))
                        return 0;
@@ -884,7 +887,6 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
        pos = file->f_pos;
        bvec_set_page(&bvec, page, PAGE_SIZE, 0);
        iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
-       iov_iter_set_copy_mc(&iter);
        n = __kernel_write_iter(cprm->file, &iter, &pos);
        if (n != PAGE_SIZE)
                return 0;
@@ -895,10 +897,44 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
        return 1;
 }
 
+/*
+ * If we might get machine checks from kernel accesses during the
+ * core dump, let's get those errors early rather than during the
+ * IO. This is not performance-critical enough to warrant having
+ * all the machine check logic in the iovec paths.
+ */
+#ifdef copy_mc_to_kernel
+
+#define dump_page_alloc() alloc_page(GFP_KERNEL)
+#define dump_page_free(x) __free_page(x)
+static struct page *dump_page_copy(struct page *src, struct page *dst)
+{
+       void *buf = kmap_local_page(src);
+       size_t left = copy_mc_to_kernel(page_address(dst), buf, PAGE_SIZE);
+       kunmap_local(buf);
+       return left ? NULL : dst;
+}
+
+#else
+
+/* We just want to return non-NULL; it's never used. */
+#define dump_page_alloc() ERR_PTR(-EINVAL)
+#define dump_page_free(x) ((void)(x))
+static inline struct page *dump_page_copy(struct page *src, struct page *dst)
+{
+       return src;
+}
+#endif
+
 int dump_user_range(struct coredump_params *cprm, unsigned long start,
                    unsigned long len)
 {
        unsigned long addr;
+       struct page *dump_page;
+
+       dump_page = dump_page_alloc();
+       if (!dump_page)
+               return 0;
 
        for (addr = start; addr < start + len; addr += PAGE_SIZE) {
                struct page *page;
@@ -912,14 +948,17 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
                 */
                page = get_dump_page(addr);
                if (page) {
-                       int stop = !dump_emit_page(cprm, page);
+                       int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
                        put_page(page);
-                       if (stop)
+                       if (stop) {
+                               dump_page_free(dump_page);
                                return 0;
+                       }
                } else {
                        dump_skip(cprm, PAGE_SIZE);
                }
        }
+       dump_page_free(dump_page);
        return 1;
 }
 #endif
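
The bounce page introduced above turns a potential machine-check on a poisoned source page into an early copy failure, handled before the write path runs. A rough userspace analogue of the probe-then-use shape, where checked_copy() stands in for copy_mc_to_kernel() (which returns the number of bytes it could not copy):

    #include <stddef.h>
    #include <string.h>

    /* Stand-in for copy_mc_to_kernel(): returns bytes NOT copied (0 = ok). */
    static size_t checked_copy(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    /* Copy through a scratch buffer first; on failure the caller skips
     * the page instead of faulting in the middle of writing output. */
    static const void *bounce(const void *src, void *scratch, size_t n)
    {
        return checked_copy(scratch, src, n) ? NULL : scratch;
    }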
index b813528fb147784c6f308e67d47f3069e3a96e33..6ebccba333368d06667eb6c1ee433046bd0ab7d8 100644 (file)
@@ -3061,7 +3061,10 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
                if (d_unhashed(dentry) || !dentry->d_inode)
                        return D_WALK_SKIP;
 
-               dentry->d_lockref.count--;
+               if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
+                       dentry->d_flags |= DCACHE_GENOCIDE;
+                       dentry->d_lockref.count--;
+               }
        }
        return D_WALK_CONTINUE;
 }
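
d_genocide_kill() can reach the same dentry more than once, so the refcount drop is now gated on a per-dentry flag. The shape in isolation (hypothetical node type, not dcache code):

    struct node {
        unsigned int flags;
        int count;
    };

    #define NODE_SEEN 0x1

    /* Safe to call any number of times: only the first visit decrements. */
    static void drop_once(struct node *n)
    {
        if (!(n->flags & NODE_SEEN)) {
            n->flags |= NODE_SEEN;
            n->count--;
        }
    }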
index 169252e6dc4616c7712126adde4a36ce8e2d6922..f7206158ee81385eeaab387fd16b05aea5a7634b 100644 (file)
@@ -38,7 +38,7 @@ struct efivar_entry {
 
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
                            struct list_head *),
-               void *data, bool duplicates, struct list_head *head);
+               void *data, struct list_head *head);
 
 int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
 void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
index 6038dd39367abe41430c55b04448ced7727dd287..bb14462f6d992a5506f4fda2158952cb410c96f3 100644 (file)
@@ -343,12 +343,7 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
        if (err)
                return err;
 
-       err = efivar_init(efivarfs_callback, (void *)sb, true,
-                         &sfi->efivarfs_list);
-       if (err)
-               efivar_entry_iter(efivarfs_destroy, &sfi->efivarfs_list, NULL);
-
-       return err;
+       return efivar_init(efivarfs_callback, sb, &sfi->efivarfs_list);
 }
 
 static int efivarfs_get_tree(struct fs_context *fc)
index 114ff0fd4e55732e2ebe0cdc8b20d82436571abf..4d722af1014f2a18198cc3e831d1fea68d46e251 100644 (file)
@@ -361,7 +361,6 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
  * efivar_init - build the initial list of EFI variables
  * @func: callback function to invoke for every variable
  * @data: function-specific data to pass to @func
- * @duplicates: error if we encounter duplicates on @head?
  * @head: initialised head of variable list
  *
  * Get every EFI variable from the firmware and invoke @func. @func
@@ -371,9 +370,9 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
  */
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
                            struct list_head *),
-               void *data, bool duplicates, struct list_head *head)
+               void *data, struct list_head *head)
 {
-       unsigned long variable_name_size = 1024;
+       unsigned long variable_name_size = 512;
        efi_char16_t *variable_name;
        efi_status_t status;
        efi_guid_t vendor_guid;
@@ -390,12 +389,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
                goto free;
 
        /*
-        * Per EFI spec, the maximum storage allocated for both
-        * the variable name and variable data is 1024 bytes.
+        * A small set of old UEFI implementations reject sizes
+        * above a certain threshold; the lowest seen in the wild
+        * is 512.
         */
 
        do {
-               variable_name_size = 1024;
+               variable_name_size = 512;
 
                status = efivar_get_next_variable(&variable_name_size,
                                                  variable_name,
@@ -413,8 +413,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
                         * we'll ever see a different variable name,
                         * and may end up looping here forever.
                         */
-                       if (duplicates &&
-                           variable_is_present(variable_name, &vendor_guid,
+                       if (variable_is_present(variable_name, &vendor_guid,
                                                head)) {
                                dup_variable_bug(variable_name, &vendor_guid,
                                                 variable_name_size);
@@ -432,9 +431,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
                        break;
                case EFI_NOT_FOUND:
                        break;
+               case EFI_BUFFER_TOO_SMALL:
+                       pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
+                               variable_name_size);
+                       status = EFI_NOT_FOUND;
+                       break;
                default:
-                       printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
-                               status);
+                       pr_warn("efivars: get_next_variable: status=%lx\n", status);
                        status = EFI_NOT_FOUND;
                        break;
                }
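
With the name buffer pinned at 512 bytes, an oversized name now surfaces as EFI_BUFFER_TOO_SMALL, which the loop above logs and folds into EFI_NOT_FOUND, i.e. end of enumeration, instead of growing the buffer. A self-contained sketch of that fixed-buffer loop (get_next() is a hypothetical enumerator, not the EFI interface):

    #include <stdio.h>
    #include <string.h>

    enum { NAME_MAX_BYTES = 512 };

    /* Hypothetical enumerator: 0 = got a name, 1 = done, -1 = name too big. */
    static int get_next(char *name, unsigned long *size)
    {
        static int calls;

        if (calls++)
            return 1;
        if (*size < sizeof("Boot0000"))
            return -1;
        strcpy(name, "Boot0000");
        return 0;
    }

    int main(void)
    {
        char name[NAME_MAX_BYTES];
        unsigned long size;
        int rc;

        for (;;) {
            size = NAME_MAX_BYTES;      /* reset before every call */
            rc = get_next(name, &size);
            if (rc == 0) {
                puts(name);
                continue;
            }
            if (rc < 0)
                fprintf(stderr, "name exceeds %d bytes, stopping\n",
                        NAME_MAX_BYTES);
            break;                      /* done, or treat too-big as done */
        }
        return 0;
    }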
index c98aeda8abb215e9be577d1b27dea2713b0b6e87..3d9721b3faa81d2636447c5e04b197843765f179 100644 (file)
@@ -447,5 +447,6 @@ const struct file_operations erofs_file_fops = {
        .llseek         = generic_file_llseek,
        .read_iter      = erofs_file_read_iter,
        .mmap           = erofs_file_mmap,
+       .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = filemap_splice_read,
 };
index d4cee95af14c7490e85706589853059b99b7e688..2ec9b2bb628d6b03bdf454c3fc6457b4065aabc8 100644 (file)
@@ -323,7 +323,8 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
        unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
        u8 *kin;
 
-       DBG_BUGON(rq->outputsize > rq->inputsize);
+       if (rq->outputsize > rq->inputsize)
+               return -EOPNOTSUPP;
        if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
                cur = bs - (rq->pageofs_out & (bs - 1));
                pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
index 5ff90026fd43fe116e3a34178bf00b9f3303b411..89a7c2453aae6f130e679af1459673397d581842 100644 (file)
@@ -381,11 +381,12 @@ static int erofs_fscache_init_domain(struct super_block *sb)
                goto out;
 
        if (!erofs_pseudo_mnt) {
-               erofs_pseudo_mnt = kern_mount(&erofs_fs_type);
-               if (IS_ERR(erofs_pseudo_mnt)) {
-                       err = PTR_ERR(erofs_pseudo_mnt);
+               struct vfsmount *mnt = kern_mount(&erofs_fs_type);
+               if (IS_ERR(mnt)) {
+                       err = PTR_ERR(mnt);
                        goto out;
                }
+               erofs_pseudo_mnt = mnt;
        }
 
        domain->volume = sbi->volume;
index d4f631d39f0fa83141eafcb13951bb3fd36598bd..f0110a78acb2078aa2ce6eae13e39481e46b7ea9 100644 (file)
@@ -130,24 +130,24 @@ static void *erofs_find_target_block(struct erofs_buf *target,
                        /* string comparison without already matched prefix */
                        diff = erofs_dirnamecmp(name, &dname, &matched);
 
-                       if (!diff) {
-                               *_ndirents = 0;
-                               goto out;
-                       } else if (diff > 0) {
-                               head = mid + 1;
-                               startprfx = matched;
-
-                               if (!IS_ERR(candidate))
-                                       erofs_put_metabuf(target);
-                               *target = buf;
-                               candidate = de;
-                               *_ndirents = ndirents;
-                       } else {
+                       if (diff < 0) {
                                erofs_put_metabuf(&buf);
-
                                back = mid - 1;
                                endprfx = matched;
+                               continue;
+                       }
+
+                       if (!IS_ERR(candidate))
+                               erofs_put_metabuf(target);
+                       *target = buf;
+                       if (!diff) {
+                               *_ndirents = 0;
+                               return de;
                        }
+                       head = mid + 1;
+                       startprfx = matched;
+                       candidate = de;
+                       *_ndirents = ndirents;
                        continue;
                }
 out:           /* free if the candidate is valid */
index 9474cd50da6d4fd8b9fba92f1f3d8717f19245dc..361595433480c46562765ad4d5c886a071005c25 100644 (file)
@@ -275,6 +275,7 @@ struct exfat_sb_info {
 
        spinlock_t inode_hash_lock;
        struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
+       struct rcu_head rcu;
 };
 
 #define EXFAT_CACHE_VALID      0
index d25a96a148af4cdb966c5d20f720aa944cab10c2..cc00f1a7a1e18082af9e0e8ff28f5995de75f1ba 100644 (file)
@@ -35,13 +35,18 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
        if (new_num_clusters == num_clusters)
                goto out;
 
-       exfat_chain_set(&clu, ei->start_clu, num_clusters, ei->flags);
-       ret = exfat_find_last_cluster(sb, &clu, &last_clu);
-       if (ret)
-               return ret;
+       if (num_clusters) {
+               exfat_chain_set(&clu, ei->start_clu, num_clusters, ei->flags);
+               ret = exfat_find_last_cluster(sb, &clu, &last_clu);
+               if (ret)
+                       return ret;
+
+               clu.dir = last_clu + 1;
+       } else {
+               last_clu = EXFAT_EOF_CLUSTER;
+               clu.dir = EXFAT_EOF_CLUSTER;
+       }
 
-       clu.dir = (last_clu == EXFAT_EOF_CLUSTER) ?
-                       EXFAT_EOF_CLUSTER : last_clu + 1;
        clu.size = 0;
        clu.flags = ei->flags;
 
@@ -51,17 +56,19 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
                return ret;
 
        /* Append new clusters to chain */
-       if (clu.flags != ei->flags) {
-               exfat_chain_cont_cluster(sb, ei->start_clu, num_clusters);
-               ei->flags = ALLOC_FAT_CHAIN;
-       }
-       if (clu.flags == ALLOC_FAT_CHAIN)
-               if (exfat_ent_set(sb, last_clu, clu.dir))
-                       goto free_clu;
-
-       if (num_clusters == 0)
+       if (num_clusters) {
+               if (clu.flags != ei->flags)
+                       if (exfat_chain_cont_cluster(sb, ei->start_clu, num_clusters))
+                               goto free_clu;
+
+               if (clu.flags == ALLOC_FAT_CHAIN)
+                       if (exfat_ent_set(sb, last_clu, clu.dir))
+                               goto free_clu;
+       } else
                ei->start_clu = clu.dir;
 
+       ei->flags = clu.flags;
+
 out:
        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        /* Expanded range not zeroed, do not update valid_size */
index 705710f93e2ddd3c911df119b2c8e0ca5a9152cd..afdf13c34ff526fb423f322d3503b571e2d153e8 100644 (file)
@@ -655,7 +655,6 @@ static int exfat_load_upcase_table(struct super_block *sb,
        unsigned int sect_size = sb->s_blocksize;
        unsigned int i, index = 0;
        u32 chksum = 0;
-       int ret;
        unsigned char skip = false;
        unsigned short *upcase_table;
 
@@ -673,8 +672,7 @@ static int exfat_load_upcase_table(struct super_block *sb,
                if (!bh) {
                        exfat_err(sb, "failed to read sector(0x%llx)",
                                  (unsigned long long)sector);
-                       ret = -EIO;
-                       goto free_table;
+                       return -EIO;
                }
                sector++;
                for (i = 0; i < sect_size && index <= 0xFFFF; i += 2) {
@@ -701,15 +699,12 @@ static int exfat_load_upcase_table(struct super_block *sb,
 
        exfat_err(sb, "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)",
                  index, chksum, utbl_checksum);
-       ret = -EINVAL;
-free_table:
-       exfat_free_upcase_table(sbi);
-       return ret;
+       return -EINVAL;
 }
 
 static int exfat_load_default_upcase_table(struct super_block *sb)
 {
-       int i, ret = -EIO;
+       int i;
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        unsigned char skip = false;
        unsigned short uni = 0, *upcase_table;
@@ -740,8 +735,7 @@ static int exfat_load_default_upcase_table(struct super_block *sb)
                return 0;
 
        /* FATAL error: default upcase table has error */
-       exfat_free_upcase_table(sbi);
-       return ret;
+       return -EIO;
 }
 
 int exfat_create_upcase_table(struct super_block *sb)
index d9d4fa91010bb1d226b1d00afdbb841e73911d33..fcb6582677650bd1462e501e9c5bb67a032befd4 100644 (file)
@@ -39,9 +39,6 @@ static void exfat_put_super(struct super_block *sb)
        exfat_free_bitmap(sbi);
        brelse(sbi->boot_bh);
        mutex_unlock(&sbi->s_lock);
-
-       unload_nls(sbi->nls_io);
-       exfat_free_upcase_table(sbi);
 }
 
 static int exfat_sync_fs(struct super_block *sb, int wait)
@@ -600,7 +597,7 @@ static int __exfat_fill_super(struct super_block *sb)
        ret = exfat_load_bitmap(sb);
        if (ret) {
                exfat_err(sb, "failed to load alloc-bitmap");
-               goto free_upcase_table;
+               goto free_bh;
        }
 
        ret = exfat_count_used_clusters(sb, &sbi->used_clusters);
@@ -613,8 +610,6 @@ static int __exfat_fill_super(struct super_block *sb)
 
 free_alloc_bitmap:
        exfat_free_bitmap(sbi);
-free_upcase_table:
-       exfat_free_upcase_table(sbi);
 free_bh:
        brelse(sbi->boot_bh);
        return ret;
@@ -701,12 +696,10 @@ put_inode:
        sb->s_root = NULL;
 
 free_table:
-       exfat_free_upcase_table(sbi);
        exfat_free_bitmap(sbi);
        brelse(sbi->boot_bh);
 
 check_nls_io:
-       unload_nls(sbi->nls_io);
        return err;
 }
 
@@ -771,13 +764,22 @@ static int exfat_init_fs_context(struct fs_context *fc)
        return 0;
 }
 
+static void delayed_free(struct rcu_head *p)
+{
+       struct exfat_sb_info *sbi = container_of(p, struct exfat_sb_info, rcu);
+
+       unload_nls(sbi->nls_io);
+       exfat_free_upcase_table(sbi);
+       exfat_free_sbi(sbi);
+}
+
 static void exfat_kill_sb(struct super_block *sb)
 {
        struct exfat_sb_info *sbi = sb->s_fs_info;
 
        kill_block_super(sb);
        if (sbi)
-               exfat_free_sbi(sbi);
+               call_rcu(&sbi->rcu, delayed_free);
 }
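
This exfat hunk, like the fuse, hfsplus, nfs and proc hunks that follow, converts a direct free into an RCU-deferred one so lockless readers holding a stale pointer never touch freed memory. The recurring shape, condensed (foo_info is hypothetical; the helpers are the same kernel APIs used in the hunks themselves):

    struct foo_info {
        /* ... payload read by RCU readers ... */
        struct rcu_head rcu;            /* 1) embed an rcu_head */
    };

    static void foo_delayed_free(struct rcu_head *p)
    {
        struct foo_info *info = container_of(p, struct foo_info, rcu);

        kfree(info);                    /* 3) runs after a grace period */
    }

    static void foo_kill(struct foo_info *info)
    {
        call_rcu(&info->rcu, foo_delayed_free);  /* 2) defer, don't kfree() */
    }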
 
 static struct file_system_type exfat_fs_type = {
index 75bf1f88843c4ce96c285423228a70ff77c08517..645240cc0229fe4a2eda4499ae4a834fe3bd3a66 100644 (file)
@@ -92,10 +92,12 @@ static const char *ext4_get_link(struct dentry *dentry, struct inode *inode,
 
        if (!dentry) {
                bh = ext4_getblk(NULL, inode, 0, EXT4_GET_BLOCKS_CACHED_NOWAIT);
-               if (IS_ERR(bh))
-                       return ERR_CAST(bh);
-               if (!bh || !ext4_buffer_uptodate(bh))
+               if (IS_ERR(bh) || !bh)
                        return ERR_PTR(-ECHILD);
+               if (!ext4_buffer_uptodate(bh)) {
+                       brelse(bh);
+                       return ERR_PTR(-ECHILD);
+               }
        } else {
                bh = ext4_bread(NULL, inode, 0, 0);
                if (IS_ERR(bh))
index 91e89e68177ee4bd686a920b9dfad4978d8d5062..b6cad106c37e44258bd6e4433cd4aaedfbb98f65 100644 (file)
@@ -474,8 +474,7 @@ err:
 
 static void cuse_fc_release(struct fuse_conn *fc)
 {
-       struct cuse_conn *cc = fc_to_cc(fc);
-       kfree_rcu(cc, fc.rcu);
+       kfree(fc_to_cc(fc));
 }
 
 /**
index 1df83eebda92771d20a42ea2aaefa118effcbc77..bcbe34488862752154ca2284386baacadf972744 100644 (file)
@@ -888,6 +888,7 @@ struct fuse_mount {
 
        /* Entry on fc->mounts */
        struct list_head fc_entry;
+       struct rcu_head rcu;
 };
 
 static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
index 2a6d44f91729bbd7e3bf1c955a952ecdd695bd0f..516ea2979a90ff2d0eff63a71dc6b8edc4c91b98 100644 (file)
@@ -930,6 +930,14 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
 }
 EXPORT_SYMBOL_GPL(fuse_conn_init);
 
+static void delayed_release(struct rcu_head *p)
+{
+       struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);
+
+       put_user_ns(fc->user_ns);
+       fc->release(fc);
+}
+
 void fuse_conn_put(struct fuse_conn *fc)
 {
        if (refcount_dec_and_test(&fc->count)) {
@@ -941,13 +949,12 @@ void fuse_conn_put(struct fuse_conn *fc)
                if (fiq->ops->release)
                        fiq->ops->release(fiq);
                put_pid_ns(fc->pid_ns);
-               put_user_ns(fc->user_ns);
                bucket = rcu_dereference_protected(fc->curr_bucket, 1);
                if (bucket) {
                        WARN_ON(atomic_read(&bucket->count) != 1);
                        kfree(bucket);
                }
-               fc->release(fc);
+               call_rcu(&fc->rcu, delayed_release);
        }
 }
 EXPORT_SYMBOL_GPL(fuse_conn_put);
@@ -1366,7 +1373,7 @@ EXPORT_SYMBOL_GPL(fuse_send_init);
 void fuse_free_conn(struct fuse_conn *fc)
 {
        WARN_ON(!list_empty(&fc->devices));
-       kfree_rcu(fc, rcu);
+       kfree(fc);
 }
 EXPORT_SYMBOL_GPL(fuse_free_conn);
 
@@ -1902,7 +1909,7 @@ static void fuse_sb_destroy(struct super_block *sb)
 void fuse_mount_destroy(struct fuse_mount *fm)
 {
        fuse_conn_put(fm->fc);
-       kfree(fm);
+       kfree_rcu(fm, rcu);
 }
 EXPORT_SYMBOL(fuse_mount_destroy);
 
index 7ededcb720c121794eb3782dd84ccff74685296b..012a3d003fbe6162db231058a647cb07f78ef0a9 100644 (file)
@@ -190,6 +190,7 @@ struct hfsplus_sb_info {
        int work_queued;               /* non-zero delayed work is queued */
        struct delayed_work sync_work; /* FS sync delayed work */
        spinlock_t work_lock;          /* protects sync_work and work_queued */
+       struct rcu_head rcu;
 };
 
 #define HFSPLUS_SB_WRITEBACKUP 0
index 1986b4f18a9013ee27f056b7c871df215f05f862..97920202790f944f0d03dda35cc1a83f27201470 100644 (file)
@@ -277,6 +277,14 @@ void hfsplus_mark_mdb_dirty(struct super_block *sb)
        spin_unlock(&sbi->work_lock);
 }
 
+static void delayed_free(struct rcu_head *p)
+{
+       struct hfsplus_sb_info *sbi = container_of(p, struct hfsplus_sb_info, rcu);
+
+       unload_nls(sbi->nls);
+       kfree(sbi);
+}
+
 static void hfsplus_put_super(struct super_block *sb)
 {
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
@@ -302,9 +310,7 @@ static void hfsplus_put_super(struct super_block *sb)
        hfs_btree_close(sbi->ext_tree);
        kfree(sbi->s_vhdr_buf);
        kfree(sbi->s_backup_vhdr_buf);
-       unload_nls(sbi->nls);
-       kfree(sb->s_fs_info);
-       sb->s_fs_info = NULL;
+       call_rcu(&sbi->rcu, delayed_free);
 }
 
 static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
index 4e0de939fea127034c24d7badb18253a9351b52e..9342fa6a38c2bad85c13144b8d8ae4940e88e7e6 100644 (file)
@@ -1717,7 +1717,11 @@ static inline int may_lookup(struct mnt_idmap *idmap,
 {
        if (nd->flags & LOOKUP_RCU) {
                int err = inode_permission(idmap, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
-               if (err != -ECHILD || !try_to_unlazy(nd))
+               if (!err)               // success, keep going
+                       return 0;
+               if (!try_to_unlazy(nd))
+                       return -ECHILD; // redo it all non-lazy
+               if (err != -ECHILD)     // hard error
                        return err;
        }
        return inode_permission(idmap, nd->inode, MAY_EXEC);
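
The rewrite above unfolds three outcomes that the old one-line condition mixed together; as an editorial summary (not part of the patch):

    RCU-mode check     try_to_unlazy()   result
    ---------------    ---------------   ---------------------------------
    0 (granted)        not attempted     0, stay on the RCU-walk fast path
    any error          fails             -ECHILD, caller restarts ref-walk
    hard error         succeeds          that error, returned as-is
    -ECHILD            succeeds          fall through, redo in ref-walk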
index a3059b3168fd95756c7e57986ed999e205dfa8aa..9a0d32e4b422ad09518a6c6143638d0c68fb8b84 100644 (file)
@@ -477,6 +477,9 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
        _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
 
+       if (!iov_iter_count(from))
+               return 0;
+
        if ((iocb->ki_flags & IOCB_DIRECT) ||
            test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
                return netfs_unbuffered_write_iter(iocb, from);
index 60a40d293c87f5fd1088830f07488775b8725bb4..bee047e20f5d6933e3af452eb150e4eb2e97d941 100644 (file)
@@ -139,6 +139,9 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
        _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
 
+       if (!iov_iter_count(from))
+               return 0;
+
        trace_netfs_write_iter(iocb, from);
        netfs_stat(&netfs_n_rh_dio_write);
 
@@ -146,7 +149,7 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (ret < 0)
                return ret;
        ret = generic_write_checks(iocb, from);
-       if (ret < 0)
+       if (ret <= 0)
                goto out;
        ret = file_remove_privs(file);
        if (ret < 0)
index e8ff1e61ce79b7f67e1252f4b66aa461bfe1d4b8..4261ad6c55b664a7e3da006d007de03664790641 100644 (file)
@@ -748,6 +748,8 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 
        if (!rreq->submitted) {
                netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
+               if (rreq->origin == NETFS_DIO_READ)
+                       inode_dio_end(rreq->inode);
                ret = 0;
                goto out;
        }
index 44eca51b28085d9deff764bfe6f9286388e93983..fbdc9ca80f714bdf3d3cad54e63d7c858612e5f1 100644 (file)
@@ -246,7 +246,7 @@ void nfs_free_client(struct nfs_client *clp)
        put_nfs_version(clp->cl_nfs_mod);
        kfree(clp->cl_hostname);
        kfree(clp->cl_acceptor);
-       kfree(clp);
+       kfree_rcu(clp, rcu);
 }
 EXPORT_SYMBOL_GPL(nfs_free_client);
 
@@ -1006,6 +1006,14 @@ struct nfs_server *nfs_alloc_server(void)
 }
 EXPORT_SYMBOL_GPL(nfs_alloc_server);
 
+static void delayed_free(struct rcu_head *p)
+{
+       struct nfs_server *server = container_of(p, struct nfs_server, rcu);
+
+       nfs_free_iostats(server->io_stats);
+       kfree(server);
+}
+
 /*
  * Free up a server record
  */
@@ -1031,10 +1039,9 @@ void nfs_free_server(struct nfs_server *server)
 
        ida_destroy(&server->lockowner_id);
        ida_destroy(&server->openowner_id);
-       nfs_free_iostats(server->io_stats);
        put_cred(server->cred);
-       kfree(server);
        nfs_release_automount_timer();
+       call_rcu(&server->rcu, delayed_free);
 }
 EXPORT_SYMBOL_GPL(nfs_free_server);
 
index c8ecbe99905960ccd63b7128f273fc38543d876d..ac505671efbdb7a91a346e4f300e352261562eae 100644 (file)
@@ -1431,9 +1431,9 @@ static bool nfs_verifier_is_delegated(struct dentry *dentry)
 static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
 {
        struct inode *inode = d_inode(dentry);
-       struct inode *dir = d_inode(dentry->d_parent);
+       struct inode *dir = d_inode_rcu(dentry->d_parent);
 
-       if (!nfs_verify_change_attribute(dir, verf))
+       if (!dir || !nfs_verify_change_attribute(dir, verf))
                return;
        if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
                nfs_set_verifier_delegated(&verf);
index 3b42938a9d3b229b0bbf1eef9a80f83da5111113..7f27382e0ce25bcb2c660fa799cf8295a9cf486b 100644 (file)
@@ -2457,7 +2457,6 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
        struct ATTR_LIST_ENTRY *le = NULL;
        struct runs_tree *run = &ni->file.run;
        u64 valid_size = ni->i_valid;
-       loff_t i_size = i_size_read(&ni->vfs_inode);
        u64 vbo_disk;
        size_t unc_size;
        u32 frame_size, i, npages_disk, ondisk_size;
@@ -2509,6 +2508,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
                err = -EOPNOTSUPP;
                goto out1;
 #else
+               loff_t i_size = i_size_read(&ni->vfs_inode);
                u32 frame_bits = ni_ext_compress_bits(ni);
                u64 frame64 = frame_vbo >> frame_bits;
                u64 frames, vbo_data;
index 98a031ac26484544b8b07aec1ca72f40250ba2ca..18550c071d71c733204e3a94d274ac4d47c00119 100644 (file)
@@ -1878,8 +1878,6 @@ void proc_pid_evict_inode(struct proc_inode *ei)
                hlist_del_init_rcu(&ei->sibling_inodes);
                spin_unlock(&pid->lock);
        }
-
-       put_pid(pid);
 }
 
 struct inode *proc_pid_make_inode(struct super_block *sb,
index b33e490e3fd9f88f569e3453d603041e665cf6bf..05350f3c2812c57562e9208da69d0e98835dadc9 100644 (file)
@@ -30,7 +30,6 @@
 
 static void proc_evict_inode(struct inode *inode)
 {
-       struct proc_dir_entry *de;
        struct ctl_table_header *head;
        struct proc_inode *ei = PROC_I(inode);
 
@@ -38,17 +37,8 @@ static void proc_evict_inode(struct inode *inode)
        clear_inode(inode);
 
        /* Stop tracking associated processes */
-       if (ei->pid) {
+       if (ei->pid)
                proc_pid_evict_inode(ei);
-               ei->pid = NULL;
-       }
-
-       /* Let go of any associated proc directory entry */
-       de = ei->pde;
-       if (de) {
-               pde_put(de);
-               ei->pde = NULL;
-       }
 
        head = ei->sysctl;
        if (head) {
@@ -80,6 +70,13 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
 
 static void proc_free_inode(struct inode *inode)
 {
+       struct proc_inode *ei = PROC_I(inode);
+
+       if (ei->pid)
+               put_pid(ei->pid);
+       /* Let go of any associated proc directory entry */
+       if (ei->pde)
+               pde_put(ei->pde);
        kmem_cache_free(proc_inode_cachep, PROC_I(inode));
 }
 
index b55dbc70287b492ae2e4ed43e2a3c04ee0818798..06a297a27ba3b31a5e2092fcd08d1ca9eebb5849 100644 (file)
@@ -271,7 +271,7 @@ static void proc_kill_sb(struct super_block *sb)
 
        kill_anon_super(sb);
        put_pid_ns(fs_info->pid_ns);
-       kfree(fs_info);
+       kfree_rcu(fs_info, rcu);
 }
 
 static struct file_system_type proc_fs_type = {
index 2a4a4e3a8751f2ce8f0409ce79dc5024e02bb883..0c269396ae151b083b4e545e58b2300ca16e0ee3 100644 (file)
@@ -1172,6 +1172,9 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
 {
        char *target_path;
 
+       if (!dentry)
+               return ERR_PTR(-ECHILD);
+
        target_path = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!target_path)
                return ERR_PTR(-ENOMEM);
index d35e852954892dadcf1df6757c8b491904d2edbb..d6efeba0d0ce7c464c6d32c8d9001007d35259b1 100644 (file)
@@ -274,9 +274,10 @@ static void destroy_super_work(struct work_struct *work)
 {
        struct super_block *s = container_of(work, struct super_block,
                                                        destroy_work);
-       int i;
-
-       for (i = 0; i < SB_FREEZE_LEVELS; i++)
+       security_sb_free(s);
+       put_user_ns(s->s_user_ns);
+       kfree(s->s_subtype);
+       for (int i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_free_rwsem(&s->s_writers.rw_sem[i]);
        kfree(s);
 }
@@ -296,9 +297,6 @@ static void destroy_unused_super(struct super_block *s)
        super_unlock_excl(s);
        list_lru_destroy(&s->s_dentry_lru);
        list_lru_destroy(&s->s_inode_lru);
-       security_sb_free(s);
-       put_user_ns(s->s_user_ns);
-       kfree(s->s_subtype);
        shrinker_free(s->s_shrink);
        /* no delays needed */
        destroy_super_work(&s->destroy_work);
@@ -409,9 +407,6 @@ static void __put_super(struct super_block *s)
                WARN_ON(s->s_dentry_lru.node);
                WARN_ON(s->s_inode_lru.node);
                WARN_ON(!list_empty(&s->s_mounts));
-               security_sb_free(s);
-               put_user_ns(s->s_user_ns);
-               kfree(s->s_subtype);
                call_rcu(&s->rcu, destroy_super_rcu);
        }
 }
index 5a2512d20bd07473a872592911ede7246b8c11b7..98401de832eeab2c0ca66db4f9d8f22041dc4e28 100644 (file)
@@ -350,7 +350,6 @@ xfs_setup_dax_always(
                return -EINVAL;
        }
 
-       xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
        return 0;
 
 disable_dax:
index c4c423e97f069c325ba2ed41b6839adb160d95f6..4453906105ca183a8fe20be81468f5211666d01f 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <drm/drm_connector.h>
 
+struct auxiliary_device;
+
 #if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
 int drm_aux_bridge_register(struct device *parent);
 #else
@@ -19,10 +21,23 @@ static inline int drm_aux_bridge_register(struct device *parent)
 #endif
 
 #if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np);
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev);
 struct device *drm_dp_hpd_bridge_register(struct device *parent,
                                          struct device_node *np);
 void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
 #else
+static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent,
+                                                                   struct device_node *np)
+{
+       return NULL;
+}
+
+static inline int devm_drm_dp_hpd_bridge_add(struct device *dev,
+                                             struct auxiliary_device *adev)
+{
+       return 0;
+}
+
 static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
                                                        struct device_node *np)
 {
index 555aae5448ae4ec00065e00553955b31bb44884b..bd1e361b351c5afa88ff02e7023cd79acd5454fd 100644 (file)
@@ -83,7 +83,7 @@ struct bvec_iter {
 
        unsigned int            bi_bvec_done;   /* number of bytes completed in
                                                   current bvec */
-} __packed;
+} __packed __aligned(4);
 
 struct bvec_iter_all {
        struct bio_vec  bv;
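
__packed on its own drops a struct's alignment requirement to one byte, so a struct bvec_iter embedded in other structures could land on odd addresses; __aligned(4) keeps the space-saving packed layout while restoring 4-byte alignment for the struct as a whole. The interplay in miniature (illustrative types):

#include <linux/build_bug.h>
#include <linux/types.h>

struct demo_packed {
	u32 a;
	u16 b;
} __packed;			/* sizeof == 6, alignof == 1 */

struct demo_packed_aligned {
	u32 a;
	u16 b;
} __packed __aligned(4);	/* sizeof == 8, alignof == 4 */

static_assert(__alignof__(struct demo_packed) == 1);
static_assert(__alignof__(struct demo_packed_aligned) == 4);
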
index 91125eca4c8ab8ded08a5b4b687c65c69d656401..03fa6d50d46fe5886d92d3cb6cddfe29fc43af11 100644 (file)
@@ -140,22 +140,4 @@ struct cxl_cper_event_rec {
        union cxl_event event;
 } __packed;
 
-typedef void (*cxl_cper_callback)(enum cxl_event_type type,
-                                 struct cxl_cper_event_rec *rec);
-
-#ifdef CONFIG_ACPI_APEI_GHES
-int cxl_cper_register_callback(cxl_cper_callback callback);
-int cxl_cper_unregister_callback(cxl_cper_callback callback);
-#else
-static inline int cxl_cper_register_callback(cxl_cper_callback callback)
-{
-       return 0;
-}
-
-static inline int cxl_cper_unregister_callback(cxl_cper_callback callback)
-{
-       return 0;
-}
-#endif
-
 #endif /* _LINUX_CXL_EVENT_H */
index 1666c387861f7a8fae32d7ae3acd17c950142ff5..d07cf2f1bb7db18c37333fd211b2e5b18657b254 100644 (file)
@@ -173,6 +173,7 @@ struct dentry_operations {
 #define DCACHE_DONTCACHE               BIT(7) /* Purge from memory on final dput() */
 
 #define DCACHE_CANT_MOUNT              BIT(8)
+#define DCACHE_GENOCIDE                        BIT(9)
 #define DCACHE_SHRINK_LIST             BIT(10)
 
 #define DCACHE_OP_WEAK_REVALIDATE      BIT(11)
index 9cf896ea1d4122f3bc7094e46a5af81b999937dc..e37344f6a231893fa829bf87c8a18e86bb8b8742 100644 (file)
@@ -10,6 +10,8 @@
 #include <uapi/linux/dpll.h>
 #include <linux/device.h>
 #include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
 
 struct dpll_device;
 struct dpll_pin;
@@ -120,15 +122,24 @@ struct dpll_pin_properties {
 };
 
 #if IS_ENABLED(CONFIG_DPLL)
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin);
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin);
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void dpll_netdev_pin_clear(struct net_device *dev);
+
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+                              const struct net_device *dev);
 #else
-static inline size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
+static inline void
+dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
+static inline void dpll_netdev_pin_clear(struct net_device *dev) { }
+
+static inline size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
 {
        return 0;
 }
 
-static inline int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static inline int
+dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
 {
        return 0;
 }
index 023f37c607094a5339598ac2c7dddd09745c907e..1fbc72c5f112c750b87e7d752e4d5871258ddabe 100644 (file)
@@ -352,6 +352,8 @@ enum rw_hint {
  * unrelated IO (like cache flushing, new IO generation, etc).
  */
 #define IOCB_DIO_CALLER_COMP   (1 << 22)
+/* kiocb is a read or write operation submitted by fs/aio.c. */
+#define IOCB_AIO_RW            (1 << 23)
 
 /* for use in trace events */
 #define TRACE_IOCB_STRINGS \
index de292a0071389ed122a3540c4a98870fe30aa8d8..e2a916cf29c42ff6c9e298bf64b9e3b3f9208879 100644 (file)
@@ -353,6 +353,15 @@ static inline bool gfp_has_io_fs(gfp_t gfp)
        return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
 }
 
+/*
+ * Check if the gfp flags allow compaction - GFP_NOIO is a really
+ * tricky context because the migration might require IO.
+ */
+static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
+{
+       return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
+}
+
 extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
 
 #ifdef CONFIG_CONTIG_ALLOC
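
Compaction migrates pages, and migration can require I/O, so an allocation context that forbids I/O (GFP_NOIO and friends) must never enter compaction. Callers are expected to gate on the new helper roughly like this (hypothetical function, sketching the intended use):

#include <linux/gfp.h>

static bool demo_should_compact(gfp_t gfp_mask, unsigned int order)
{
	/* GFP_NOIO et al.: page migration could deadlock on I/O */
	if (!gfp_compaction_allowed(gfp_mask))
		return false;

	/* ... the usual order and watermark checks follow ... */
	return order > 0;
}
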
index 2b00faf98017cc9c2a99da9f2cc29c45ecbaf2ec..6ef0557b4bff8ed5d14bc18391d356913136c23c 100644 (file)
@@ -164,8 +164,28 @@ struct hv_ring_buffer {
        u8 buffer[];
 } __packed;
 
+
+/*
+ * If the requested ring buffer size is at least 8 times the size of the
+ * header, steal space from the ring buffer for the header. Otherwise, add
+ * space for the header so that it doesn't take too much of the ring buffer
+ * space.
+ *
+ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
+ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
+ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
+ * large allocation that will be almost half wasted. As a contrasting example,
+ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
+ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
+ * In this latter case, we must add 64 Kbytes for the header and not worry
+ * about what's wasted.
+ */
+#define VMBUS_HEADER_ADJ(payload_sz) \
+       ((payload_sz) >=  8 * sizeof(struct hv_ring_buffer) ? \
+       0 : sizeof(struct hv_ring_buffer))
+
 /* Calculate the proper size of a ringbuffer, it must be page-aligned */
-#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
                                               (payload_sz))
 
 struct hv_ring_buffer_info {
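
Worked numbers for the two scenarios the comment describes (the ring is ultimately backed by a power-of-2 page allocation, which is where the waste came from):

/*
 * x86, 4 KiB header, payload_sz = 128 KiB:
 *   128 KiB >= 8 * 4 KiB, so VMBUS_HEADER_ADJ() == 0 and
 *   VMBUS_RING_SIZE() == 128 KiB, a clean power of 2. The old
 *   formula gave 132 KiB, which rounds up to a 256 KiB
 *   allocation with nearly half of it wasted.
 *
 * arm64, 64 KiB pages, 64 KiB header, payload_sz = 128 KiB:
 *   128 KiB < 8 * 64 KiB, so VMBUS_HEADER_ADJ() == 64 KiB and
 *   VMBUS_RING_SIZE() == 192 KiB: the header is added on top
 *   instead of consuming half of the requested ring.
 */
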
index 1ea2a820e1eb035c9eea2ec97d9874c52bbd0b42..5e27cb3a3be99b34e705cb7c4569cfbdf2b11f82 100644 (file)
@@ -892,11 +892,14 @@ struct iommu_fwspec {
 struct iommu_sva {
        struct device                   *dev;
        struct iommu_domain             *domain;
+       struct list_head                handle_item;
+       refcount_t                      users;
 };
 
 struct iommu_mm_data {
        u32                     pasid;
        struct list_head        sva_domains;
+       struct list_head        sva_handles;
 };
 
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
index 7e7fd25b09b3ebe3d81e30fb23f506a9ee5a6519..179df96b20f88d065d0c9be4d4ef643b71a801c6 100644 (file)
@@ -2031,6 +2031,32 @@ static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
                return 1;
        return 0;
 }
+
+/*
+ * This lockless version of the range-based retry check *must* be paired with a
+ * call to the locked version after acquiring mmu_lock, i.e. this is safe to
+ * use only as a pre-check to avoid contending mmu_lock.  This version *will*
+ * get false negatives and false positives.
+ */
+static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
+                                                  unsigned long mmu_seq,
+                                                  gfn_t gfn)
+{
+       /*
+        * Use READ_ONCE() to ensure the in-progress flag and sequence counter
+        * are always read from memory, e.g. so that checking for retry in a
+        * loop won't result in an infinite retry loop.  Don't force loads for
+        * start+end, as the key to avoiding infinite retry loops is observing
+        * the 1=>0 transition of in-progress, i.e. getting false negatives
+        * due to stale start+end values is acceptable.
+        */
+       if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
+           gfn >= kvm->mmu_invalidate_range_start &&
+           gfn < kvm->mmu_invalidate_range_end)
+               return true;
+
+       return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
+}
 #endif
 
 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
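
The comment spells out the contract; in caller terms it looks roughly like the sketch below (illustrative function, assuming an architecture where mmu_lock is an rwlock):

#include <linux/kvm_host.h>

static bool demo_map_gfn(struct kvm *kvm, gfn_t gfn, unsigned long mmu_seq)
{
	/* Lockless pre-check: false positives/negatives are fine. */
	if (mmu_invalidate_retry_gfn_unsafe(kvm, mmu_seq, gfn))
		return false;

	write_lock(&kvm->mmu_lock);
	/* Authoritative re-check under mmu_lock before committing. */
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		write_unlock(&kvm->mmu_lock);
		return false;
	}
	/* ... install the mapping here ... */
	write_unlock(&kvm->mmu_lock);
	return true;
}
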
index b695f9e946dabb46f08e1d1688d275bb3ff35b49..e2082240586d00b5f21af7b3f9e0ca6176b5884a 100644 (file)
@@ -121,6 +121,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
 int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
 #endif
 void memblock_trim_memory(phys_addr_t align);
+unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+                                    phys_addr_t base2, phys_addr_t size2);
 bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
 bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
index c726f90ab752452cbe9726462ecedd72b21656f6..486b7492050c3daa04459c7de0e8471faca27ba4 100644 (file)
@@ -1103,7 +1103,7 @@ struct mlx5_ifc_roce_cap_bits {
        u8         sw_r_roce_src_udp_port[0x1];
        u8         fl_rc_qp_when_roce_disabled[0x1];
        u8         fl_rc_qp_when_roce_enabled[0x1];
-       u8         reserved_at_7[0x1];
+       u8         roce_cc_general[0x1];
        u8         qp_ooo_transmit_default[0x1];
        u8         reserved_at_9[0x15];
        u8         qp_ts_format[0x2];
@@ -10261,7 +10261,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
 
        u8         regs_63_to_46[0x12];
        u8         mrtc[0x1];
-       u8         regs_44_to_32[0xd];
+       u8         regs_44_to_41[0x4];
+       u8         mfrl[0x1];
+       u8         regs_39_to_32[0x8];
 
        u8         regs_31_to_10[0x16];
        u8         mtmp[0x1];
index bd53cf4be7bdcbe4ea47ab640fbe0052ffc88bef..f0e55bf3ec8b5b0dd10c3270c1659e1fbb96ac64 100644 (file)
@@ -269,7 +269,10 @@ struct mlx5_wqe_eth_seg {
        union {
                struct {
                        __be16 sz;
-                       u8     start[2];
+                       union {
+                               u8     start[2];
+                               DECLARE_FLEX_ARRAY(u8, data);
+                       };
                } inline_hdr;
                struct {
                        __be16 type;
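
C permits a bare flexible array member only as the last member of a struct, never inside a union; DECLARE_FLEX_ARRAY() wraps one so the same bytes can be read through the fixed two-byte start[] view or through an unbounded view that fortified memcpy() accepts for larger inline headers. Reduced to its essentials (illustrative struct):

#include <linux/stddef.h>	/* DECLARE_FLEX_ARRAY() */
#include <linux/types.h>

struct demo_inline_hdr {
	__be16 sz;
	union {
		u8 start[2];			/* fixed-size view */
		DECLARE_FLEX_ARRAY(u8, data);	/* unbounded view */
	};
};
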
index ef7bfbb9849733fa7f1f097ba53a36a68cc3384b..78a09af89e39b7a43ce211cbbf17e7fe035d36bb 100644 (file)
@@ -79,8 +79,6 @@ struct xdp_buff;
 struct xdp_frame;
 struct xdp_metadata_ops;
 struct xdp_md;
-/* DPLL specific */
-struct dpll_pin;
 
 typedef u32 xdp_features_t;
 
@@ -2469,7 +2467,7 @@ struct net_device {
        struct devlink_port     *devlink_port;
 
 #if IS_ENABLED(CONFIG_DPLL)
-       struct dpll_pin         *dpll_pin;
+       struct dpll_pin __rcu   *dpll_pin;
 #endif
 #if IS_ENABLED(CONFIG_PAGE_POOL)
        /** @page_pools: page pools created for this netdevice */
@@ -3499,6 +3497,16 @@ static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue
 #endif
 }
 
+static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+       /* Non-BQL migrated drivers will return 0, too. */
+       return dql_avail(&txq->dql);
+#else
+       return 0;
+#endif
+}
+
 /**
  *     netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
  *     @dev_queue: pointer to transmit queue
@@ -4032,17 +4040,6 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
 int dev_get_port_parent_id(struct net_device *dev,
                           struct netdev_phys_item_id *ppid, bool recurse);
 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
-void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
-void netdev_dpll_pin_clear(struct net_device *dev);
-
-static inline struct dpll_pin *netdev_dpll_pin(const struct net_device *dev)
-{
-#if IS_ENABLED(CONFIG_DPLL)
-       return dev->dpll_pin;
-#else
-       return NULL;
-#endif
-}
 
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
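
With the __rcu annotation (and access routed through the new dpll_netdev_* helpers), sparse can enforce that every reader uses rcu_dereference() and every writer uses rcu_assign_pointer(). A reader sketch, for illustration only, since real code is expected to go through the dpll core helpers:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static bool demo_netdev_has_pin(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_DPLL)
	bool ret;

	rcu_read_lock();
	ret = rcu_dereference(dev->dpll_pin) != NULL;
	rcu_read_unlock();
	return ret;
#else
	return false;
#endif
}
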
index 80900d9109920f686f971e431b95361a831fcc26..ce660d51549b469243357bf9187e108582bc4d95 100644 (file)
@@ -474,6 +474,7 @@ struct nf_ct_hook {
                              const struct sk_buff *);
        void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
        void (*set_closing)(struct nf_conntrack *nfct);
+       int (*confirm)(struct sk_buff *skb);
 };
 extern const struct nf_ct_hook __rcu *nf_ct_hook;
 
index cd797e00fe359a91b44b2e012309e87d5b446a7e..92de074e63b98c03cefbb2f07d60de0b8f1fb039 100644 (file)
@@ -124,6 +124,7 @@ struct nfs_client {
        char                    cl_ipaddr[48];
        struct net              *cl_net;
        struct list_head        pending_cb_stateids;
+       struct rcu_head         rcu;
 };
 
 /*
@@ -265,6 +266,7 @@ struct nfs_server {
        const struct cred       *cred;
        bool                    has_sec_mnt_opts;
        struct kobject          kobj;
+       struct rcu_head         rcu;
 };
 
 /* Server capabilities */
index 27a7dad17eefb83b917569fdc6a5df7298f3859b..1f0ee2459f2aa2db997a979ff7df74b1d1bd588c 100644 (file)
@@ -92,4 +92,7 @@
 /********** VFS **********/
 #define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
 
+/********** lib/stackdepot.c **********/
+#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+
 #endif
index de407e7c3b55fdbd9b5d3cbe93585b1e417a3e20..0b2a8985444097f91cd0557563ad9438e3c494ea 100644 (file)
@@ -65,6 +65,7 @@ struct proc_fs_info {
        kgid_t pid_gid;
        enum proc_hidepid hide_pid;
        enum proc_pidonly pidonly;
+       struct rcu_head rcu;
 };
 
 static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
index 4db00ddad26169060e1d42d5ca9b9723546ab81c..8d28f6091a320ef024597dfd6f84526702d5ec41 100644 (file)
@@ -549,6 +549,11 @@ static inline int swap_duplicate(swp_entry_t swp)
        return 0;
 }
 
+static inline int swapcache_prepare(swp_entry_t swp)
+{
+       return 0;
+}
+
 static inline void swap_free(swp_entry_t swp)
 {
 }
index 9ec229dfddaa774b9c0a4f2ae410eb273061c330..1ef95c0287f05daed3a1096ad9d5a4cf66dd0f3e 100644 (file)
@@ -9,9 +9,15 @@
 /*
  * Trace sequences are used to allow a function to call several other functions
  * to create a string of data to use.
+ *
+ * Have the trace seq be 8K, which is typically PAGE_SIZE * 2 on
+ * most architectures. TRACE_SEQ_BUFFER_SIZE (which is
+ * TRACE_SEQ_SIZE minus the other fields of trace_seq) is the
+ * max size the output of a trace event may be.
  */
 
-#define TRACE_SEQ_BUFFER_SIZE  (PAGE_SIZE * 2 - \
+#define TRACE_SEQ_SIZE         8192
+#define TRACE_SEQ_BUFFER_SIZE  (TRACE_SEQ_SIZE - \
        (sizeof(struct seq_buf) + sizeof(size_t) + sizeof(int)))
 
 struct trace_seq {
index bea9c89922d908f66511dacb02edc8f4bcad918c..00cebe2b70de7ef3d74c814d4c59b4539c3d55da 100644 (file)
@@ -40,7 +40,6 @@ struct iov_iter_state {
 
 struct iov_iter {
        u8 iter_type;
-       bool copy_mc;
        bool nofault;
        bool data_source;
        size_t iov_offset;
@@ -248,22 +247,8 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
 
 #ifdef CONFIG_ARCH_HAS_COPY_MC
 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
-static inline void iov_iter_set_copy_mc(struct iov_iter *i)
-{
-       i->copy_mc = true;
-}
-
-static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
-{
-       return i->copy_mc;
-}
 #else
 #define _copy_mc_to_iter _copy_to_iter
-static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
-static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
-{
-       return false;
-}
 #endif
 
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
@@ -355,7 +340,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_UBUF,
-               .copy_mc = false,
                .data_source = direction,
                .ubuf = buf,
                .count = count,
index da86e106c91d57b2eedfc7bb301867eb8e51c123..2bff5f47ce82f1c6f2774f49d13a647c573034d7 100644 (file)
@@ -249,6 +249,7 @@ struct mctp_route {
 struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
                                     mctp_eid_t daddr);
 
+/* always takes ownership of skb */
 int mctp_local_output(struct sock *sk, struct mctp_route *rt,
                      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
 
index 956c752ceb3180115eec0b607d81cafe5f038ce8..a763dd327c6ea95d6b94fda1ea2efd8f1784335f 100644 (file)
@@ -276,7 +276,7 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
 }
 
 void flow_offload_route_init(struct flow_offload *flow,
-                            const struct nf_flow_route *route);
+                            struct nf_flow_route *route);
 
 int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
 void flow_offload_refresh(struct nf_flowtable *flow_table,
index 934fdb9775519ff45d9455e74a8695bf8a1e4bce..cefe0c4bdae34c91868c22731a3b666f8e16e996 100644 (file)
@@ -238,12 +238,7 @@ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
 
 static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
 {
-#ifdef CONFIG_BQL
-       /* Non-BQL migrated drivers will return 0, too. */
-       return dql_avail(&txq->dql);
-#else
-       return 0;
-#endif
+       return netdev_queue_dql_avail(txq);
 }
 
 struct Qdisc_class_ops {
index a43062d4c734bb4e8e855fdd150b8c169a7fe172..8346b0d29542c3d5569b94b35eaa12461f78d62a 100644 (file)
@@ -308,6 +308,9 @@ void switchdev_deferred_process(void);
 int switchdev_port_attr_set(struct net_device *dev,
                            const struct switchdev_attr *attr,
                            struct netlink_ext_ack *extack);
+bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+                                       enum switchdev_notifier_type nt,
+                                       const struct switchdev_obj *obj);
 int switchdev_port_obj_add(struct net_device *dev,
                           const struct switchdev_obj *obj,
                           struct netlink_ext_ack *extack);
index dd78a11810310e84ef1c1ed8c3e0e274ddd77d7f..f6eba9652d010fbc8482bfd3c99377d631686324 100644 (file)
@@ -2506,7 +2506,7 @@ struct tcp_ulp_ops {
        /* cleanup ulp */
        void (*release)(struct sock *sk);
        /* diagnostic */
-       int (*get_info)(const struct sock *sk, struct sk_buff *skb);
+       int (*get_info)(struct sock *sk, struct sk_buff *skb);
        size_t (*get_info_size)(const struct sock *sk);
        /* clone ulp */
        void (*clone)(const struct request_sock *req, struct sock *newsk,
index 5ec1e71a09de7698616dff799a935da15083deef..c38f4fe5e64cf4f14b668328ab0cfac76ea5d496 100644 (file)
@@ -100,10 +100,6 @@ struct scsi_vpd {
        unsigned char   data[];
 };
 
-enum scsi_vpd_parameters {
-       SCSI_VPD_HEADER_SIZE = 4,
-};
-
 struct scsi_device {
        struct Scsi_Host *host;
        struct request_queue *request_queue;
@@ -208,6 +204,7 @@ struct scsi_device {
        unsigned use_10_for_rw:1; /* first try 10-byte read / write */
        unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
        unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
+       unsigned read_before_ms:1;      /* perform a READ before MODE SENSE */
        unsigned no_report_opcodes:1;   /* no REPORT SUPPORTED OPERATION CODES */
        unsigned no_write_same:1;       /* no WRITE SAME command */
        unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
index a3995925cb057021dc779344d19f7e3724f6df3c..1f4258308b967a9ca8e17bbf61ba4ef07b6d786b 100644 (file)
@@ -81,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
        TP_ARGS(q),
 
        TP_STRUCT__entry(
-               __string(       dev,            qdisc_dev(q)    )
-               __string(       kind,           q->ops->id      )
-               __field(        u32,            parent          )
-               __field(        u32,            handle          )
+               __string(       dev,            qdisc_dev(q)->name      )
+               __string(       kind,           q->ops->id              )
+               __field(        u32,            parent                  )
+               __field(        u32,            handle                  )
        ),
 
        TP_fast_assign(
-               __assign_str(dev, qdisc_dev(q));
+               __assign_str(dev, qdisc_dev(q)->name);
                __assign_str(kind, q->ops->id);
                __entry->parent = q->parent;
                __entry->handle = q->handle;
@@ -106,14 +106,14 @@ TRACE_EVENT(qdisc_destroy,
        TP_ARGS(q),
 
        TP_STRUCT__entry(
-               __string(       dev,            qdisc_dev(q)    )
-               __string(       kind,           q->ops->id      )
-               __field(        u32,            parent          )
-               __field(        u32,            handle          )
+               __string(       dev,            qdisc_dev(q)->name      )
+               __string(       kind,           q->ops->id              )
+               __field(        u32,            parent                  )
+               __field(        u32,            handle                  )
        ),
 
        TP_fast_assign(
-               __assign_str(dev, qdisc_dev(q));
+               __assign_str(dev, qdisc_dev(q)->name);
                __assign_str(kind, q->ops->id);
                __entry->parent = q->parent;
                __entry->handle = q->handle;
index 0bade1592f34f21690eab41de48595d7aaa24fe4..77d7ff0d5b110da4a05a4a7730d01bbd2d7c581e 100644 (file)
@@ -54,6 +54,20 @@ extern "C" {
  */
 #define NOUVEAU_GETPARAM_EXEC_PUSH_MAX   17
 
+/*
+ * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
+ *
+ * Query the VRAM BAR size.
+ */
+#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
+
+/*
+ * NOUVEAU_GETPARAM_VRAM_USED
+ *
+ * Get remaining VRAM size.
+ */
+#define NOUVEAU_GETPARAM_VRAM_USED 19
+
 struct drm_nouveau_getparam {
        __u64 param;
        __u64 value;
index 9fa3ae324731a6a96d47d81e18566b321f2f0bca..bb0c8a9941164228fef069433194aa2e549a0174 100644 (file)
@@ -831,11 +831,6 @@ struct drm_xe_vm_destroy {
  *  - %DRM_XE_VM_BIND_OP_PREFETCH
  *
  * and the @flags can be:
- *  - %DRM_XE_VM_BIND_FLAG_READONLY
- *  - %DRM_XE_VM_BIND_FLAG_ASYNC
- *  - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - Valid on a faulting VM only, do the
- *    MAP operation immediately rather than deferring the MAP to the page
- *    fault handler.
  *  - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
  *    tables are setup with a special bit which indicates writes are
  *    dropped and all reads return zero. In the future, the NULL flags
@@ -928,9 +923,8 @@ struct drm_xe_vm_bind_op {
        /** @op: Bind operation to perform */
        __u32 op;
 
-#define DRM_XE_VM_BIND_FLAG_READONLY   (1 << 0)
-#define DRM_XE_VM_BIND_FLAG_IMMEDIATE  (1 << 1)
 #define DRM_XE_VM_BIND_FLAG_NULL       (1 << 2)
+#define DRM_XE_VM_BIND_FLAG_DUMPABLE   (1 << 3)
        /** @flags: Bind flags */
        __u32 flags;
 
@@ -1045,20 +1039,6 @@ struct drm_xe_exec_queue_create {
 #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY               0
 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY              0
 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE             1
-#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT    2
-#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE           3
-#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT           4
-#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER           5
-#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY            6
-#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY       7
-/* Monitor 128KB contiguous region with 4K sub-granularity */
-#define     DRM_XE_ACC_GRANULARITY_128K                                0
-/* Monitor 2MB contiguous region with 64KB sub-granularity */
-#define     DRM_XE_ACC_GRANULARITY_2M                          1
-/* Monitor 16MB contiguous region with 512KB sub-granularity */
-#define     DRM_XE_ACC_GRANULARITY_16M                         2
-/* Monitor 64MB contiguous region with 2M sub-granularity */
-#define     DRM_XE_ACC_GRANULARITY_64M                         3
 
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
index c4c53a9ab9595b2a5b95e5b22cafa5bd2cd6fd3c..ff8d21f9e95b7798eaf3e00635050e1631d6697a 100644 (file)
@@ -145,7 +145,7 @@ struct in6_flowlabel_req {
 #define IPV6_TLV_PADN          1
 #define IPV6_TLV_ROUTERALERT   5
 #define IPV6_TLV_CALIPSO       7       /* RFC 5570 */
-#define IPV6_TLV_IOAM          49      /* TEMPORARY IANA allocation for IOAM */
+#define IPV6_TLV_IOAM          49      /* RFC 9486 */
 #define IPV6_TLV_JUMBO         194
 #define IPV6_TLV_HAO           201     /* home address option */
 
index 8426d59cc634d6dc86cc9954072a425b277c24ea..bee58f7468c36a8f030c73f1c16bbfb2ab9acab2 100644 (file)
@@ -876,14 +876,14 @@ config CC_IMPLICIT_FALLTHROUGH
        default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
        default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
 
-# Currently, disable gcc-11+ array-bounds globally.
+# Currently, disable gcc-10+ array-bounds globally.
 # It's still broken in gcc-13, so no upper bound yet.
-config GCC11_NO_ARRAY_BOUNDS
+config GCC10_NO_ARRAY_BOUNDS
        def_bool y
 
 config CC_NO_ARRAY_BOUNDS
        bool
-       default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
+       default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS
 
 # Currently, disable -Wstringop-overflow for GCC globally.
 config GCC_NO_STRINGOP_OVERFLOW
index 8a0bb80fe48a344964e4029fec5e895ee512babf..ef82ffc90cbe9d7aeab50c4856b45a52621a90a9 100644 (file)
@@ -178,7 +178,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
                                    void **frames, int n,
                                    struct xdp_cpumap_stats *stats)
 {
-       struct xdp_rxq_info rxq;
+       struct xdp_rxq_info rxq = {};
        struct xdp_buff xdp;
        int i, nframes = 0;
 
index be72824f32b2cc5e3dfcb8d2bd613b86116a498c..d19cd863d294ea1b589aeae327ae6b10e7211a93 100644 (file)
@@ -1101,6 +1101,7 @@ struct bpf_hrtimer {
        struct bpf_prog *prog;
        void __rcu *callback_fn;
        void *value;
+       struct rcu_head rcu;
 };
 
 /* the actual struct hidden inside uapi struct bpf_timer */
@@ -1332,6 +1333,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
 
        if (in_nmi())
                return -EOPNOTSUPP;
+       rcu_read_lock();
        __bpf_spin_lock_irqsave(&timer->lock);
        t = timer->timer;
        if (!t) {
@@ -1353,6 +1355,7 @@ out:
         * if it was running.
         */
        ret = ret ?: hrtimer_cancel(&t->timer);
+       rcu_read_unlock();
        return ret;
 }
 
@@ -1407,7 +1410,7 @@ out:
         */
        if (this_cpu_read(hrtimer_running) != t)
                hrtimer_cancel(&t->timer);
-       kfree(t);
+       kfree_rcu(t, rcu);
 }
 
 BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
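
bpf_timer_cancel() drops the timer spinlock before calling hrtimer_cancel(), so a concurrent bpf_timer_cancel_and_free() could free the timer out from under it; taking rcu_read_lock() across that window and converting the free to kfree_rcu() closes the race. The pattern, reduced (illustrative names):

#include <linux/hrtimer.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_timer {
	struct hrtimer timer;
	struct rcu_head rcu;
};

static int demo_cancel(struct demo_timer *t)
{
	int ret;

	rcu_read_lock();	/* pins t against demo_free() below */
	ret = hrtimer_cancel(&t->timer);
	rcu_read_unlock();
	return ret;
}

static void demo_free(struct demo_timer *t)
{
	hrtimer_cancel(&t->timer);
	kfree_rcu(t, rcu);	/* waits out readers such as demo_cancel() */
}
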
index e5c3500443c6e71f4fca7a6403dc33aa054d6fd8..ec4e97c61eefe667955e984b934f354b9162b52d 100644 (file)
@@ -978,6 +978,8 @@ __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it,
        BUILD_BUG_ON(__alignof__(struct bpf_iter_task_kern) !=
                                        __alignof__(struct bpf_iter_task));
 
+       kit->pos = NULL;
+
        switch (flags) {
        case BPF_TASK_ITER_ALL_THREADS:
        case BPF_TASK_ITER_ALL_PROCS:
index 65f598694d550359f2b926ef26ae30d0c80c6f69..ddea9567f755946501cd2dc92aef56057ddee41d 100644 (file)
@@ -5227,7 +5227,9 @@ BTF_ID(struct, prog_test_ref_kfunc)
 #ifdef CONFIG_CGROUPS
 BTF_ID(struct, cgroup)
 #endif
+#ifdef CONFIG_BPF_JIT
 BTF_ID(struct, bpf_cpumask)
+#endif
 BTF_ID(struct, task_struct)
 BTF_SET_END(rcu_protected_types)
 
@@ -16600,6 +16602,9 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
 {
        int i;
 
+       if (old->callback_depth > cur->callback_depth)
+               return false;
+
        for (i = 0; i < MAX_BPF_REG; i++)
                if (!regsafe(env, &old->regs[i], &cur->regs[i],
                             &env->idmap_scratch, exact))
index ba36c073304a3eee081b770b12dacb0e5b1a60cd..927bef3a598ad5ce4b0fee8dfce70d0a89c1964c 100644 (file)
@@ -2562,7 +2562,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
                update_partition_sd_lb(cs, old_prs);
 out_free:
        free_cpumasks(NULL, &tmp);
-       return 0;
+       return retval;
 }
 
 /**
@@ -2598,9 +2598,6 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
                return 0;
 
-       if (alloc_cpumasks(NULL, &tmp))
-               return -ENOMEM;
-
        if (*buf)
                compute_effective_exclusive_cpumask(trialcs, NULL);
 
@@ -2615,6 +2612,9 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        if (retval)
                return retval;
 
+       if (alloc_cpumasks(NULL, &tmp))
+               return -ENOMEM;
+
        if (old_prs) {
                if (cpumask_empty(trialcs->effective_xcpus)) {
                        invalidate = true;
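
Two distinct fixes here: update_cpumask() returned a hard-coded 0 from its shared exit label, silently discarding earlier failures, and update_exclusive_cpumask() allocated its temporary cpumasks before validation, leaking them on every early return. The first bug class in miniature (hypothetical helpers):

static int demo_validate(void);	/* hypothetical */
static int demo_apply(void);	/* hypothetical */
static void demo_cleanup(void);	/* hypothetical */

static int demo_update(void)
{
	int retval;

	retval = demo_validate();
	if (retval)
		goto out_free;
	retval = demo_apply();
out_free:
	demo_cleanup();
	return retval;		/* not "return 0": propagate the error */
}
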
index 2ad881d07752c15f60a4c14bee21051117d5aeb2..4e715b9b278e7fd7fbea70110f5a829635a4bc01 100644 (file)
        | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK                     \
        | MEMBARRIER_CMD_GET_REGISTRATIONS)
 
+static DEFINE_MUTEX(membarrier_ipi_mutex);
+#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)
+
 static void ipi_mb(void *info)
 {
        smp_mb();       /* IPIs should be serializing but paranoid. */
@@ -259,6 +262,7 @@ static int membarrier_global_expedited(void)
        if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                return -ENOMEM;
 
+       SERIALIZE_IPI();
        cpus_read_lock();
        rcu_read_lock();
        for_each_online_cpu(cpu) {
@@ -347,6 +351,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
        if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
                return -ENOMEM;
 
+       SERIALIZE_IPI();
        cpus_read_lock();
 
        if (cpu_id >= 0) {
@@ -460,6 +465,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
         * between threads which are users of @mm has its membarrier state
         * updated.
         */
+       SERIALIZE_IPI();
        cpus_read_lock();
        rcu_read_lock();
        for_each_online_cpu(cpu) {
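
SERIALIZE_IPI() leans on the cleanup.h guard() helper: the mutex taken at the top of each function is released automatically on every return path, so none of the existing exits needed an explicit unlock. The mechanism in isolation:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo(int arg)
{
	guard(mutex)(&demo_lock);	/* unlocked automatically at return */

	if (arg < 0)
		return -EINVAL;		/* no mutex_unlock() needed */
	return 0;
}
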
index 6cd2a4e3afb8fb6045dbf27543a67147dea20900..9ff0182458408438ddc5d7c720d866d0adc02dfd 100644 (file)
@@ -189,9 +189,6 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
 {
        int size;
 
-       if (num <= 0)
-               return -EINVAL;
-
        if (!fp->exit_handler) {
                fp->rethook = NULL;
                return 0;
@@ -199,15 +196,16 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
 
        /* Initialize rethook if needed */
        if (fp->nr_maxactive)
-               size = fp->nr_maxactive;
+               num = fp->nr_maxactive;
        else
-               size = num * num_possible_cpus() * 2;
-       if (size <= 0)
+               num *= num_possible_cpus() * 2;
+       if (num <= 0)
                return -EINVAL;
 
+       size = sizeof(struct fprobe_rethook_node) + fp->entry_data_size;
+
        /* Initialize rethook */
-       fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler,
-                               sizeof(struct fprobe_rethook_node), size);
+       fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler, size, num);
        if (IS_ERR(fp->rethook))
                return PTR_ERR(fp->rethook);
 
index fd4bfe3ecf014f6b3c83f9a7fa043b7df44dac32..aa332ace108b18169b3c67198a4fc0b621670b00 100644 (file)
@@ -384,7 +384,6 @@ struct rb_irq_work {
        struct irq_work                 work;
        wait_queue_head_t               waiters;
        wait_queue_head_t               full_waiters;
-       long                            wait_index;
        bool                            waiters_pending;
        bool                            full_waiters_pending;
        bool                            wakeup_full;
@@ -756,8 +755,19 @@ static void rb_wake_up_waiters(struct irq_work *work)
 
        wake_up_all(&rbwork->waiters);
        if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
+               /* Only cpu_buffer sets the above flags */
+               struct ring_buffer_per_cpu *cpu_buffer =
+                       container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
+
+               /* Called from interrupt context */
+               raw_spin_lock(&cpu_buffer->reader_lock);
                rbwork->wakeup_full = false;
                rbwork->full_waiters_pending = false;
+
+               /* Waking up all waiters, they will reset the shortest full */
+               cpu_buffer->shortest_full = 0;
+               raw_spin_unlock(&cpu_buffer->reader_lock);
+
                wake_up_all(&rbwork->full_waiters);
        }
 }
@@ -798,14 +808,40 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
                rbwork = &cpu_buffer->irq_work;
        }
 
-       rbwork->wait_index++;
-       /* make sure the waiters see the new index */
-       smp_wmb();
-
        /* This can be called in any context */
        irq_work_queue(&rbwork->work);
 }
 
+static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+       bool ret = false;
+
+       /* Reads of all CPUs always wait for any data */
+       if (cpu == RING_BUFFER_ALL_CPUS)
+               return !ring_buffer_empty(buffer);
+
+       cpu_buffer = buffer->buffers[cpu];
+
+       if (!ring_buffer_empty_cpu(buffer, cpu)) {
+               unsigned long flags;
+               bool pagebusy;
+
+               if (!full)
+                       return true;
+
+               raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+               pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+               ret = !pagebusy && full_hit(buffer, cpu, full);
+
+               if (!cpu_buffer->shortest_full ||
+                   cpu_buffer->shortest_full > full)
+                       cpu_buffer->shortest_full = full;
+               raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+       }
+       return ret;
+}
+
 /**
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
@@ -821,7 +857,6 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
        struct ring_buffer_per_cpu *cpu_buffer;
        DEFINE_WAIT(wait);
        struct rb_irq_work *work;
-       long wait_index;
        int ret = 0;
 
        /*
@@ -840,81 +875,54 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
                work = &cpu_buffer->irq_work;
        }
 
-       wait_index = READ_ONCE(work->wait_index);
-
-       while (true) {
-               if (full)
-                       prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
-               else
-                       prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
-
-               /*
-                * The events can happen in critical sections where
-                * checking a work queue can cause deadlocks.
-                * After adding a task to the queue, this flag is set
-                * only to notify events to try to wake up the queue
-                * using irq_work.
-                *
-                * We don't clear it even if the buffer is no longer
-                * empty. The flag only causes the next event to run
-                * irq_work to do the work queue wake up. The worse
-                * that can happen if we race with !trace_empty() is that
-                * an event will cause an irq_work to try to wake up
-                * an empty queue.
-                *
-                * There's no reason to protect this flag either, as
-                * the work queue and irq_work logic will do the necessary
-                * synchronization for the wake ups. The only thing
-                * that is necessary is that the wake up happens after
-                * a task has been queued. It's OK for spurious wake ups.
-                */
-               if (full)
-                       work->full_waiters_pending = true;
-               else
-                       work->waiters_pending = true;
-
-               if (signal_pending(current)) {
-                       ret = -EINTR;
-                       break;
-               }
-
-               if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
-                       break;
-
-               if (cpu != RING_BUFFER_ALL_CPUS &&
-                   !ring_buffer_empty_cpu(buffer, cpu)) {
-                       unsigned long flags;
-                       bool pagebusy;
-                       bool done;
-
-                       if (!full)
-                               break;
-
-                       raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-                       pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
-                       done = !pagebusy && full_hit(buffer, cpu, full);
+       if (full)
+               prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+       else
+               prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
-                       if (!cpu_buffer->shortest_full ||
-                           cpu_buffer->shortest_full > full)
-                               cpu_buffer->shortest_full = full;
-                       raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-                       if (done)
-                               break;
-               }
+       /*
+        * The events can happen in critical sections where
+        * checking a work queue can cause deadlocks.
+        * After adding a task to the queue, this flag is set
+        * only to notify events to try to wake up the queue
+        * using irq_work.
+        *
+        * We don't clear it even if the buffer is no longer
+        * empty. The flag only causes the next event to run
+        * irq_work to do the work queue wake up. The worse
+        * that can happen if we race with !trace_empty() is that
+        * an event will cause an irq_work to try to wake up
+        * an empty queue.
+        *
+        * There's no reason to protect this flag either, as
+        * the work queue and irq_work logic will do the necessary
+        * synchronization for the wake ups. The only thing
+        * that is necessary is that the wake up happens after
+        * a task has been queued. It's OK for spurious wake ups.
+        */
+       if (full)
+               work->full_waiters_pending = true;
+       else
+               work->waiters_pending = true;
 
-               schedule();
+       if (rb_watermark_hit(buffer, cpu, full))
+               goto out;
 
-               /* Make sure to see the new wait index */
-               smp_rmb();
-               if (wait_index != work->wait_index)
-                       break;
+       if (signal_pending(current)) {
+               ret = -EINTR;
+               goto out;
        }
 
+       schedule();
+ out:
        if (full)
                finish_wait(&work->full_waiters, &wait);
        else
                finish_wait(&work->waiters, &wait);
 
+       if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current))
+               ret = -EINTR;
+
        return ret;
 }
 
@@ -937,28 +945,33 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table, int full)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       struct rb_irq_work *work;
+       struct rb_irq_work *rbwork;
 
        if (cpu == RING_BUFFER_ALL_CPUS) {
-               work = &buffer->irq_work;
+               rbwork = &buffer->irq_work;
                full = 0;
        } else {
                if (!cpumask_test_cpu(cpu, buffer->cpumask))
                        return EPOLLERR;
 
                cpu_buffer = buffer->buffers[cpu];
-               work = &cpu_buffer->irq_work;
+               rbwork = &cpu_buffer->irq_work;
        }
 
        if (full) {
-               poll_wait(filp, &work->full_waiters, poll_table);
-               work->full_waiters_pending = true;
+               unsigned long flags;
+
+               poll_wait(filp, &rbwork->full_waiters, poll_table);
+
+               raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+               rbwork->full_waiters_pending = true;
                if (!cpu_buffer->shortest_full ||
                    cpu_buffer->shortest_full > full)
                        cpu_buffer->shortest_full = full;
+               raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
        } else {
-               poll_wait(filp, &work->waiters, poll_table);
-               work->waiters_pending = true;
+               poll_wait(filp, &rbwork->waiters, poll_table);
+               rbwork->waiters_pending = true;
        }
 
        /*
@@ -5877,6 +5890,10 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
        if (psize <= BUF_PAGE_HDR_SIZE)
                return -EINVAL;
 
+       /* Size of a subbuf cannot be greater than the write counter */
+       if (psize > RB_WRITE_MASK + 1)
+               return -EINVAL;
+
        old_order = buffer->subbuf_order;
        old_size = buffer->subbuf_size;
 
index 8198bfc54b58d9729ab6f6318eb9720a32d40375..c9c8983073485bb3645062760ffc4b0dcef94e9d 100644 (file)
@@ -7293,6 +7293,8 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
+#define TRACE_MARKER_MAX_SIZE          4096
+
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
                                        size_t cnt, loff_t *fpos)
@@ -7320,6 +7322,9 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        if ((ssize_t)cnt < 0)
                return -EINVAL;
 
+       if (cnt > TRACE_MARKER_MAX_SIZE)
+               cnt = TRACE_MARKER_MAX_SIZE;
+
        meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
  again:
        size = cnt + meta_size;
@@ -7328,11 +7333,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        if (cnt < FAULTED_SIZE)
                size += FAULTED_SIZE - cnt;
 
-       if (size > TRACE_SEQ_BUFFER_SIZE) {
-               cnt -= size - TRACE_SEQ_BUFFER_SIZE;
-               goto again;
-       }
-
        buffer = tr->array_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                            tracing_gen_ctx());
@@ -8393,6 +8393,20 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
        return size;
 }
 
+static int tracing_buffers_flush(struct file *file, fl_owner_t id)
+{
+       struct ftrace_buffer_info *info = file->private_data;
+       struct trace_iterator *iter = &info->iter;
+
+       iter->wait_index++;
+       /* Make sure the waiters see the new wait_index */
+       smp_wmb();
+
+       ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
+
+       return 0;
+}
+
 static int tracing_buffers_release(struct inode *inode, struct file *file)
 {
        struct ftrace_buffer_info *info = file->private_data;
@@ -8404,12 +8418,6 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 
        __trace_array_put(iter->tr);
 
-       iter->wait_index++;
-       /* Make sure the waiters see the new wait_index */
-       smp_wmb();
-
-       ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
-
        if (info->spare)
                ring_buffer_free_read_page(iter->array_buffer->buffer,
                                           info->spare_cpu, info->spare);
@@ -8625,6 +8633,7 @@ static const struct file_operations tracing_buffers_fops = {
        .read           = tracing_buffers_read,
        .poll           = tracing_buffers_poll,
        .release        = tracing_buffers_release,
+       .flush          = tracing_buffers_flush,
        .splice_read    = tracing_buffers_splice_read,
        .unlocked_ioctl = tracing_buffers_ioctl,
        .llseek         = no_llseek,
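
Moving the wakeup from ->release() to the new ->flush() is the point of this hunk: ->release() only runs once the last file reference is gone, which never happens while a reader is still blocked on the buffer, whereas ->flush() runs on every close() and can kick those readers loose first. In file_operations terms (hypothetical handlers):

#include <linux/fs.h>

static int demo_flush(struct file *file, fl_owner_t id);	/* hypothetical */
static int demo_release(struct inode *inode, struct file *file);

static const struct file_operations demo_fops = {
	.flush   = demo_flush,	 /* every close(): wake blocked waiters */
	.release = demo_release, /* once, after the last reference drops */
};
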
index 3e7fa44dc2b24850f8b836f6bd223807fbcf0c48..d8b302d0108302d9ef2debe735c4b7778a217f90 100644 (file)
@@ -1587,12 +1587,11 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
 {
        struct print_entry *field;
        struct trace_seq *s = &iter->seq;
-       int max = iter->ent_size - offsetof(struct print_entry, buf);
 
        trace_assign_type(field, iter->ent);
 
        seq_print_ip_sym(s, field->ip, flags);
-       trace_seq_printf(s, ": %.*s", max, field->buf);
+       trace_seq_printf(s, ": %s", field->buf);
 
        return trace_handle_return(s);
 }
@@ -1601,11 +1600,10 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
                                         struct trace_event *event)
 {
        struct print_entry *field;
-       int max = iter->ent_size - offsetof(struct print_entry, buf);
 
        trace_assign_type(field, iter->ent);
 
-       trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf);
+       trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
 
        return trace_handle_return(&iter->seq);
 }
index 975a07f9f1cc08838d272f83d5f04a85ff2f5cd2..ef36b829ae1f55bcfe4c58b567ded4fc348db0af 100644 (file)
@@ -2235,6 +2235,7 @@ config TEST_DIV64
 config TEST_IOV_ITER
        tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
        depends on KUNIT
+       depends on MMU
        default KUNIT_ALL_TESTS
        help
          Enable this to turn on testing of the operation of the I/O iterator
index 225bb77014600f796e972a9c0f03638c23750a06..bf70850035c76f468c7c0af023454bf5bc6716e3 100644 (file)
@@ -215,7 +215,7 @@ static const u32 init_sums_no_overflow[] = {
        0xffff0000, 0xfffffffb,
 };
 
-static const __sum16 expected_csum_ipv6_magic[] = {
+static const u16 expected_csum_ipv6_magic[] = {
        0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f,  0x1034, 0x8422, 0x6fc0,
        0xd2f6, 0xbeb5, 0x9d3,  0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e,
        0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d,
@@ -241,7 +241,7 @@ static const __sum16 expected_csum_ipv6_magic[] = {
        0x3845, 0x1014
 };
 
-static const __sum16 expected_fast_csum[] = {
+static const u16 expected_fast_csum[] = {
        0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e,  0xe902, 0xa5e9, 0x87a5, 0x7187,
        0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a,
        0xedbe, 0xabee, 0x6aac, 0xe6b,  0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a,
@@ -577,7 +577,8 @@ static void test_csum_no_carry_inputs(struct kunit *test)
 
 static void test_ip_fast_csum(struct kunit *test)
 {
-       __sum16 csum_result, expected;
+       __sum16 csum_result;
+       u16 expected;
 
        for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) {
                for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) {
@@ -586,7 +587,7 @@ static void test_ip_fast_csum(struct kunit *test)
                                expected_fast_csum[(len - IPv4_MIN_WORDS) *
                                                   NUM_IP_FAST_CSUM_TESTS +
                                                   index];
-                       CHECK_EQ(expected, csum_result);
+                       CHECK_EQ(to_sum16(expected), csum_result);
                }
        }
 }
@@ -598,7 +599,7 @@ static void test_csum_ipv6_magic(struct kunit *test)
        const struct in6_addr *daddr;
        unsigned int len;
        unsigned char proto;
-       unsigned int csum;
+       __wsum csum;
 
        const int daddr_offset = sizeof(struct in6_addr);
        const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
@@ -611,10 +612,10 @@ static void test_csum_ipv6_magic(struct kunit *test)
                saddr = (const struct in6_addr *)(random_buf + i);
                daddr = (const struct in6_addr *)(random_buf + i +
                                                  daddr_offset);
-               len = *(unsigned int *)(random_buf + i + len_offset);
+               len = le32_to_cpu(*(__le32 *)(random_buf + i + len_offset));
                proto = *(random_buf + i + proto_offset);
-               csum = *(unsigned int *)(random_buf + i + csum_offset);
-               CHECK_EQ(expected_csum_ipv6_magic[i],
+               csum = *(__wsum *)(random_buf + i + csum_offset);
+               CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]),
                         csum_ipv6_magic(saddr, daddr, len, proto, csum));
        }
 #endif /* !CONFIG_NET */
index e0aa6b440ca5f4a4f3560100985e39c068c7a6a8..cf2eb2b2f983797190a4000f5a6a388ac23aae36 100644 (file)
@@ -166,7 +166,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_IOVEC,
-               .copy_mc = false,
                .nofault = false,
                .data_source = direction,
                .__iov = iov,
@@ -244,27 +243,9 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 #endif /* CONFIG_ARCH_HAS_COPY_MC */
 
-static __always_inline
-size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
-                          size_t len, void *to, void *priv2)
-{
-       return copy_mc_to_kernel(to + progress, iter_from, len);
-}
-
-static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
-{
-       if (unlikely(i->count < bytes))
-               bytes = i->count;
-       if (unlikely(!bytes))
-               return 0;
-       return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
-}
-
 static __always_inline
 size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
-       if (unlikely(iov_iter_is_copy_mc(i)))
-               return __copy_from_iter_mc(addr, bytes, i);
        return iterate_and_advance(i, bytes, addr,
                                   copy_from_user_iter, memcpy_from_iter);
 }
@@ -633,7 +614,6 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_KVEC,
-               .copy_mc = false,
                .data_source = direction,
                .kvec = kvec,
                .nr_segs = nr_segs,
@@ -650,7 +630,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_BVEC,
-               .copy_mc = false,
                .data_source = direction,
                .bvec = bvec,
                .nr_segs = nr_segs,
@@ -679,7 +658,6 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
        BUG_ON(direction & ~1);
        *i = (struct iov_iter) {
                .iter_type = ITER_XARRAY,
-               .copy_mc = false,
                .data_source = direction,
                .xarray = xarray,
                .xarray_start = start,
@@ -703,7 +681,6 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
        BUG_ON(direction != READ);
        *i = (struct iov_iter){
                .iter_type = ITER_DISCARD,
-               .copy_mc = false,
                .data_source = false,
                .count = count,
                .iov_offset = 0
index ed2ab43e1b22c0156e5d361c6bfa7eb745759232..be9c576b6e2dc6d35d67d31f15014ab747f478ce 100644 (file)
@@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
        [NLA_S16]       = sizeof(s16),
        [NLA_S32]       = sizeof(s32),
        [NLA_S64]       = sizeof(s64),
+       [NLA_BE16]      = sizeof(__be16),
+       [NLA_BE32]      = sizeof(__be32),
 };
 
 static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
@@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
        [NLA_S16]       = sizeof(s16),
        [NLA_S32]       = sizeof(s32),
        [NLA_S64]       = sizeof(s64),
+       [NLA_BE16]      = sizeof(__be16),
+       [NLA_BE32]      = sizeof(__be32),
 };
 
 /*
index 5caa1f566553843911ffdf2edafd32ee70277ea8..4a7055a63d9f8a8a6723563fd8a30115653eea83 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
+#include <linux/poison.h>
 #include <linux/printk.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
 #define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
                               STACK_DEPOT_EXTRA_BITS)
-#if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32
-/*
- * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack
- * traces. As KMSAN does not support evicting stack traces from the stack
- * depot, the stack depot capacity might be reached quickly with large stack
- * records. Adjust the maximum number of stack depot pools for this case.
- */
-#define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16))
-#else
 #define DEPOT_POOLS_CAP 8192
-#endif
 #define DEPOT_MAX_POOLS \
        (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
         (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
@@ -93,9 +84,6 @@ struct stack_record {
        };
 };
 
-#define DEPOT_STACK_RECORD_SIZE \
-       ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
-
 static bool stack_depot_disabled;
 static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
 static bool __stack_depot_early_init_passed __initdata;
@@ -121,32 +109,31 @@ static void *stack_pools[DEPOT_MAX_POOLS];
 static void *new_pool;
 /* Number of pools in stack_pools. */
 static int pools_num;
+/* Offset to the unused space in the currently used pool. */
+static size_t pool_offset = DEPOT_POOL_SIZE;
 /* Freelist of stack records within stack_pools. */
 static LIST_HEAD(free_stacks);
-/*
- * Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool. This flag marks whether this extra pool
- * needs to be allocated. It has the value 0 when either an extra pool is not
- * yet allocated or if the limit on the number of pools is reached.
- */
-static bool new_pool_required = true;
 /* The lock must be held when performing pool or freelist modifications. */
 static DEFINE_RAW_SPINLOCK(pool_lock);
 
 /* Statistics counters for debugfs. */
 enum depot_counter_id {
-       DEPOT_COUNTER_ALLOCS,
-       DEPOT_COUNTER_FREES,
-       DEPOT_COUNTER_INUSE,
+       DEPOT_COUNTER_REFD_ALLOCS,
+       DEPOT_COUNTER_REFD_FREES,
+       DEPOT_COUNTER_REFD_INUSE,
        DEPOT_COUNTER_FREELIST_SIZE,
+       DEPOT_COUNTER_PERSIST_COUNT,
+       DEPOT_COUNTER_PERSIST_BYTES,
        DEPOT_COUNTER_COUNT,
 };
 static long counters[DEPOT_COUNTER_COUNT];
 static const char *const counter_names[] = {
-       [DEPOT_COUNTER_ALLOCS]          = "allocations",
-       [DEPOT_COUNTER_FREES]           = "frees",
-       [DEPOT_COUNTER_INUSE]           = "in_use",
+       [DEPOT_COUNTER_REFD_ALLOCS]     = "refcounted_allocations",
+       [DEPOT_COUNTER_REFD_FREES]      = "refcounted_frees",
+       [DEPOT_COUNTER_REFD_INUSE]      = "refcounted_in_use",
        [DEPOT_COUNTER_FREELIST_SIZE]   = "freelist_size",
+       [DEPOT_COUNTER_PERSIST_COUNT]   = "persistent_count",
+       [DEPOT_COUNTER_PERSIST_BYTES]   = "persistent_bytes",
 };
 static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
 
@@ -294,48 +281,52 @@ out_unlock:
 EXPORT_SYMBOL_GPL(stack_depot_init);
 
 /*
- * Initializes new stack depot @pool, release all its entries to the freelist,
- * and update the list of pools.
+ * Initializes a new stack pool and updates the list of pools.
  */
-static void depot_init_pool(void *pool)
+static bool depot_init_pool(void **prealloc)
 {
-       int offset;
-
        lockdep_assert_held(&pool_lock);
 
-       /* Initialize handles and link stack records into the freelist. */
-       for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
-            offset += DEPOT_STACK_RECORD_SIZE) {
-               struct stack_record *stack = pool + offset;
-
-               stack->handle.pool_index = pools_num;
-               stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
-               stack->handle.extra = 0;
-
-               /*
-                * Stack traces of size 0 are never saved, and we can simply use
-                * the size field as an indicator if this is a new unused stack
-                * record in the freelist.
-                */
-               stack->size = 0;
+       if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+               /* Bail out if we reached the pool limit. */
+               WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
+               WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
+               WARN_ONCE(1, "Stack depot reached limit capacity");
+               return false;
+       }
 
-               INIT_LIST_HEAD(&stack->hash_list);
-               /*
-                * Add to the freelist front to prioritize never-used entries:
-                * required in case there are entries in the freelist, but their
-                * RCU cookie still belongs to the current RCU grace period
-                * (there can still be concurrent readers).
-                */
-               list_add(&stack->free_list, &free_stacks);
-               counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+       if (!new_pool && *prealloc) {
+               /* We have preallocated memory, use it. */
+               WRITE_ONCE(new_pool, *prealloc);
+               *prealloc = NULL;
        }
 
+       if (!new_pool)
+               return false; /* new_pool and *prealloc are NULL */
+
        /* Save reference to the pool to be used by depot_fetch_stack(). */
-       stack_pools[pools_num] = pool;
+       stack_pools[pools_num] = new_pool;
+
+       /*
+        * Stack depot tries to keep an extra pool allocated even before it runs
+        * out of space in the currently used pool.
+        *
+        * To indicate that a new preallocation is needed, new_pool is reset
+        * to NULL; once the maximum number of pools is reached, it is instead
+        * set to a poison value so that it is never handed out again.
+        */
+       if (pools_num < DEPOT_MAX_POOLS)
+               WRITE_ONCE(new_pool, NULL);
+       else
+               WRITE_ONCE(new_pool, STACK_DEPOT_POISON);
 
        /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
        WRITE_ONCE(pools_num, pools_num + 1);
        ASSERT_EXCLUSIVE_WRITER(pools_num);
+
+       pool_offset = 0;
+
+       return true;
 }
 
 /* Keeps the preallocated memory to be used for a new stack depot pool. */
@@ -347,63 +338,51 @@ static void depot_keep_new_pool(void **prealloc)
         * If a new pool is already saved or the maximum number of
         * pools is reached, do not use the preallocated memory.
         */
-       if (!new_pool_required)
+       if (new_pool)
                return;
 
-       /*
-        * Use the preallocated memory for the new pool
-        * as long as we do not exceed the maximum number of pools.
-        */
-       if (pools_num < DEPOT_MAX_POOLS) {
-               new_pool = *prealloc;
-               *prealloc = NULL;
-       }
-
-       /*
-        * At this point, either a new pool is kept or the maximum
-        * number of pools is reached. In either case, take note that
-        * keeping another pool is not required.
-        */
-       WRITE_ONCE(new_pool_required, false);
+       WRITE_ONCE(new_pool, *prealloc);
+       *prealloc = NULL;
 }
 
 /*
- * Try to initialize a new stack depot pool from either a previous or the
- * current pre-allocation, and release all its entries to the freelist.
+ * Try to initialize a new stack record from the current pool, a cached pool, or
+ * the current pre-allocation.
  */
-static bool depot_try_init_pool(void **prealloc)
+static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
 {
+       struct stack_record *stack;
+       void *current_pool;
+       u32 pool_index;
+
        lockdep_assert_held(&pool_lock);
 
-       /* Check if we have a new pool saved and use it. */
-       if (new_pool) {
-               depot_init_pool(new_pool);
-               new_pool = NULL;
+       if (pool_offset + size > DEPOT_POOL_SIZE) {
+               if (!depot_init_pool(prealloc))
+                       return NULL;
+       }
 
-               /* Take note that we might need a new new_pool. */
-               if (pools_num < DEPOT_MAX_POOLS)
-                       WRITE_ONCE(new_pool_required, true);
+       if (WARN_ON_ONCE(pools_num < 1))
+               return NULL;
+       pool_index = pools_num - 1;
+       current_pool = stack_pools[pool_index];
+       if (WARN_ON_ONCE(!current_pool))
+               return NULL;
 
-               return true;
-       }
+       stack = current_pool + pool_offset;
 
-       /* Bail out if we reached the pool limit. */
-       if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
-               WARN_ONCE(1, "Stack depot reached limit capacity");
-               return false;
-       }
+       /* Pre-initialize handle once. */
+       stack->handle.pool_index = pool_index;
+       stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
+       stack->handle.extra = 0;
+       INIT_LIST_HEAD(&stack->hash_list);
 
-       /* Check if we have preallocated memory and use it. */
-       if (*prealloc) {
-               depot_init_pool(*prealloc);
-               *prealloc = NULL;
-               return true;
-       }
+       pool_offset += size;
 
-       return false;
+       return stack;
 }
 
-/* Try to find next free usable entry. */
+/* Try to find next free usable entry from the freelist. */
 static struct stack_record *depot_pop_free(void)
 {
        struct stack_record *stack;
@@ -420,7 +399,7 @@ static struct stack_record *depot_pop_free(void)
         * check the first entry.
         */
        stack = list_first_entry(&free_stacks, struct stack_record, free_list);
-       if (stack->size && !poll_state_synchronize_rcu(stack->rcu_state))
+       if (!poll_state_synchronize_rcu(stack->rcu_state))
                return NULL;
 
        list_del(&stack->free_list);
@@ -429,48 +408,73 @@ static struct stack_record *depot_pop_free(void)
        return stack;
 }
 
+static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
+{
+       const size_t used = flex_array_size(s, entries, nr_entries);
+       const size_t unused = sizeof(s->entries) - used;
+
+       WARN_ON_ONCE(sizeof(s->entries) < used);
+
+       return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
+}
+
 /* Allocates a new stack in a stack depot pool. */
 static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
 {
-       struct stack_record *stack;
+       struct stack_record *stack = NULL;
+       size_t record_size;
 
        lockdep_assert_held(&pool_lock);
 
        /* This should already be checked by public API entry points. */
-       if (WARN_ON_ONCE(!size))
+       if (WARN_ON_ONCE(!nr_entries))
                return NULL;
 
-       /* Check if we have a stack record to save the stack trace. */
-       stack = depot_pop_free();
-       if (!stack) {
-               /* No usable entries on the freelist - try to refill the freelist. */
-               if (!depot_try_init_pool(prealloc))
-                       return NULL;
+       /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+       if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
+               nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;
+
+       if (flags & STACK_DEPOT_FLAG_GET) {
+               /*
+                * Evictable entries have to allocate the max. size so they may
+                * safely be re-used by differently sized allocations.
+                */
+               record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
                stack = depot_pop_free();
-               if (WARN_ON(!stack))
-                       return NULL;
+       } else {
+               record_size = depot_stack_record_size(stack, nr_entries);
        }
 
-       /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
-       if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
-               size = CONFIG_STACKDEPOT_MAX_FRAMES;
+       if (!stack) {
+               stack = depot_pop_free_pool(prealloc, record_size);
+               if (!stack)
+                       return NULL;
+       }
 
        /* Save the stack trace. */
        stack->hash = hash;
-       stack->size = size;
-       /* stack->handle is already filled in by depot_init_pool(). */
-       refcount_set(&stack->count, 1);
-       memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
+       stack->size = nr_entries;
+       /* stack->handle is already filled in by depot_pop_free_pool(). */
+       memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));
+
+       if (flags & STACK_DEPOT_FLAG_GET) {
+               refcount_set(&stack->count, 1);
+               counters[DEPOT_COUNTER_REFD_ALLOCS]++;
+               counters[DEPOT_COUNTER_REFD_INUSE]++;
+       } else {
+               /* Warn on attempts to switch to refcounting this entry. */
+               refcount_set(&stack->count, REFCOUNT_SATURATED);
+               counters[DEPOT_COUNTER_PERSIST_COUNT]++;
+               counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
+       }
 
        /*
         * Let KMSAN know the stored stack record is initialized. This shall
         * prevent false positive reports if instrumented code accesses it.
         */
-       kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
+       kmsan_unpoison_memory(stack, record_size);
 
-       counters[DEPOT_COUNTER_ALLOCS]++;
-       counters[DEPOT_COUNTER_INUSE]++;
        return stack;
 }
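
To make the sizing arithmetic of depot_stack_record_size() above concrete,
here is a standalone user-space re-implementation with illustrative constants
(the real DEPOT_STACK_ALIGN and CONFIG_STACKDEPOT_MAX_FRAMES are
kernel-config dependent; 8-byte longs assumed):

#include <stddef.h>
#include <stdio.h>

#define MAX_FRAMES	64	/* stand-in for CONFIG_STACKDEPOT_MAX_FRAMES */
#define ALIGN_SHIFT	4	/* stand-in for DEPOT_STACK_ALIGN */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct record {			/* header fields elided for brevity */
	unsigned long entries[MAX_FRAMES];
};

static size_t record_size(unsigned int nr_entries)
{
	size_t used = nr_entries * sizeof(unsigned long);
	size_t unused = sizeof(((struct record *)0)->entries) - used;

	/* Shrink the full record by the unused tail of entries[], then
	 * round up to the depot's record alignment.
	 */
	return ALIGN(sizeof(struct record) - unused, (size_t)1 << ALIGN_SHIFT);
}

int main(void)
{
	/* A 5-frame trace takes 48 bytes here instead of the full 512,
	 * which is why non-evictable records can be packed densely.
	 */
	printf("%zu %zu\n", record_size(5), record_size(MAX_FRAMES));
	return 0;
}

This is also why STACK_DEPOT_FLAG_GET records reserve the maximum size up
front: a freed record may later be reused for a trace with more frames than
the one it originally held.
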
 
@@ -538,8 +542,8 @@ static void depot_free_stack(struct stack_record *stack)
        list_add_tail(&stack->free_list, &free_stacks);
 
        counters[DEPOT_COUNTER_FREELIST_SIZE]++;
-       counters[DEPOT_COUNTER_FREES]++;
-       counters[DEPOT_COUNTER_INUSE]--;
+       counters[DEPOT_COUNTER_REFD_FREES]++;
+       counters[DEPOT_COUNTER_REFD_INUSE]--;
 
        printk_deferred_exit();
        raw_spin_unlock_irqrestore(&pool_lock, flags);
@@ -660,7 +664,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
         * Allocate memory for a new pool if required now:
         * we won't be able to do that under the lock.
         */
-       if (unlikely(can_alloc && READ_ONCE(new_pool_required))) {
+       if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in atomic
@@ -681,7 +685,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
        found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
        if (!found) {
                struct stack_record *new =
-                       depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+                       depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);
 
                if (new) {
                        /*
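
From the caller's side, the refcounted/persistent split is selected purely by
the flags argument. A sketch of the two call styles, assuming the
stack_depot_save_flags() signature used in these hunks (kernel context, not
compilable stand-alone):

#include <linux/stackdepot.h>

static depot_stack_handle_t save_persistent(unsigned long *entries,
					    unsigned int nr, gfp_t gfp)
{
	/* No FLAG_GET: the record is sized to nr frames and never evicted. */
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC);
}

static depot_stack_handle_t save_refcounted(unsigned long *entries,
					    unsigned int nr, gfp_t gfp)
{
	/* FLAG_GET: the record is max-sized and refcounted, and must be
	 * released with stack_depot_put() when no longer needed.
	 */
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}
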
index 4add68d40e8d99c72bd6af648510aadd587dddfc..b961db601df4194f4cc69535bf154bef4a2624f0 100644 (file)
@@ -2723,16 +2723,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                unsigned int alloc_flags, const struct alloc_context *ac,
                enum compact_priority prio, struct page **capture)
 {
-       int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
        struct zoneref *z;
        struct zone *zone;
        enum compact_result rc = COMPACT_SKIPPED;
 
-       /*
-        * Check if the GFP flags allow compaction - GFP_NOIO is really
-        * tricky context because the migration might require IO
-        */
-       if (!may_perform_io)
+       if (!gfp_compaction_allowed(gfp_mask))
                return COMPACT_SKIPPED;
 
        trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
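
try_to_compact_pages() now delegates the GFP check to
gfp_compaction_allowed(), which is defined elsewhere in this series. Based on
the __GFP_IO test it replaces, a plausible sketch of the helper:

/* Sketch only; the authoritative definition lives in the gfp headers. */
static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
	/* Compaction migrates pages, and migration might require IO. */
	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}
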
index 36f6f1d21ff069de12575a4f0d932e0dfc316c11..5b325749fc12597ddd273ae605bdb1c04a93f99e 100644 (file)
@@ -1026,6 +1026,9 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
        damon_for_each_scheme(s, c) {
                struct damos_quota *quota = &s->quota;
 
+               if (c->passed_sample_intervals != s->next_apply_sis)
+                       continue;
+
                if (!s->wmarks.activated)
                        continue;
 
@@ -1176,10 +1179,6 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
                if (c->passed_sample_intervals != s->next_apply_sis)
                        continue;
 
-               s->next_apply_sis +=
-                       (s->apply_interval_us ? s->apply_interval_us :
-                        c->attrs.aggr_interval) / sample_interval;
-
                if (!s->wmarks.activated)
                        continue;
 
@@ -1195,6 +1194,14 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
                damon_for_each_region_safe(r, next_r, t)
                        damon_do_apply_schemes(c, t, r);
        }
+
+       damon_for_each_scheme(s, c) {
+               if (c->passed_sample_intervals != s->next_apply_sis)
+                       continue;
+               s->next_apply_sis +=
+                       (s->apply_interval_us ? s->apply_interval_us :
+                        c->attrs.aggr_interval) / sample_interval;
+       }
 }
 
 /*
index f2e5f9431892eb207bec1da87224282e3de27371..3de2916a65c38c372b5ed8472b7a87b34026aed7 100644 (file)
@@ -185,9 +185,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
        return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
 }
 
+static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
+               struct damos_quota *src)
+{
+       dst->total_charged_sz = src->total_charged_sz;
+       dst->total_charged_ns = src->total_charged_ns;
+       dst->charged_sz = src->charged_sz;
+       dst->charged_from = src->charged_from;
+       dst->charge_target_from = src->charge_target_from;
+       dst->charge_addr_from = src->charge_addr_from;
+}
+
 static int damon_lru_sort_apply_parameters(void)
 {
-       struct damos *scheme;
+       struct damos *scheme, *hot_scheme, *cold_scheme;
+       struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
        unsigned int hot_thres, cold_thres;
        int err = 0;
 
@@ -195,18 +207,35 @@ static int damon_lru_sort_apply_parameters(void)
        if (err)
                return err;
 
+       damon_for_each_scheme(scheme, ctx) {
+               if (!old_hot_scheme) {
+                       old_hot_scheme = scheme;
+                       continue;
+               }
+               old_cold_scheme = scheme;
+       }
+
        hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
                hot_thres_access_freq / 1000;
-       scheme = damon_lru_sort_new_hot_scheme(hot_thres);
-       if (!scheme)
+       hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+       if (!hot_scheme)
                return -ENOMEM;
-       damon_set_schemes(ctx, &scheme, 1);
+       if (old_hot_scheme)
+               damon_lru_sort_copy_quota_status(&hot_scheme->quota,
+                               &old_hot_scheme->quota);
 
        cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
-       scheme = damon_lru_sort_new_cold_scheme(cold_thres);
-       if (!scheme)
+       cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+       if (!cold_scheme) {
+               damon_destroy_scheme(hot_scheme);
                return -ENOMEM;
-       damon_add_scheme(ctx, scheme);
+       }
+       if (old_cold_scheme)
+               damon_lru_sort_copy_quota_status(&cold_scheme->quota,
+                               &old_cold_scheme->quota);
+
+       damon_set_schemes(ctx, &hot_scheme, 1);
+       damon_add_scheme(ctx, cold_scheme);
 
        return damon_set_region_biggest_system_ram_default(target,
                                        &monitor_region_start,
index ab974e477d2f2850f642fbbafac48a8b3a5d136b..66e190f0374ac84b47100b8ba21fe4c32e104891 100644 (file)
@@ -150,9 +150,20 @@ static struct damos *damon_reclaim_new_scheme(void)
                        &damon_reclaim_wmarks);
 }
 
+static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
+               struct damos_quota *src)
+{
+       dst->total_charged_sz = src->total_charged_sz;
+       dst->total_charged_ns = src->total_charged_ns;
+       dst->charged_sz = src->charged_sz;
+       dst->charged_from = src->charged_from;
+       dst->charge_target_from = src->charge_target_from;
+       dst->charge_addr_from = src->charge_addr_from;
+}
+
 static int damon_reclaim_apply_parameters(void)
 {
-       struct damos *scheme;
+       struct damos *scheme, *old_scheme;
        struct damos_filter *filter;
        int err = 0;
 
@@ -164,6 +175,11 @@ static int damon_reclaim_apply_parameters(void)
        scheme = damon_reclaim_new_scheme();
        if (!scheme)
                return -ENOMEM;
+       if (!list_empty(&ctx->schemes)) {
+               damon_for_each_scheme(old_scheme, ctx)
+                       damon_reclaim_copy_quota_status(&scheme->quota,
+                                       &old_scheme->quota);
+       }
        if (skip_anon) {
                filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
                if (!filter) {
index dd2fb512700920803b10621b82ffaa88bff30a92..ae0f0b314f3a9a5ec251021d0fb68d423fa53cd7 100644 (file)
@@ -1905,6 +1905,10 @@ void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
        damon_for_each_scheme(scheme, ctx) {
                struct damon_sysfs_scheme *sysfs_scheme;
 
+               /* user could have removed the scheme sysfs dir */
+               if (i >= sysfs_schemes->nr)
+                       break;
+
                sysfs_scheme = sysfs_schemes->schemes_arr[i];
                damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
                                &scheme->quota);
index 5662e29fe25335cf9e6227ae9fd4f22971095adb..65c19025da3dfee99ba0c1129874283bfcd5c72f 100644 (file)
@@ -362,6 +362,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
        vaddr &= HPAGE_PUD_MASK;
 
        pud = pfn_pud(args->pud_pfn, args->page_prot);
+       /*
+        * Some architectures have debug checks to make sure that huge
+        * pud mappings are only found with devmap entries. For now,
+        * test only with devmap entries.
+        */
+       pud = pud_mkdevmap(pud);
        set_pud_at(args->mm, vaddr, args->pudp, pud);
        flush_dcache_page(page);
        pudp_set_wrprotect(args->mm, vaddr, args->pudp);
@@ -374,6 +380,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
        WARN_ON(!pud_none(pud));
 #endif /* __PAGETABLE_PMD_FOLDED */
        pud = pfn_pud(args->pud_pfn, args->page_prot);
+       pud = pud_mkdevmap(pud);
        pud = pud_wrprotect(pud);
        pud = pud_mkclean(pud);
        set_pud_at(args->mm, vaddr, args->pudp, pud);
@@ -391,6 +398,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
        pud = pfn_pud(args->pud_pfn, args->page_prot);
+       pud = pud_mkdevmap(pud);
        pud = pud_mkyoung(pud);
        set_pud_at(args->mm, vaddr, args->pudp, pud);
        flush_dcache_page(page);
index 750e779c23db74730fa7743c2307d1b996729d62..4a30de98a8c75daec31d1d79d15a9d9514e9fd1d 100644 (file)
@@ -4111,28 +4111,40 @@ static void filemap_cachestat(struct address_space *mapping,
 
        rcu_read_lock();
        xas_for_each(&xas, folio, last_index) {
+               int order;
                unsigned long nr_pages;
                pgoff_t folio_first_index, folio_last_index;
 
+               /*
+                * Don't deref the folio. It is not pinned, and might
+                * get freed (and reused) underneath us.
+                *
+                * We *could* pin it, but that would be expensive for
+                * what should be a fast and lightweight syscall.
+                *
+                * Instead, derive all information of interest from
+                * the rcu-protected xarray.
+                */
+
                if (xas_retry(&xas, folio))
                        continue;
 
+               order = xa_get_order(xas.xa, xas.xa_index);
+               nr_pages = 1 << order;
+               folio_first_index = round_down(xas.xa_index, 1 << order);
+               folio_last_index = folio_first_index + nr_pages - 1;
+
+               /* Folios might straddle the range boundaries; only count covered pages. */
+               if (folio_first_index < first_index)
+                       nr_pages -= first_index - folio_first_index;
+
+               if (folio_last_index > last_index)
+                       nr_pages -= folio_last_index - last_index;
+
                if (xa_is_value(folio)) {
                        /* page is evicted */
                        void *shadow = (void *)folio;
                        bool workingset; /* not used */
-                       int order = xa_get_order(xas.xa, xas.xa_index);
-
-                       nr_pages = 1 << order;
-                       folio_first_index = round_down(xas.xa_index, 1 << order);
-                       folio_last_index = folio_first_index + nr_pages - 1;
-
-                       /* Folios might straddle the range boundaries, only count covered pages */
-                       if (folio_first_index < first_index)
-                               nr_pages -= first_index - folio_first_index;
-
-                       if (folio_last_index > last_index)
-                               nr_pages -= folio_last_index - last_index;
 
                        cs->nr_evicted += nr_pages;
 
@@ -4150,24 +4162,13 @@ static void filemap_cachestat(struct address_space *mapping,
                        goto resched;
                }
 
-               nr_pages = folio_nr_pages(folio);
-               folio_first_index = folio_pgoff(folio);
-               folio_last_index = folio_first_index + nr_pages - 1;
-
-               /* Folios might straddle the range boundaries, only count covered pages */
-               if (folio_first_index < first_index)
-                       nr_pages -= first_index - folio_first_index;
-
-               if (folio_last_index > last_index)
-                       nr_pages -= folio_last_index - last_index;
-
                /* page is in cache */
                cs->nr_cache += nr_pages;
 
-               if (folio_test_dirty(folio))
+               if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
                        cs->nr_dirty += nr_pages;
 
-               if (folio_test_writeback(folio))
+               if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
                        cs->nr_writeback += nr_pages;
 
 resched:
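
A standalone check of the boundary clamp that now runs for both resident and
evicted entries (hypothetical indices):

#include <stdio.h>

int main(void)
{
	/* cachestat() query covers page-cache indices 10..17. */
	unsigned long first_index = 10, last_index = 17;
	/* An order-3 folio occupies indices 8..15. */
	unsigned long folio_first_index = 8, nr_pages = 8;
	unsigned long folio_last_index = folio_first_index + nr_pages - 1;

	if (folio_first_index < first_index)
		nr_pages -= first_index - folio_first_index; /* trims 2 */
	if (folio_last_index > last_index)
		nr_pages -= folio_last_index - last_index;   /* trims 0 */

	printf("covered pages: %lu\n", nr_pages);	/* prints 6 */
	return 0;
}
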
index 610efae912209472d818628778f1ebce5b0e2cc1..6ca63e8dda741b5e4094f7205f0b74a163be2e43 100644 (file)
@@ -65,8 +65,7 @@ void kasan_save_track(struct kasan_track *track, gfp_t flags)
 {
        depot_stack_handle_t stack;
 
-       stack = kasan_save_stack(flags,
-                       STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+       stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
        kasan_set_track(track, stack);
 }
 
@@ -266,10 +265,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                return true;
 
        /*
-        * If the object is not put into quarantine, it will likely be quickly
-        * reallocated. Thus, release its metadata now.
+        * Note: Keep per-object metadata to allow KASAN to print stack
+        * traces for use-after-free-before-realloc bugs.
         */
-       kasan_release_object_meta(cache, object);
 
        /* Let slab put the object onto the freelist. */
        return false;
index df6627f62402c01dab04e6955bf80e7fb4b4b2ae..1900f857603456ec20c1f7bb0841362624c11260 100644 (file)
@@ -485,16 +485,6 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
        if (alloc_meta) {
                /* Zero out alloc meta to mark it as invalid. */
                __memset(alloc_meta, 0, sizeof(*alloc_meta));
-
-               /*
-                * Prepare the lock for saving auxiliary stack traces.
-                * Temporarily disable KASAN bug reporting to allow instrumented
-                * raw_spin_lock_init to access aux_lock, which resides inside
-                * of a redzone.
-                */
-               kasan_disable_current();
-               raw_spin_lock_init(&alloc_meta->aux_lock);
-               kasan_enable_current();
        }
 
        /*
@@ -506,47 +496,23 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 
 static void release_alloc_meta(struct kasan_alloc_meta *meta)
 {
-       /* Evict the stack traces from stack depot. */
-       stack_depot_put(meta->alloc_track.stack);
-       stack_depot_put(meta->aux_stack[0]);
-       stack_depot_put(meta->aux_stack[1]);
-
-       /*
-        * Zero out alloc meta to mark it as invalid but keep aux_lock
-        * initialized to avoid having to reinitialize it when another object
-        * is allocated in the same slot.
-        */
-       __memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
-       __memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
+       /* Zero out alloc meta to mark it as invalid. */
+       __memset(meta, 0, sizeof(*meta));
 }
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
+       if (!kasan_arch_is_ready())
+               return;
+
        /* Check if free meta is valid. */
        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
                return;
 
-       /* Evict the stack trace from the stack depot. */
-       stack_depot_put(meta->free_track.stack);
-
        /* Mark free meta as invalid. */
        *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
 }
 
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
-{
-       struct kasan_alloc_meta *alloc_meta;
-       struct kasan_free_meta *free_meta;
-
-       alloc_meta = kasan_get_alloc_meta(cache, object);
-       if (alloc_meta)
-               release_alloc_meta(alloc_meta);
-
-       free_meta = kasan_get_free_meta(cache, object);
-       if (free_meta)
-               release_free_meta(object, free_meta);
-}
-
 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 {
        struct kasan_cache *info = &cache->kasan_info;
@@ -571,8 +537,6 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
        struct kmem_cache *cache;
        struct kasan_alloc_meta *alloc_meta;
        void *object;
-       depot_stack_handle_t new_handle, old_handle;
-       unsigned long flags;
 
        if (is_kfence_address(addr) || !slab)
                return;
@@ -583,33 +547,18 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
        if (!alloc_meta)
                return;
 
-       new_handle = kasan_save_stack(0, depot_flags);
-
-       /*
-        * Temporarily disable KASAN bug reporting to allow instrumented
-        * spinlock functions to access aux_lock, which resides inside of a
-        * redzone.
-        */
-       kasan_disable_current();
-       raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
-       old_handle = alloc_meta->aux_stack[1];
        alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-       alloc_meta->aux_stack[0] = new_handle;
-       raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
-       kasan_enable_current();
-
-       stack_depot_put(old_handle);
+       alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
 }
 
 void kasan_record_aux_stack(void *addr)
 {
-       return __kasan_record_aux_stack(addr,
-                       STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+       return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
 }
 
 void kasan_record_aux_stack_noalloc(void *addr)
 {
-       return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
+       return __kasan_record_aux_stack(addr, 0);
 }
 
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -620,7 +569,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
        if (!alloc_meta)
                return;
 
-       /* Evict previous stack traces (might exist for krealloc or mempool). */
+       /* Invalidate previous stack traces (might exist for krealloc or mempool). */
        release_alloc_meta(alloc_meta);
 
        kasan_save_track(&alloc_meta->alloc_track, flags);
@@ -634,7 +583,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
        if (!free_meta)
                return;
 
-       /* Evict previous stack trace (might exist for mempool). */
+       /* Invalidate previous stack trace (might exist for mempool). */
        release_free_meta(object, free_meta);
 
        kasan_save_track(&free_meta->free_track, 0);
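
With the lock and the stack_depot_put() calls gone, aux_stack degenerates to
a two-slot history updated without synchronization. A sketch of the resulting
invariant (reasoning about the change above, not new code):

/* The newest auxiliary trace goes to slot 0, the previous one to slot 1:
 *
 *	aux_stack[1] = aux_stack[0];
 *	aux_stack[0] = kasan_save_stack(0, depot_flags);
 *
 * Concurrent recorders may interleave these stores; the worst case is a
 * duplicated or dropped history entry, which is tolerable now that no
 * stack_depot_put() has to be paired with each overwritten handle.
 */
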
index d0f172f2b9783f1b1e73ea82ed5d3e6aaf2bec75..fb2b9ac0659a7add8f4ca95b9dcdc38b937cd216 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/kasan.h>
 #include <linux/kasan-tags.h>
 #include <linux/kfence.h>
-#include <linux/spinlock.h>
 #include <linux/stackdepot.h>
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -265,13 +264,6 @@ struct kasan_global {
 struct kasan_alloc_meta {
        struct kasan_track alloc_track;
        /* Free track is stored in kasan_free_meta. */
-       /*
-        * aux_lock protects aux_stack from accesses from concurrent
-        * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
-        * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
-        * non-sleepable contexts.
-        */
-       raw_spinlock_t aux_lock;
        depot_stack_handle_t aux_stack[2];
 };
 
@@ -398,10 +390,8 @@ struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                                                const void *object);
 void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
 #else
 static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
-static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
 #endif
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
index 3ba02efb952aac15b4e511ad985caae0d0935bac..6958aa713c67ee7b0d2c74af676ecb82788d22cd 100644 (file)
@@ -145,7 +145,10 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
        void *object = qlink_to_object(qlink, cache);
        struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
 
-       kasan_release_object_meta(cache, object);
+       /*
+        * Note: Keep per-object metadata to allow KASAN to print stack
+        * traces for use-after-free-before-realloc bugs.
+        */
 
        /*
         * If init_on_free is enabled and KASAN's free metadata is stored in
index 4dcb2ee35eca856a43694f4402dea0c1c9bf6d8a..d09136e040d3cc37b1b7b74a7cdd5d2c92eb8cb7 100644 (file)
@@ -180,8 +180,9 @@ static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
 /*
  * Address comparison utilities
  */
-static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
-                                      phys_addr_t base2, phys_addr_t size2)
+unsigned long __init_memblock
+memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
+                      phys_addr_t size2)
 {
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
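
memblock_addrs_overlap() is the standard strict half-open interval test. A
quick standalone check with hypothetical addresses:

#include <assert.h>

static int overlaps(unsigned long b1, unsigned long s1,
		    unsigned long b2, unsigned long s2)
{
	return (b1 < b2 + s2) && (b2 < b1 + s1);
}

int main(void)
{
	assert(overlaps(0x1000, 0x2000, 0x2000, 0x2000));  /* intersecting */
	assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000)); /* merely adjacent */
	return 0;
}
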
@@ -2249,6 +2250,7 @@ static const char * const flagname[] = {
        [ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
        [ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
        [ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
+       [ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
 };
 
 static int memblock_debug_show(struct seq_file *m, void *private)
index 1ed40f9d3a277ec8912c77326c5527a259a96c47..61932c9215e7734e4dfc7dc6e427c3692d1c3c6f 100644 (file)
@@ -7971,9 +7971,13 @@ bool mem_cgroup_swap_full(struct folio *folio)
 
 static int __init setup_swap_account(char *s)
 {
-       pr_warn_once("The swapaccount= commandline option is deprecated. "
-                    "Please report your usecase to linux-mm@kvack.org if you "
-                    "depend on this functionality.\n");
+       bool res;
+
+       if (!kstrtobool(s, &res) && !res)
+               pr_warn_once("The swapaccount=0 commandline option is deprecated "
+                            "in favor of configuring swap control via cgroupfs. "
+                            "Please report your usecase to linux-mm@kvack.org if you "
+                            "depend on this functionality.\n");
        return 1;
 }
 __setup("swapaccount=", setup_swap_account);
index 15f8b10ea17c4f28e857009372f50bfd774001b9..0bfc8b007c01a3323a15a17d51c4da46a6207540 100644 (file)
@@ -3799,6 +3799,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        struct page *page;
        struct swap_info_struct *si = NULL;
        rmap_t rmap_flags = RMAP_NONE;
+       bool need_clear_cache = false;
        bool exclusive = false;
        swp_entry_t entry;
        pte_t pte;
@@ -3867,6 +3868,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        if (!folio) {
                if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
                    __swap_count(entry) == 1) {
+                       /*
+                        * Prevent parallel swapin from proceeding with
+                        * the cache flag. Otherwise, another thread may
+                        * finish swapin first, free the entry, and swapout
+                        * reusing the same entry. It's undetectable as
+                        * pte_same() returns true due to entry reuse.
+                        */
+                       if (swapcache_prepare(entry)) {
+                               /* Relax a bit to prevent rapid repeated page faults */
+                               schedule_timeout_uninterruptible(1);
+                               goto out;
+                       }
+                       need_clear_cache = true;
+
                        /* skip swapcache */
                        folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
                                                vma, vmf->address, false);
@@ -4117,6 +4132,9 @@ unlock:
        if (vmf->pte)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
+       /* Clear the swap cache pin for direct swapin after PTL unlock */
+       if (need_clear_cache)
+               swapcache_clear(si, entry);
        if (si)
                put_swap_device(si);
        return ret;
@@ -4131,6 +4149,8 @@ out_release:
                folio_unlock(swapcache);
                folio_put(swapcache);
        }
+       if (need_clear_cache)
+               swapcache_clear(si, entry);
        if (si)
                put_swap_device(si);
        return ret;
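
The control flow of the new SWAP_HAS_CACHE pin, reduced to a sketch
(simplified; the real error paths are in the hunks above):

/*
 *	if (swapcache_prepare(entry))		// lost the race: back off
 *		schedule_timeout_uninterruptible(1), retry the fault;
 *	need_clear_cache = true;		// we own the pin
 *	... allocate folio, read from swap, install the PTE ...
 *	swapcache_clear(si, entry);		// drop the pin after PTL unlock
 *
 * While the pin is held, a parallel fault on the same entry fails
 * swapcache_prepare() instead of racing past the pte_same() check.
 */
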
index cc9f2bcd73b492aebacab4b812a515cf7e70b92b..c27b1f8097d4a72e569ce5a06be42b93184e9db0 100644 (file)
@@ -2519,6 +2519,14 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
                        if (managed_zone(pgdat->node_zones + z))
                                break;
                }
+
+               /*
+                * If there are no managed zones, it should not proceed
+                * further.
+                */
+               if (z < 0)
+                       return 0;
+
                wakeup_kswapd(pgdat->node_zones + z, 0,
                              folio_order(folio), ZONE_MOVABLE);
                return 0;
index d89770eaab6b6111117783ca7ff532871c1d71a5..3281287771c9c6100ebefde692bca06e247ae0f8 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -954,13 +954,21 @@ static struct vm_area_struct
        } else if (merge_prev) {                        /* case 2 */
                if (curr) {
                        vma_start_write(curr);
-                       err = dup_anon_vma(prev, curr, &anon_dup);
                        if (end == curr->vm_end) {      /* case 7 */
+                               /*
+                                * can_vma_merge_after() assumed we would not be
+                                * removing the prev vma, so it skipped the check
+                                * for vm_ops->close, but we are removing curr.
+                                */
+                               if (curr->vm_ops && curr->vm_ops->close)
+                                       err = -EINVAL;
                                remove = curr;
                        } else {                        /* case 5 */
                                adjust = curr;
                                adj_start = (end - curr->vm_start);
                        }
+                       if (!err)
+                               err = dup_anon_vma(prev, curr, &anon_dup);
                }
        } else { /* merge_next */
                vma_start_write(next);
index 150d4f23b01048ed7af53a74ec3e12a208fc17b5..a663202045dc437a4ff6186afb48e728fa30839a 100644 (file)
@@ -4041,6 +4041,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
 {
        bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
+       bool can_compact = gfp_compaction_allowed(gfp_mask);
        const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
        struct page *page = NULL;
        unsigned int alloc_flags;
@@ -4111,7 +4112,7 @@ restart:
         * Don't try this for allocations that are allowed to ignore
         * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
         */
-       if (can_direct_reclaim &&
+       if (can_direct_reclaim && can_compact &&
                        (costly_order ||
                           (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
                        && !gfp_pfmemalloc_allowed(gfp_mask)) {
@@ -4209,9 +4210,10 @@ retry:
 
        /*
         * Do not retry costly high order allocations unless they are
-        * __GFP_RETRY_MAYFAIL
+        * __GFP_RETRY_MAYFAIL and we can compact
         */
-       if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
+       if (costly_order && (!can_compact ||
+                            !(gfp_mask & __GFP_RETRY_MAYFAIL)))
                goto nopage;
 
        if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
@@ -4224,7 +4226,7 @@ retry:
         * implementation of the compaction depends on the sufficient amount
         * of free memory (see __compaction_suitable)
         */
-       if (did_some_progress > 0 &&
+       if (did_some_progress > 0 && can_compact &&
                        should_compact_retry(ac, order, alloc_flags,
                                compact_result, &compact_priority,
                                &compaction_retries))
index 758c46ca671ed110ae8e25fad48196d3feed03dc..fc2f6ade7f80b399707bcc67c44f813aea0b846d 100644 (file)
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -41,6 +41,7 @@ void __delete_from_swap_cache(struct folio *folio,
 void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                  unsigned long end);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
 struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr);
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
@@ -97,6 +98,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
        return 0;
 }
 
+static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+}
+
 static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
 {
index e671266ad77241f461a17cbb2e486fe48a423f69..7255c01a1e4e16d758186019f904e70a7890a5cc 100644 (file)
@@ -680,9 +680,10 @@ skip:
        /* The page was likely read above, so no need for plugging here */
        folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                        &page_allocated, false);
-       if (unlikely(page_allocated))
+       if (unlikely(page_allocated)) {
+               zswap_folio_swapin(folio);
                swap_read_folio(folio, false, NULL);
-       zswap_folio_swapin(folio);
+       }
        return folio;
 }
 
@@ -855,9 +856,10 @@ skip:
        /* The folio was likely read above, so no need for plugging here */
        folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
                                        &page_allocated, false);
-       if (unlikely(page_allocated))
+       if (unlikely(page_allocated)) {
+               zswap_folio_swapin(folio);
                swap_read_folio(folio, false, NULL);
-       zswap_folio_swapin(folio);
+       }
        return folio;
 }
 
index 556ff7347d5f04402b61cc5bd9d0d123a36dc1d5..746aa9da530255035b4624fefff862d416af836d 100644 (file)
@@ -3365,6 +3365,19 @@ int swapcache_prepare(swp_entry_t entry)
        return __swap_duplicate(entry, SWAP_HAS_CACHE);
 }
 
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+       struct swap_cluster_info *ci;
+       unsigned long offset = swp_offset(entry);
+       unsigned char usage;
+
+       ci = lock_cluster_or_swap_info(si, offset);
+       usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
+       unlock_cluster_or_swap_info(si, ci);
+       if (!usage)
+               free_swap_slot(entry);
+}
+
 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
 {
        return swap_type_to_swap_info(swp_type(entry));
index 7cf7d43842590ccd99bf37795918a7054b61a8c4..313f1c42768a621d59385a0673e0cdf85d5c1720 100644 (file)
@@ -914,9 +914,6 @@ static int move_present_pte(struct mm_struct *mm,
                goto out;
        }
 
-       folio_move_anon_rmap(src_folio, dst_vma);
-       WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
-
        orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
        /* Folio got pinned from under us. Put it back and fail the move. */
        if (folio_maybe_dma_pinned(src_folio)) {
@@ -925,6 +922,9 @@ static int move_present_pte(struct mm_struct *mm,
                goto out;
        }
 
+       folio_move_anon_rmap(src_folio, dst_vma);
+       WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
        orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
        /* Follow mremap() behavior and treat the entry dirty after the move */
        orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
index 4f9c854ce6cc66c2d971767a8f00e51eeab8a65e..4255619a1a314717df613e20090b160fce72a7e9 100644 (file)
@@ -5753,7 +5753,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 /* Use reclaim/compaction for costly allocs or under memory pressure */
 static bool in_reclaim_compaction(struct scan_control *sc)
 {
-       if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
+       if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
                        (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
                         sc->priority < DEF_PRIORITY - 2))
                return true;
@@ -5998,6 +5998,9 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
        unsigned long watermark;
 
+       if (!gfp_compaction_allowed(sc->gfp_mask))
+               return false;
+
        /* Allocation can already succeed, nothing to do */
        if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
                              sc->reclaim_idx, 0))
index 350dd2fc815994739d2012e0bcf483445350bb88..db4625af65fb7f6655a057e145bbe20dd64f7ae9 100644 (file)
@@ -377,10 +377,9 @@ void zswap_folio_swapin(struct folio *folio)
 {
        struct lruvec *lruvec;
 
-       if (folio) {
-               lruvec = folio_lruvec(folio);
-               atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-       }
+       VM_WARN_ON_ONCE(!folio_test_locked(folio));
+       lruvec = folio_lruvec(folio);
+       atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 }
 
 /*********************************
@@ -1440,6 +1439,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
        if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
                spin_unlock(&tree->lock);
                delete_from_swap_cache(folio);
+               folio_unlock(folio);
+               folio_put(folio);
                return -ENOMEM;
        }
        spin_unlock(&tree->lock);
@@ -1517,7 +1518,7 @@ bool zswap_store(struct folio *folio)
        if (folio_test_large(folio))
                return false;
 
-       if (!zswap_enabled || !tree)
+       if (!tree)
                return false;
 
        /*
@@ -1532,6 +1533,10 @@ bool zswap_store(struct folio *folio)
                zswap_invalidate_entry(tree, dupentry);
        }
        spin_unlock(&tree->lock);
+
+       if (!zswap_enabled)
+               return false;
+
        objcg = get_obj_cgroup_from_folio(folio);
        if (objcg && !obj_cgroup_may_zswap(objcg)) {
                memcg = get_mem_cgroup_from_objcg(objcg);
index 65601aa52e0d8b669ac8aaec116301398a5e865b..2821a42cefdc6e0f83fa4a765bc881a67795a2b5 100644 (file)
@@ -1049,6 +1049,7 @@ static void hci_error_reset(struct work_struct *work)
 {
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
 
+       hci_dev_hold(hdev);
        BT_DBG("%s", hdev->name);
 
        if (hdev->hw_error)
@@ -1056,10 +1057,10 @@ static void hci_error_reset(struct work_struct *work)
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
 
-       if (hci_dev_do_close(hdev))
-               return;
+       if (!hci_dev_do_close(hdev))
+               hci_dev_do_open(hdev);
 
-       hci_dev_do_open(hdev);
+       hci_dev_put(hdev);
 }
 
 void hci_uuids_clear(struct hci_dev *hdev)
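
The fix is the usual pin-across-deferred-work idiom: hold a device reference
for the lifetime of the handler so a concurrent unregister cannot free hdev
underneath it. In the abstract (sketch, not the patch itself):

static void example_error_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    error_reset);

	hci_dev_hold(hdev);	/* keep hdev alive across the handler */

	/* ... may sleep, close and possibly reopen the device ... */

	hci_dev_put(hdev);	/* balanced on every return path */
}
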
index ef8c3bed73617efa01052f7c84f170bee4666eef..2a5f5a7d2412be4aef32e8bfeb69cab0f6ad4fec 100644 (file)
@@ -5329,9 +5329,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-       if (!conn || !hci_conn_ssp_enabled(conn))
+       if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                goto unlock;
 
+       /* Assume remote supports SSP since it has triggered this event */
+       set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+
        hci_conn_hold(conn);
 
        if (!hci_dev_test_flag(hdev, HCI_MGMT))
@@ -6794,6 +6797,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
                return send_conn_param_neg_reply(hdev, handle,
                                                 HCI_ERROR_UNKNOWN_CONN_ID);
 
+       if (max > hcon->le_conn_max_interval)
+               return send_conn_param_neg_reply(hdev, handle,
+                                                HCI_ERROR_INVALID_LL_PARAMS);
+
        if (hci_check_conn_params(min, max, latency, timeout))
                return send_conn_param_neg_reply(hdev, handle,
                                                 HCI_ERROR_INVALID_LL_PARAMS);
@@ -7420,10 +7427,10 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
         * keep track of the bdaddr of the connection event that woke us up.
         */
        if (event == HCI_EV_CONN_REQUEST) {
-               bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
+               bacpy(&hdev->wake_addr, &conn_request->bdaddr);
                hdev->wake_addr_type = BDADDR_BREDR;
        } else if (event == HCI_EV_CONN_COMPLETE) {
-               bacpy(&hdev->wake_addr, &conn_request->bdaddr);
+               bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
                hdev->wake_addr_type = BDADDR_BREDR;
        } else if (event == HCI_EV_LE_META) {
                struct hci_ev_le_meta *le_ev = (void *)skb->data;
index a6fc8a2a5c673d5266ceb98bef1d69b70ae19e4c..5716345a26dfb757b540137fa7616f4af49d013c 100644 (file)
@@ -2206,8 +2206,11 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
 
        /* During suspend, only wakeable devices can be in acceptlist */
        if (hdev->suspended &&
-           !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
+           !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
+               hci_le_del_accept_list_sync(hdev, &params->addr,
+                                           params->addr_type);
                return 0;
+       }
 
        /* Select filter policy to accept all advertising */
        if (*num_entries >= hdev->le_accept_list_size)
@@ -5559,7 +5562,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
 
        bt_dev_dbg(hdev, "");
 
-       if (hci_dev_test_flag(hdev, HCI_INQUIRY))
+       if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;
 
        hci_dev_lock(hdev);
index 60298975d5c45620f21ca5fe161da1a9fdf55eec..656f49b299d20d9141b9579aef84acf3b81bff7e 100644 (file)
@@ -5613,7 +5613,13 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
        memset(&rsp, 0, sizeof(rsp));
 
-       err = hci_check_conn_params(min, max, latency, to_multiplier);
+       if (max > hcon->le_conn_max_interval) {
+               BT_DBG("requested connection interval exceeds current bounds.");
+               err = -EINVAL;
+       } else {
+               err = hci_check_conn_params(min, max, latency, to_multiplier);
+       }
+
        if (err)
                rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
        else
index bb72ff6eb22f4b30864aefd2588cce982d37d153..ee3b4aad8bd8d65239efc591cf33a631690a270f 100644 (file)
@@ -1045,6 +1045,8 @@ static void rpa_expired(struct work_struct *work)
        hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
 }
 
+static int set_discoverable_sync(struct hci_dev *hdev, void *data);
+
 static void discov_off(struct work_struct *work)
 {
        struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1063,7 +1065,7 @@ static void discov_off(struct work_struct *work)
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;
 
-       hci_update_discoverable(hdev);
+       hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
 
        mgmt_new_settings(hdev);
 
index 053ef8f25fae47b369068adb49f1391b32fd7bc9..1d34d8497033299907d341212c2977b2b1d9b870 100644 (file)
@@ -1941,7 +1941,7 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
        /* Get data directly from socket receive queue without copying it. */
        while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
                skb_orphan(skb);
-               if (!skb_linearize(skb)) {
+               if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) {
                        s = rfcomm_recv_frame(s, skb);
                        if (!s)
                                break;
index ed17208907578a231d283c04bd97ce48bebdffaa..35e10c5a766d550e0c5cb85cf5a0c4835b52a89d 100644 (file)
 #include <linux/sysctl.h>
 #endif
 
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack_core.h>
+#endif
+
 static unsigned int brnf_net_id __read_mostly;
 
 struct brnf_net {
@@ -553,6 +557,90 @@ static unsigned int br_nf_pre_routing(void *priv,
        return NF_STOLEN;
 }
 
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+/* conntrack's nf_confirm logic cannot handle cloned skbs referencing
+ * the same nf_conn entry, which will happen for multicast (broadcast)
+ * frames on bridges.
+ *
+ * Example:
+ *      macvlan0
+ *      br0
+ *  ethX  ethY
+ *
+ * ethX (or Y) receives a multicast or broadcast packet containing
+ * an IP packet that is not yet in the conntrack table.
+ *
+ * 1. skb passes through the bridge and fake-ip (br_netfilter) Prerouting.
+ *    -> skb->_nfct now references an unconfirmed entry.
+ * 2. skb is a broad/mcast packet. The bridge now passes clones out on
+ *    each bridge interface.
+ * 3. skb gets passed up the stack.
+ * 4. In the macvlan case, the macvlan driver retains clone(s) of the mcast
+ *    skb and schedules a work queue to send them out on the lower devices.
+ *
+ *    The clone skb->_nfct is not a copy, it is the same entry as the
+ *    original skb.  The macvlan rx handler then returns RX_HANDLER_PASS.
+ * 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb.
+ *
+ * The macvlan broadcast worker and the normal confirm path will race.
+ *
+ * This race will not happen if step 2 already confirmed a clone. In that
+ * case, later steps perform skb_clone() with skb->_nfct already confirmed
+ * (in the hash table).  This works fine.
+ *
+ * But such confirmation won't happen when eb/ip/nftables rules dropped the
+ * packets before they reached the nf_confirm step in postrouting.
+ *
+ * Work around this problem by explicitly confirming the entry at LOCAL_IN
+ * time, before the upper layer has a chance to clone the unconfirmed entry.
+ */
+static unsigned int br_nf_local_in(void *priv,
+                                  struct sk_buff *skb,
+                                  const struct nf_hook_state *state)
+{
+       struct nf_conntrack *nfct = skb_nfct(skb);
+       const struct nf_ct_hook *ct_hook;
+       struct nf_conn *ct;
+       int ret;
+
+       if (!nfct || skb->pkt_type == PACKET_HOST)
+               return NF_ACCEPT;
+
+       ct = container_of(nfct, struct nf_conn, ct_general);
+       if (likely(nf_ct_is_confirmed(ct)))
+               return NF_ACCEPT;
+
+       WARN_ON_ONCE(skb_shared(skb));
+       WARN_ON_ONCE(refcount_read(&nfct->use) != 1);
+
+       /* We can't call nf_confirm here: it would create a dependency
+        * on the nf_conntrack module.
+        */
+       ct_hook = rcu_dereference(nf_ct_hook);
+       if (!ct_hook) {
+               skb->_nfct = 0ul;
+               nf_conntrack_put(nfct);
+               return NF_ACCEPT;
+       }
+
+       nf_bridge_pull_encap_header(skb);
+       ret = ct_hook->confirm(skb);
+       switch (ret & NF_VERDICT_MASK) {
+       case NF_STOLEN:
+               return NF_STOLEN;
+       default:
+               nf_bridge_push_encap_header(skb);
+               break;
+       }
+
+       ct = container_of(nfct, struct nf_conn, ct_general);
+       WARN_ON_ONCE(!nf_ct_is_confirmed(ct));
+
+       return ret;
+}
+#endif
 
 /* PF_BRIDGE/FORWARD *************************************************/
 static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -964,6 +1052,14 @@ static const struct nf_hook_ops br_nf_ops[] = {
                .hooknum = NF_BR_PRE_ROUTING,
                .priority = NF_BR_PRI_BRNF,
        },
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       {
+               .hook = br_nf_local_in,
+               .pf = NFPROTO_BRIDGE,
+               .hooknum = NF_BR_LOCAL_IN,
+               .priority = NF_BR_PRI_LAST,
+       },
+#endif
        {
                .hook = br_nf_forward,
                .pf = NFPROTO_BRIDGE,
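
The br_nf_local_in() hunk above deliberately reaches conntrack's confirm handler through the RCU-published nf_ct_hook table rather than calling a conntrack symbol directly; per the in-code comment, a direct call would create a dependency on the nf_conntrack module. A self-contained userspace sketch of that indirection pattern, with hypothetical names (ct_hooks, maybe_confirm) standing in for the kernel types:

    #include <stdio.h>

    struct ct_hooks {
            int (*confirm)(void *skb);
    };

    static const struct ct_hooks *ct_hooks_ptr; /* NULL until the backend registers */

    static int maybe_confirm(void *skb)
    {
            const struct ct_hooks *h = ct_hooks_ptr; /* kernel: rcu_dereference(nf_ct_hook) */

            if (!h)
                    return 0;           /* backend absent: accept the packet as-is */
            return h->confirm(skb);     /* indirect call, no link-time dependency */
    }

    static int backend_confirm(void *skb) { (void)skb; return 1; }
    static const struct ct_hooks backend = { .confirm = backend_confirm };

    int main(void)
    {
            printf("%d\n", maybe_confirm(NULL)); /* 0: nothing registered */
            ct_hooks_ptr = &backend;             /* simulate module load */
            printf("%d\n", maybe_confirm(NULL)); /* 1: dispatched through the table */
            return 0;
    }
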
index ee84e783e1dff5b67994a3ba5a4e5d8aa875eeef..7b41ee8740cbbaf6b959d9273c49ebcd4830a5c8 100644 (file)
@@ -595,21 +595,40 @@ br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
 }
 
 static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
+                                     struct net_device *dev,
+                                     unsigned long action,
                                      enum switchdev_obj_id id,
                                      const struct net_bridge_mdb_entry *mp,
                                      struct net_device *orig_dev)
 {
-       struct switchdev_obj_port_mdb *mdb;
+       struct switchdev_obj_port_mdb mdb = {
+               .obj = {
+                       .id = id,
+                       .orig_dev = orig_dev,
+               },
+       };
+       struct switchdev_obj_port_mdb *pmdb;
 
-       mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
-       if (!mdb)
-               return -ENOMEM;
+       br_switchdev_mdb_populate(&mdb, mp);
+
+       if (action == SWITCHDEV_PORT_OBJ_ADD &&
+           switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
+               /* This event is already in the deferred queue of
+                * events, so this replay must be elided, lest the
+                * driver receives duplicate events for it. This can
+                * only happen when replaying additions, since
+                * modifications are always immediately visible in
+                * br->mdb_list, whereas actual event delivery may be
+                * delayed.
+                */
+               return 0;
+       }
 
-       mdb->obj.id = id;
-       mdb->obj.orig_dev = orig_dev;
-       br_switchdev_mdb_populate(mdb, mp);
-       list_add_tail(&mdb->obj.list, mdb_list);
+       pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
+       if (!pmdb)
+               return -ENOMEM;
 
+       list_add_tail(&pmdb->obj.list, mdb_list);
        return 0;
 }
 
@@ -677,51 +696,50 @@ br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
                return 0;
 
-       /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
-        * because the write-side protection is br->multicast_lock. But we
-        * need to emulate the [ blocking ] calling context of a regular
-        * switchdev event, so since both br->multicast_lock and RCU read side
-        * critical sections are atomic, we have no choice but to pick the RCU
-        * read side lock, queue up all our events, leave the critical section
-        * and notify switchdev from blocking context.
+       if (adding)
+               action = SWITCHDEV_PORT_OBJ_ADD;
+       else
+               action = SWITCHDEV_PORT_OBJ_DEL;
+
+       /* br_switchdev_mdb_queue_one() will take care to not queue a
+        * replay of an event that is already pending in the switchdev
+        * deferred queue. In order to safely determine that, there
+        * must be no new deferred MDB notifications enqueued for the
+        * duration of the MDB scan. Therefore, grab the write-side
+        * lock to avoid racing with any concurrent IGMP/MLD snooping.
         */
-       rcu_read_lock();
+       spin_lock_bh(&br->multicast_lock);
 
-       hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
+       hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
                struct net_bridge_port_group __rcu * const *pp;
                const struct net_bridge_port_group *p;
 
                if (mp->host_joined) {
-                       err = br_switchdev_mdb_queue_one(&mdb_list,
+                       err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
                                                         SWITCHDEV_OBJ_ID_HOST_MDB,
                                                         mp, br_dev);
                        if (err) {
-                               rcu_read_unlock();
+                               spin_unlock_bh(&br->multicast_lock);
                                goto out_free_mdb;
                        }
                }
 
-               for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
+               for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
                     pp = &p->next) {
                        if (p->key.port->dev != dev)
                                continue;
 
-                       err = br_switchdev_mdb_queue_one(&mdb_list,
+                       err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
                                                         SWITCHDEV_OBJ_ID_PORT_MDB,
                                                         mp, dev);
                        if (err) {
-                               rcu_read_unlock();
+                               spin_unlock_bh(&br->multicast_lock);
                                goto out_free_mdb;
                        }
                }
        }
 
-       rcu_read_unlock();
-
-       if (adding)
-               action = SWITCHDEV_PORT_OBJ_ADD;
-       else
-               action = SWITCHDEV_PORT_OBJ_DEL;
+       spin_unlock_bh(&br->multicast_lock);
 
        list_for_each_entry(obj, &mdb_list, list) {
                err = br_switchdev_mdb_replay_one(nb, dev,
@@ -786,6 +804,16 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
        br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
 
        br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
+
+       /* Make sure that the device leaving this bridge has seen all
+        * relevant events before it is disassociated. In the normal
+        * case, when the device is directly attached to the bridge,
+        * this is covered by del_nbp(). If the association was indirect
+        * however, e.g. via a team or bond, and the device is leaving
+        * that intermediate device, then the bridge port remains in
+        * place.
+        */
+       switchdev_deferred_process();
 }
 
 /* Let the bridge know that this port is offloaded, so that it can assign a
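
The elision added to br_switchdev_mdb_queue_one() reduces to: before queueing a replayed ADD, check whether an identical event is already pending in the deferred queue, since the driver will see that one anyway. A toy sketch of the idea, with an invented ev struct and a flat array standing in for the switchdev object and its deferred queue:

    #include <stdbool.h>
    #include <string.h>

    struct ev { int action; int obj_id; };      /* 0 == ADD */

    static struct ev deferred[64];              /* events awaiting delivery */
    static int n_deferred;

    static bool is_deferred(const struct ev *e)
    {
            for (int i = 0; i < n_deferred; i++)
                    if (!memcmp(&deferred[i], e, sizeof(*e)))
                            return true;
            return false;
    }

    static int queue_replay(struct ev *list, int *n, const struct ev *e)
    {
            if (e->action == 0 && is_deferred(e))
                    return 0;                   /* already pending: elide the replay */
            list[(*n)++] = *e;
            return 0;
    }

    int main(void)
    {
            struct ev replay[64];
            int n = 0;

            deferred[n_deferred++] = (struct ev){ 0, 7 };
            queue_replay(replay, &n, &(struct ev){ 0, 7 }); /* elided */
            queue_replay(replay, &n, &(struct ev){ 0, 9 }); /* queued */
            return n != 1;
    }
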
index abb090f94ed2609eeb9cd54b4e5faed1c3cb7bfe..6f877e31709bad3646ea15bf3a96999ed275bdc1 100644 (file)
@@ -291,6 +291,30 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
        return nf_conntrack_in(skb, &bridge_state);
 }
 
+static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
+                                   const struct nf_hook_state *state)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       if (skb->pkt_type == PACKET_HOST)
+               return NF_ACCEPT;
+
+       /* nf_conntrack_confirm() cannot handle concurrent clones,
+        * this happens for broad/multicast frames with e.g. macvlan on top
+        * of the bridge device.
+        */
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
+               return NF_ACCEPT;
+
+       /* let inet prerouting call conntrack again */
+       skb->_nfct = 0;
+       nf_ct_put(ct);
+
+       return NF_ACCEPT;
+}
+
 static void nf_ct_bridge_frag_save(struct sk_buff *skb,
                                   struct nf_bridge_frag_data *data)
 {
@@ -385,6 +409,12 @@ static struct nf_hook_ops nf_ct_bridge_hook_ops[] __read_mostly = {
                .hooknum        = NF_BR_PRE_ROUTING,
                .priority       = NF_IP_PRI_CONNTRACK,
        },
+       {
+               .hook           = nf_ct_bridge_in,
+               .pf             = NFPROTO_BRIDGE,
+               .hooknum        = NF_BR_LOCAL_IN,
+               .priority       = NF_IP_PRI_CONNTRACK_CONFIRM,
+       },
        {
                .hook           = nf_ct_bridge_post,
                .pf             = NFPROTO_BRIDGE,
index a0ca5414b333df92b3aa0085a95b928cdc0609a5..bd608ffa06279704b5f4f43e5e369035e3ff032c 100644 (file)
@@ -2034,6 +2034,9 @@ static int prepare_sparse_read_data(struct ceph_connection *con)
        if (!con_secure(con))
                con->in_data_crc = -1;
 
+       ceph_msg_data_cursor_init(&con->v2.in_cursor, msg,
+                                 msg->sparse_read_total);
+
        reset_in_kvecs(con);
        con->v2.in_state = IN_S_PREPARE_SPARSE_DATA_CONT;
        con->v2.data_len_remain = data_len(msg);
index 73a0219730075e666c4f11f668a50dbf9f9afa97..76e6438f4858e246dfebb78364a253e77f9a86b4 100644 (file)
@@ -9074,28 +9074,6 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
 }
 EXPORT_SYMBOL(netdev_port_same_parent_id);
 
-static void netdev_dpll_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
-{
-#if IS_ENABLED(CONFIG_DPLL)
-       rtnl_lock();
-       dev->dpll_pin = dpll_pin;
-       rtnl_unlock();
-#endif
-}
-
-void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
-{
-       WARN_ON(!dpll_pin);
-       netdev_dpll_pin_assign(dev, dpll_pin);
-}
-EXPORT_SYMBOL(netdev_dpll_pin_set);
-
-void netdev_dpll_pin_clear(struct net_device *dev)
-{
-       netdev_dpll_pin_assign(dev, NULL);
-}
-EXPORT_SYMBOL(netdev_dpll_pin_clear);
-
 /**
  *     dev_change_proto_down - set carrier according to proto_down.
  *
index ffe5244e5597e806e1cbd2dc82894276e107e91c..278294aca66ababdf5f7d383833ff5496255b274 100644 (file)
@@ -94,11 +94,12 @@ netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
                        state->pp_id = pool->user.id;
                        err = fill(skb, pool, info);
                        if (err)
-                               break;
+                               goto out;
                }
 
                state->pp_id = 0;
        }
+out:
        mutex_unlock(&page_pools_lock);
        rtnl_unlock();
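
The netdev_nl_page_pool_get_dump() change swaps break for goto out because break only leaves the innermost loop; an error while filling one pool's entry would otherwise continue with the next outer iteration instead of aborting the dump. The trap in isolation:

    #include <stdio.h>

    int main(void)
    {
            int err = 0;

            for (int netns = 0; netns < 3; netns++) {
                    for (int pool = 0; pool < 3; pool++) {
                            if (netns == 1 && pool == 1) {
                                    err = -1;
                                    goto out; /* break would only end the pool loop */
                            }
                    }
            }
    out:
            printf("err=%d\n", err);
            return 0;
    }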
 
index 9c4f427f3a5057b52ec05405e8b15b8ca2246b4b..bd50e9fe3234b6c252a199f05adcae2a09b1bbdd 100644 (file)
@@ -1057,7 +1057,7 @@ static size_t rtnl_dpll_pin_size(const struct net_device *dev)
 {
        size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
 
-       size += dpll_msg_pin_handle_size(netdev_dpll_pin(dev));
+       size += dpll_netdev_pin_handle_size(dev);
 
        return size;
 }
@@ -1792,7 +1792,7 @@ static int rtnl_fill_dpll_pin(struct sk_buff *skb,
        if (!dpll_pin_nest)
                return -EMSGSIZE;
 
-       ret = dpll_msg_add_pin_handle(skb, netdev_dpll_pin(dev));
+       ret = dpll_netdev_add_pin_handle(skb, dev);
        if (ret < 0)
                goto nest_cancel;
 
@@ -5169,10 +5169,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        struct ifinfomsg *ifm;
        struct net_device *dev;
-       struct nlattr *br_spec, *attr = NULL;
+       struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
        int rem, err = -EOPNOTSUPP;
        u16 flags = 0;
-       bool have_flags = false;
 
        if (nlmsg_len(nlh) < sizeof(*ifm))
                return -EINVAL;
@@ -5190,11 +5189,11 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        if (br_spec) {
                nla_for_each_nested(attr, br_spec, rem) {
-                       if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
+                       if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
                                if (nla_len(attr) < sizeof(flags))
                                        return -EINVAL;
 
-                               have_flags = true;
+                               br_flags_attr = attr;
                                flags = nla_get_u16(attr);
                        }
 
@@ -5238,8 +5237,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
                }
        }
 
-       if (have_flags)
-               memcpy(nla_data(attr), &flags, sizeof(flags));
+       if (br_flags_attr)
+               memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
 out:
        return err;
 }
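
The rtnl_bridge_setlink() fix records a pointer to the matching attribute instead of a bool because the nla_for_each_nested() cursor keeps advancing after the match, so by the time the applied flags are written back it no longer necessarily points at the IFLA_BRIDGE_FLAGS attribute. The same bug in miniature, with a plain array standing in for the attribute stream:

    #include <stdio.h>

    int main(void)
    {
            int attrs[4] = { 10, 20, 30, 40 };
            int *cursor, *match = NULL;

            for (cursor = attrs; cursor < attrs + 4; cursor++)
                    if (*cursor == 20 && !match)
                            match = cursor;         /* capture the element itself */

            /* cursor is now one past the array; match still points at attrs[1] */
            if (match)
                    *match = 21;                    /* the write-back lands correctly */
            printf("%d\n", attrs[1]);               /* 21 */
            return 0;
    }
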
index 93ecfceac1bc49bd843728518215ade5ced374a5..4d75ef9d24bfa7cbffe642448f5116ac0b943ed2 100644 (file)
@@ -1226,8 +1226,11 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
 
                rcu_read_lock();
                psock = sk_psock(sk);
-               if (psock)
-                       psock->saved_data_ready(sk);
+               if (psock) {
+                       read_lock_bh(&sk->sk_callback_lock);
+                       sk_psock_data_ready(sk, psock);
+                       read_unlock_bh(&sk->sk_callback_lock);
+               }
                rcu_read_unlock();
        }
 }
index 0a7f46c37f0cfc169e11377107c8342c229da0de..5e78798456fd81dbd34e94021531340f7ba5ab0a 100644 (file)
@@ -1188,6 +1188,17 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
                 */
                WRITE_ONCE(sk->sk_txrehash, (u8)val);
                return 0;
+       case SO_PEEK_OFF:
+               {
+               int (*set_peek_off)(struct sock *sk, int val);
+
+               set_peek_off = READ_ONCE(sock->ops)->set_peek_off;
+               if (set_peek_off)
+                       ret = set_peek_off(sk, val);
+               else
+                       ret = -EOPNOTSUPP;
+               return ret;
+               }
        }
 
        sockopt_lock_sock(sk);
@@ -1430,18 +1441,6 @@ set_sndbuf:
                sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
                break;
 
-       case SO_PEEK_OFF:
-               {
-               int (*set_peek_off)(struct sock *sk, int val);
-
-               set_peek_off = READ_ONCE(sock->ops)->set_peek_off;
-               if (set_peek_off)
-                       ret = set_peek_off(sk, val);
-               else
-                       ret = -EOPNOTSUPP;
-               break;
-               }
-
        case SO_NOFCS:
                sock_valbool_flag(sk, SOCK_NOFCS, valbool);
                break;
index 6a58342752b4690d1f13d19eb94ee1d44b9cda61..7f0b093208d75b91e25cb78a73bece8ef2577831 100644 (file)
@@ -529,14 +529,20 @@ static int __init devlink_init(void)
 {
        int err;
 
-       err = genl_register_family(&devlink_nl_family);
-       if (err)
-               goto out;
        err = register_pernet_subsys(&devlink_pernet_ops);
        if (err)
                goto out;
+       err = genl_register_family(&devlink_nl_family);
+       if (err)
+               goto out_unreg_pernet_subsys;
        err = register_netdevice_notifier(&devlink_port_netdevice_nb);
+       if (!err)
+               return 0;
+
+       genl_unregister_family(&devlink_nl_family);
 
+out_unreg_pernet_subsys:
+       unregister_pernet_subsys(&devlink_pernet_ops);
 out:
        WARN_ON(err);
        return err;
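
The devlink_init() reordering follows the usual kernel convention that error unwinding tears down the already-completed steps in reverse registration order, which only works when each goto label sits below exactly the undo calls it needs. A generic sketch with hypothetical step_a/step_b/step_c sub-inits:

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }          /* force the unwind path */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int init_all(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto out;
            err = step_b();
            if (err)
                    goto err_undo_a;
            err = step_c();
            if (!err)
                    return 0;

            undo_b();                               /* reverse order: b, then a */
    err_undo_a:
            undo_a();
    out:
            return err;
    }

    int main(void) { return -init_all(); }
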
index 78592912f657c934885077c900ef95b9d79aed4c..4b2d46ccfe484f1ae2c21b5b2921a113d59e13f5 100644 (file)
@@ -583,7 +583,7 @@ devlink_nl_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
 
        xa_for_each_start(&devlink->ports, port_index, devlink_port, state->idx) {
                err = devlink_nl_port_fill(msg, devlink_port,
-                                          DEVLINK_CMD_NEW,
+                                          DEVLINK_CMD_PORT_NEW,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, flags,
                                           cb->extack);
index 80cdc6f6b34c97601961179c4839dc68c0a6d2e1..5d68cb181695d9a9f83809142a0300b8ddad5f53 100644 (file)
@@ -83,7 +83,7 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
                return false;
 
        /* Get next tlv */
-       total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
+       total_length += hsr_sup_tag->tlv.HSR_TLV_length;
        if (!pskb_may_pull(skb, total_length))
                return false;
        skb_pull(skb, total_length);
@@ -435,7 +435,7 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
                        continue;
 
                /* Don't send frame over port where it has been sent before.
-                * Also fro SAN, this shouldn't be done.
+                * Also for SAN, this shouldn't be done.
                 */
                if (!frame->is_from_san &&
                    hsr_register_frame_out(port, frame->node_src,
index 9456f5bb35e5d9e97d6c05be21561b435e2b704a..0d0d725b46ad0c56b19b6356f6d3e6be8bdcae83 100644 (file)
@@ -1125,7 +1125,8 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
        if (neigh) {
                if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) {
                        read_lock_bh(&neigh->lock);
-                       memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+                       memcpy(r->arp_ha.sa_data, neigh->ha,
+                              min(dev->addr_len, sizeof(r->arp_ha.sa_data_min)));
                        r->arp_flags = arp_state_to_flags(neigh);
                        read_unlock_bh(&neigh->lock);
                        r->arp_ha.sa_family = dev->type;
index ca0ff15dc8fa358b81a804eda7398ecd10f00743..bc74f131fe4dfad327e71c1a8f0a4b66cdc526e5 100644 (file)
@@ -1825,6 +1825,21 @@ done:
        return err;
 }
 
+/* Combine dev_addr_genid and dev_base_seq to detect changes.
+ */
+static u32 inet_base_seq(const struct net *net)
+{
+       u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
+                 net->dev_base_seq;
+
+       /* Must not return 0 (see nl_dump_check_consistent()).
+        * Choose a value far away from 0.
+        */
+       if (!res)
+               res = 0x80000000;
+       return res;
+}
+
 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 {
        const struct nlmsghdr *nlh = cb->nlh;
@@ -1876,8 +1891,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                idx = 0;
                head = &tgt_net->dev_index_head[h];
                rcu_read_lock();
-               cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
-                         tgt_net->dev_base_seq;
+               cb->seq = inet_base_seq(tgt_net);
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
@@ -2278,8 +2292,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
                idx = 0;
                head = &net->dev_index_head[h];
                rcu_read_lock();
-               cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
-                         net->dev_base_seq;
+               cb->seq = inet_base_seq(net);
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
index 93e9193df54461b25c61089bd5db4dd33c32dab6..308ff34002ea6b5e0620004f65ffd833087afbc1 100644 (file)
@@ -1130,10 +1130,33 @@ ok:
        return 0;
 
 error:
+       if (sk_hashed(sk)) {
+               spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);
+
+               sock_prot_inuse_add(net, sk->sk_prot, -1);
+
+               spin_lock(lock);
+               sk_nulls_del_node_init_rcu(sk);
+               spin_unlock(lock);
+
+               sk->sk_hash = 0;
+               inet_sk(sk)->inet_sport = 0;
+               inet_sk(sk)->inet_num = 0;
+
+               if (tw)
+                       inet_twsk_bind_unhash(tw, hinfo);
+       }
+
        spin_unlock(&head2->lock);
        if (tb_created)
                inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
-       spin_unlock_bh(&head->lock);
+       spin_unlock(&head->lock);
+
+       if (tw)
+               inet_twsk_deschedule_put(tw);
+
+       local_bh_enable();
+
        return -ENOMEM;
 }
 
index a4513ffb66cbb74c14112bbc4c1d36d02e7f659b..1b6981de3f29514dac72161be02f3ac6e4625551 100644 (file)
@@ -554,6 +554,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
        return 0;
 }
 
+static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
+{
+       /* we must cap headroom to some upper limit, else pskb_expand_head
+        * will overflow header offsets in skb_headers_offset_update().
+        */
+       static const unsigned int max_allowed = 512;
+
+       if (headroom > max_allowed)
+               headroom = max_allowed;
+
+       if (headroom > READ_ONCE(dev->needed_headroom))
+               WRITE_ONCE(dev->needed_headroom, headroom);
+}
+
 void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                       u8 proto, int tunnel_hlen)
 {
@@ -632,13 +646,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 
        headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
-       if (headroom > READ_ONCE(dev->needed_headroom))
-               WRITE_ONCE(dev->needed_headroom, headroom);
-
-       if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+       if (skb_cow_head(skb, headroom)) {
                ip_rt_put(rt);
                goto tx_dropped;
        }
+
+       ip_tunnel_adj_headroom(dev, headroom);
+
        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
                      df, !net_eq(tunnel->net, dev_net(dev)));
        return;
@@ -818,16 +832,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
-       if (max_headroom > READ_ONCE(dev->needed_headroom))
-               WRITE_ONCE(dev->needed_headroom, max_headroom);
 
-       if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+       if (skb_cow_head(skb, max_headroom)) {
                ip_rt_put(rt);
                DEV_STATS_INC(dev, tx_dropped);
                kfree_skb(skb);
                return;
        }
 
+       ip_tunnel_adj_headroom(dev, max_headroom);
+
        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
                      df, !net_eq(tunnel->net, dev_net(dev)));
        return;
index f631b0a21af4c7a520212c94ed0580f86d269ed2..e474b201900f9317069a31e4b507964fe11b2297 100644 (file)
@@ -1589,12 +1589,7 @@ int udp_init_sock(struct sock *sk)
 
 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
 {
-       if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
-               bool slow = lock_sock_fast(sk);
-
-               sk_peek_offset_bwd(sk, len);
-               unlock_sock_fast(sk, slow);
-       }
+       sk_peek_offset_bwd(sk, len);
 
        if (!skb_unref(skb))
                return;
index 733ace18806c61f487d83081dc6d39d079959f77..055230b669cf21d87738a4371543c599c3476f98 100644 (file)
@@ -708,6 +708,22 @@ errout:
        return err;
 }
 
+/* Combine dev_addr_genid and dev_base_seq to detect changes.
+ */
+static u32 inet6_base_seq(const struct net *net)
+{
+       u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
+                 net->dev_base_seq;
+
+       /* Must not return 0 (see nl_dump_check_consistent()).
+        * Choose a value far away from 0.
+        */
+       if (!res)
+               res = 0x80000000;
+       return res;
+}
+
+
 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
                                      struct netlink_callback *cb)
 {
@@ -741,8 +757,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
                idx = 0;
                head = &net->dev_index_head[h];
                rcu_read_lock();
-               cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
-                         net->dev_base_seq;
+               cb->seq = inet6_base_seq(net);
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
@@ -5362,7 +5377,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
        }
 
        rcu_read_lock();
-       cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
+       cb->seq = inet6_base_seq(tgt_net);
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &tgt_net->dev_index_head[h];
@@ -5494,9 +5509,10 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
        }
 
        addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
-       if (!addr)
-               return -EINVAL;
-
+       if (!addr) {
+               err = -EINVAL;
+               goto errout;
+       }
        ifm = nlmsg_data(nlh);
        if (ifm->ifa_index)
                dev = dev_get_by_index(tgt_net, ifm->ifa_index);
index 4952ae792450575d275f1565d2bc198e440b67f6..02e9ffb63af1971c0949ccd0c392b995efb41ccb 100644 (file)
@@ -177,6 +177,8 @@ static bool ip6_parse_tlv(bool hopbyhop,
                                case IPV6_TLV_IOAM:
                                        if (!ipv6_hop_ioam(skb, off))
                                                return false;
+
+                                       nh = skb_network_header(skb);
                                        break;
                                case IPV6_TLV_JUMBO:
                                        if (!ipv6_hop_jumbo(skb, off))
@@ -943,6 +945,14 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
                if (!skb_valid_dst(skb))
                        ip6_route_input(skb);
 
+               /* About to mangle packet header */
+               if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len))
+                       goto drop;
+
+               /* Trace pointer may have changed */
+               trace = (struct ioam6_trace_hdr *)(skb_network_header(skb)
+                                                  + optoff + sizeof(*hdr));
+
                ioam6_fill_trace_data(skb, ns, trace, true);
                break;
        default:
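
Both ioam6 hunks re-derive pointers (nh, trace) after skb_ensure_writable(), which may invoke pskb_expand_head() and relocate the packet headers, leaving previously computed pointers stale. The same hazard in plain C, with realloc() as a stand-in for head reallocation:

    #include <stdlib.h>
    #include <string.h>

    struct pkt { char *head; size_t len; };

    static int ensure_writable(struct pkt *p, size_t need)
    {
            char *n;

            if (need <= p->len)
                    return 0;
            n = realloc(p->head, need);     /* the buffer may move */
            if (!n)
                    return -1;
            p->head = n;
            p->len = need;
            return 0;
    }

    int main(void)
    {
            struct pkt p = { malloc(8), 8 };
            char *opt = p.head + 4;         /* pointer cached before the resize */

            if (ensure_writable(&p, 4096) == 0)
                    opt = p.head + 4;       /* recompute: the old opt may be stale */
            memset(opt, 0, 4);
            free(p.head);
            return 0;
    }
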
index ea1dec8448fce8ccf29be650301e937cfce6bd7a..ef815ba583a8f4ed0ca523a13c515f108132a939 100644 (file)
@@ -5332,19 +5332,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
        err_nh = NULL;
        list_for_each_entry(nh, &rt6_nh_list, next) {
                err = __ip6_ins_rt(nh->fib6_info, info, extack);
-               fib6_info_release(nh->fib6_info);
-
-               if (!err) {
-                       /* save reference to last route successfully inserted */
-                       rt_last = nh->fib6_info;
-
-                       /* save reference to first route for notification */
-                       if (!rt_notif)
-                               rt_notif = nh->fib6_info;
-               }
 
-               /* nh->fib6_info is used or freed at this point, reset to NULL*/
-               nh->fib6_info = NULL;
                if (err) {
                        if (replace && nhn)
                                NL_SET_ERR_MSG_MOD(extack,
@@ -5352,6 +5340,12 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                        err_nh = nh;
                        goto add_errout;
                }
+               /* save reference to last route successfully inserted */
+               rt_last = nh->fib6_info;
+
+               /* save reference to first route for notification */
+               if (!rt_notif)
+                       rt_notif = nh->fib6_info;
 
                /* Because each route is added like a single route we remove
                 * these flags after the first nexthop: if there is a collision,
@@ -5412,8 +5406,7 @@ add_errout:
 
 cleanup:
        list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
-               if (nh->fib6_info)
-                       fib6_info_release(nh->fib6_info);
+               fib6_info_release(nh->fib6_info);
                list_del(&nh->next);
                kfree(nh);
        }
index 29346a6eec9ffed46b00153c4a6cb0295a327ceb..35508abd76f43d771ed7e66f29bc143af4a81977 100644 (file)
@@ -512,22 +512,24 @@ int __init seg6_init(void)
 {
        int err;
 
-       err = genl_register_family(&seg6_genl_family);
+       err = register_pernet_subsys(&ip6_segments_ops);
        if (err)
                goto out;
 
-       err = register_pernet_subsys(&ip6_segments_ops);
+       err = genl_register_family(&seg6_genl_family);
        if (err)
-               goto out_unregister_genl;
+               goto out_unregister_pernet;
 
 #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
        err = seg6_iptunnel_init();
        if (err)
-               goto out_unregister_pernet;
+               goto out_unregister_genl;
 
        err = seg6_local_init();
-       if (err)
-               goto out_unregister_pernet;
+       if (err) {
+               seg6_iptunnel_exit();
+               goto out_unregister_genl;
+       }
 #endif
 
 #ifdef CONFIG_IPV6_SEG6_HMAC
@@ -548,11 +550,11 @@ out_unregister_iptun:
 #endif
 #endif
 #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
-out_unregister_pernet:
-       unregister_pernet_subsys(&ip6_segments_ops);
-#endif
 out_unregister_genl:
        genl_unregister_family(&seg6_genl_family);
+#endif
+out_unregister_pernet:
+       unregister_pernet_subsys(&ip6_segments_ops);
        goto out;
 }
 
index 6334f64f04d5f28c7e01e959d18b343d7c641336..b0b3e9c5af44fdd83b0a108bdeb0f3f6a3ffb85e 100644 (file)
@@ -156,7 +156,7 @@ static char iucv_error_pathid[16] = "INVALID PATHID";
 static LIST_HEAD(iucv_handler_list);
 
 /*
- * iucv_path_table: an array of iucv_path structures.
+ * iucv_path_table: array of pointers to iucv_path structures.
  */
 static struct iucv_path **iucv_path_table;
 static unsigned long iucv_max_pathid;
@@ -544,7 +544,7 @@ static int iucv_enable(void)
 
        cpus_read_lock();
        rc = -ENOMEM;
-       alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
+       alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
        iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
        if (!iucv_path_table)
                goto out;
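
The iucv fix corrects a classic sizeof mismatch: iucv_path_table is an array of pointers, but the allocation was sized by the pointee struct, over-allocating by the ratio of the two sizes. Writing sizeof(*table) keeps the element size correct by construction; a quick demonstration:

    #include <stdio.h>
    #include <stdlib.h>

    struct path { char blob[64]; };

    int main(void)
    {
            size_t n = 1024;
            struct path **table;

            /* sizeof is unevaluated, so naming table here is safe */
            printf("wrong: %zu bytes\n", n * sizeof(struct path)); /* 65536 */
            printf("right: %zu bytes\n", n * sizeof(*table));      /* 8192 on LP64 */

            table = calloc(n, sizeof(*table));
            free(table);
            return 0;
    }
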
index dd3153966173db09d42de02fa3ad4d44d05620f4..7bf14cf9ffaa967483ac0ee01e3f8e835754cd57 100644 (file)
@@ -627,7 +627,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 back_from_confirm:
        lock_sock(sk);
-       ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0;
+       ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
        err = ip6_append_data(sk, ip_generic_getfrag, msg,
                              ulen, transhdrlen, &ipc6,
                              &fl6, (struct rt6_info *)dst,
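
The one-character l2tp_ip6_sendmsg() fix is an operator-precedence bug: the conditional operator binds more loosely than +, so the unparenthesized expression used len + skb_queue_empty(...) as the condition and discarded len from the result. Demonstrated in isolation:

    #include <stdio.h>

    int main(void)
    {
            int len = 100, queue_empty = 1, transhdrlen = 8;

            int buggy = len + queue_empty ? transhdrlen : 0;   /* (100+1) ? 8 : 0 == 8 */
            int fixed = len + (queue_empty ? transhdrlen : 0); /* 108 */

            printf("%d %d\n", buggy, fixed);
            return 0;
    }
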
index d5ea5f5bcf3a069e1d4dc5dd2638275e58aae51f..9d33fd2377c88af8ec38b6e398d103449f3b03b8 100644 (file)
@@ -119,7 +119,8 @@ void rate_control_rate_update(struct ieee80211_local *local,
                rcu_read_unlock();
        }
 
-       drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+       if (sta->uploaded)
+               drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
 }
 
 int ieee80211_rate_control_register(const struct rate_control_ops *ops)
index 7a47a58aa54b446acf7451ba6bdc1b834adda327..ceee44ea09d97b025a490058403cf435e3337ef5 100644 (file)
@@ -663,7 +663,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
        spin_unlock_irqrestore(&mns->keys_lock, flags);
 
        if (!tagbits) {
-               kfree(key);
+               mctp_key_unref(key);
                return ERR_PTR(-EBUSY);
        }
 
@@ -888,7 +888,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
                dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex);
                if (!dev) {
                        rcu_read_unlock();
-                       return rc;
+                       goto out_free;
                }
                rt->dev = __mctp_dev_get(dev);
                rcu_read_unlock();
@@ -903,7 +903,8 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
                rt->mtu = 0;
 
        } else {
-               return -EINVAL;
+               rc = -EINVAL;
+               goto out_free;
        }
 
        spin_lock_irqsave(&rt->dev->addrs_lock, flags);
@@ -966,12 +967,17 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
                rc = mctp_do_fragment_route(rt, skb, mtu, tag);
        }
 
+       /* route output functions consume the skb, even on error */
+       skb = NULL;
+
 out_release:
        if (!ext_rt)
                mctp_route_release(rt);
 
        mctp_dev_put(tmp_rt.dev);
 
+out_free:
+       kfree_skb(skb);
        return rc;
 }
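
The mctp_local_output() rework relies on an ownership convention spelled out in the new comment: route output functions consume the skb even on error, so the local pointer is cleared before falling through to the shared cleanup, where kfree_skb(NULL), like free(NULL), is a no-op. A sketch of the convention with plain heap buffers:

    #include <stdlib.h>

    struct buf { char *data; };

    static int consume(struct buf *b)       /* takes ownership, even on failure */
    {
            free(b->data);
            free(b);
            return -5;                      /* simulated transmit error */
    }

    static int send_one(struct buf *b, int route_ok)
    {
            int rc = -1;

            if (!route_ok)
                    goto out_free;          /* we still own b on this path */

            rc = consume(b);                /* ownership transferred ... */
            b = NULL;                       /* ... so the shared exit must not free it */

    out_free:
            if (b) {
                    free(b->data);
                    free(b);
            }
            return rc;
    }

    int main(void)
    {
            struct buf *b = calloc(1, sizeof(*b));

            return b ? send_one(b, 1) != -5 : 1;
    }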
 
index a536586742f28c1ddd54c79e62eb56fea267a8fa..7017dd60659dc7133318c1c82e3f429bea3a5d57 100644 (file)
 #include <uapi/linux/mptcp.h>
 #include "protocol.h"
 
-static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
+static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
 {
        struct mptcp_subflow_context *sf;
        struct nlattr *start;
        u32 flags = 0;
+       bool slow;
        int err;
 
+       if (inet_sk_state_load(sk) == TCP_LISTEN)
+               return 0;
+
        start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
        if (!start)
                return -EMSGSIZE;
 
+       slow = lock_sock_fast(sk);
        rcu_read_lock();
        sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
        if (!sf) {
@@ -63,17 +68,19 @@ static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
                        sf->map_data_len) ||
            nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) ||
            nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) ||
-           nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, sf->local_id)) {
+           nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, subflow_get_local_id(sf))) {
                err = -EMSGSIZE;
                goto nla_failure;
        }
 
        rcu_read_unlock();
+       unlock_sock_fast(sk, slow);
        nla_nest_end(skb, start);
        return 0;
 
 nla_failure:
        rcu_read_unlock();
+       unlock_sock_fast(sk, slow);
        nla_nest_cancel(skb, start);
        return err;
 }
index e3e96a49f92296aed056137a815f0e2a30b8407c..63fc0758c22d45e356d4edadff991b7e88ec8659 100644 (file)
@@ -981,10 +981,10 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
        if (mp_opt->deny_join_id0)
                WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
 
-set_fully_established:
        if (unlikely(!READ_ONCE(msk->pm.server_side)))
                pr_warn_once("bogus mpc option on established client sk");
 
+set_fully_established:
        mptcp_data_lock((struct sock *)msk);
        __mptcp_subflow_fully_established(msk, subflow, mp_opt);
        mptcp_data_unlock((struct sock *)msk);
index 287a60381eae6e39c68d49a65530ea5bdc8a6675..58d17d9604e78fde24795219e53e18646c53b0de 100644 (file)
@@ -396,19 +396,6 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
        }
 }
 
-static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
-                                 const struct mptcp_addr_info *addr)
-{
-       int i;
-
-       for (i = 0; i < nr; i++) {
-               if (addrs[i].id == addr->id)
-                       return true;
-       }
-
-       return false;
-}
-
 /* Fill all the remote addresses into the array addrs[],
  * and return the array size.
  */
@@ -440,18 +427,34 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
                msk->pm.subflows++;
                addrs[i++] = remote;
        } else {
+               DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+
+               /* Forbid creation of new subflows matching existing
+                * ones, possibly already created by incoming ADD_ADDR
+                */
+               bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+               mptcp_for_each_subflow(msk, subflow)
+                       if (READ_ONCE(subflow->local_id) == local->id)
+                               __set_bit(subflow->remote_id, unavail_id);
+
                mptcp_for_each_subflow(msk, subflow) {
                        ssk = mptcp_subflow_tcp_sock(subflow);
                        remote_address((struct sock_common *)ssk, &addrs[i]);
-                       addrs[i].id = subflow->remote_id;
+                       addrs[i].id = READ_ONCE(subflow->remote_id);
                        if (deny_id0 && !addrs[i].id)
                                continue;
 
+                       if (test_bit(addrs[i].id, unavail_id))
+                               continue;
+
                        if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
                                continue;
 
-                       if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
-                           msk->pm.subflows < subflows_max) {
+                       if (msk->pm.subflows < subflows_max) {
+                               /* forbid creating multiple addresses towards
+                                * this id
+                                */
+                               __set_bit(addrs[i].id, unavail_id);
                                msk->pm.subflows++;
                                i++;
                        }
@@ -799,18 +802,18 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
 
                mptcp_for_each_subflow_safe(msk, subflow, tmp) {
                        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+                       u8 remote_id = READ_ONCE(subflow->remote_id);
                        int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
-                       u8 id = subflow->local_id;
+                       u8 id = subflow_get_local_id(subflow);
 
-                       if (rm_type == MPTCP_MIB_RMADDR && subflow->remote_id != rm_id)
+                       if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
                                continue;
                        if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
                                continue;
 
                        pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
                                 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
-                                i, rm_id, subflow->local_id, subflow->remote_id,
-                                msk->mpc_endpoint_id);
+                                i, rm_id, id, remote_id, msk->mpc_endpoint_id);
                        spin_unlock_bh(&msk->pm.lock);
                        mptcp_subflow_shutdown(sk, ssk, how);
 
@@ -901,7 +904,8 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
 }
 
 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
-                                            struct mptcp_pm_addr_entry *entry)
+                                            struct mptcp_pm_addr_entry *entry,
+                                            bool needs_id)
 {
        struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
        unsigned int addr_max;
@@ -949,7 +953,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
                }
        }
 
-       if (!entry->addr.id) {
+       if (!entry->addr.id && needs_id) {
 find_next:
                entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
                                                    MPTCP_PM_MAX_ADDR_ID + 1,
@@ -960,7 +964,7 @@ find_next:
                }
        }
 
-       if (!entry->addr.id)
+       if (!entry->addr.id && needs_id)
                goto out;
 
        __set_bit(entry->addr.id, pernet->id_bitmap);
@@ -1092,7 +1096,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
        entry->ifindex = 0;
        entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
        entry->lsk = NULL;
-       ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+       ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
        if (ret < 0)
                kfree(entry);
 
@@ -1285,6 +1289,18 @@ next:
        return 0;
 }
 
+static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
+                                     struct genl_info *info)
+{
+       struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
+
+       if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
+                                        mptcp_pm_address_nl_policy, info->extack) &&
+           tb[MPTCP_PM_ADDR_ATTR_ID])
+               return true;
+       return false;
+}
+
 int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
 {
        struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
@@ -1326,7 +1342,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
                        goto out_free;
                }
        }
-       ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+       ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
+                                               !mptcp_pm_has_addr_attr_id(attr, info));
        if (ret < 0) {
                GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
                goto out_free;
@@ -1980,7 +1997,7 @@ static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
        if (WARN_ON_ONCE(!sf))
                return -EINVAL;
 
-       if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, sf->local_id))
+       if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
                return -EMSGSIZE;
 
        if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
index 4f3901d5b8ef8bd75759cae5de602c5112355b53..bc97cc30f013abdba076aa93596dd213e9353eb8 100644 (file)
@@ -26,7 +26,8 @@ void mptcp_free_local_addr_list(struct mptcp_sock *msk)
 }
 
 static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
-                                                   struct mptcp_pm_addr_entry *entry)
+                                                   struct mptcp_pm_addr_entry *entry,
+                                                   bool needs_id)
 {
        DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
        struct mptcp_pm_addr_entry *match = NULL;
@@ -41,7 +42,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
        spin_lock_bh(&msk->pm.lock);
        list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
                addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
-               if (addr_match && entry->addr.id == 0)
+               if (addr_match && entry->addr.id == 0 && needs_id)
                        entry->addr.id = e->addr.id;
                id_match = (e->addr.id == entry->addr.id);
                if (addr_match && id_match) {
@@ -64,7 +65,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
                }
 
                *e = *entry;
-               if (!e->addr.id)
+               if (!e->addr.id && needs_id)
                        e->addr.id = find_next_zero_bit(id_bitmap,
                                                        MPTCP_PM_MAX_ADDR_ID + 1,
                                                        1);
@@ -153,7 +154,7 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
        if (new_entry.addr.port == msk_sport)
                new_entry.addr.port = 0;
 
-       return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
+       return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
 }
 
 int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
@@ -198,7 +199,7 @@ int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
                goto announce_err;
        }
 
-       err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val);
+       err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val, false);
        if (err < 0) {
                GENL_SET_ERR_MSG(info, "did not match address and id");
                goto announce_err;
@@ -233,7 +234,7 @@ static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk,
 
        lock_sock(sk);
        mptcp_for_each_subflow(msk, subflow) {
-               if (subflow->local_id == 0) {
+               if (READ_ONCE(subflow->local_id) == 0) {
                        has_id_0 = true;
                        break;
                }
@@ -378,7 +379,7 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
        }
 
        local.addr = addr_l;
-       err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
+       err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
        if (err < 0) {
                GENL_SET_ERR_MSG(info, "did not match address and id");
                goto create_err;
@@ -494,6 +495,16 @@ int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info
                goto destroy_err;
        }
 
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+       if (addr_l.family == AF_INET && ipv6_addr_v4mapped(&addr_r.addr6)) {
+               ipv6_addr_set_v4mapped(addr_l.addr.s_addr, &addr_l.addr6);
+               addr_l.family = AF_INET6;
+       }
+       if (addr_r.family == AF_INET && ipv6_addr_v4mapped(&addr_l.addr6)) {
+               ipv6_addr_set_v4mapped(addr_r.addr.s_addr, &addr_r.addr6);
+               addr_r.family = AF_INET6;
+       }
+#endif
        if (addr_l.family != addr_r.family) {
                GENL_SET_ERR_MSG(info, "address families do not match");
                err = -EINVAL;
index 8ef2927ebca297bf60d51fae91732e09562fd496..7833a49f6214a194a282bba92671e9cdd945ad92 100644 (file)
@@ -85,7 +85,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
        subflow->subflow_id = msk->subflow_id++;
 
        /* This is the first subflow, always with id 0 */
-       subflow->local_id_valid = 1;
+       WRITE_ONCE(subflow->local_id, 0);
        mptcp_sock_graft(msk->first, sk->sk_socket);
        iput(SOCK_INODE(ssock));
 
@@ -1260,6 +1260,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
                mpext = mptcp_get_ext(skb);
                if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
                        TCP_SKB_CB(skb)->eor = 1;
+                       tcp_mark_push(tcp_sk(ssk), skb);
                        goto alloc_skb;
                }
 
@@ -3177,8 +3178,50 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
 
        return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
 }
+
+static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
+{
+       const struct ipv6_pinfo *np = inet6_sk(sk);
+       struct ipv6_txoptions *opt;
+       struct ipv6_pinfo *newnp;
+
+       newnp = inet6_sk(newsk);
+
+       rcu_read_lock();
+       opt = rcu_dereference(np->opt);
+       if (opt) {
+               opt = ipv6_dup_options(newsk, opt);
+               if (!opt)
+                       net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
+       }
+       RCU_INIT_POINTER(newnp->opt, opt);
+       rcu_read_unlock();
+}
 #endif
 
+static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
+{
+       struct ip_options_rcu *inet_opt, *newopt = NULL;
+       const struct inet_sock *inet = inet_sk(sk);
+       struct inet_sock *newinet;
+
+       newinet = inet_sk(newsk);
+
+       rcu_read_lock();
+       inet_opt = rcu_dereference(inet->inet_opt);
+       if (inet_opt) {
+               newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
+                                     inet_opt->opt.optlen, GFP_ATOMIC);
+               if (newopt)
+                       memcpy(newopt, inet_opt, sizeof(*inet_opt) +
+                              inet_opt->opt.optlen);
+               else
+                       net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
+       }
+       RCU_INIT_POINTER(newinet->inet_opt, newopt);
+       rcu_read_unlock();
+}
+
 struct sock *mptcp_sk_clone_init(const struct sock *sk,
                                 const struct mptcp_options_received *mp_opt,
                                 struct sock *ssk,
@@ -3199,6 +3242,13 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
 
        __mptcp_init_sock(nsk);
 
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+       if (nsk->sk_family == AF_INET6)
+               mptcp_copy_ip6_options(nsk, sk);
+       else
+#endif
+               mptcp_copy_ip_options(nsk, sk);
+
        msk = mptcp_sk(nsk);
        msk->local_key = subflow_req->local_key;
        msk->token = subflow_req->token;
@@ -3210,7 +3260,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
        msk->write_seq = subflow_req->idsn + 1;
        msk->snd_nxt = msk->write_seq;
        msk->snd_una = msk->write_seq;
-       msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
+       msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd;
        msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
        mptcp_init_sched(msk, mptcp_sk(sk)->sched);
 
index ed50f2015dc389d035e919a5d509e12899e22687..07f6242afc1ae09d3c17aadfe7bb104eb3cf177c 100644 (file)
@@ -491,10 +491,9 @@ struct mptcp_subflow_context {
                remote_key_valid : 1,        /* received the peer key from */
                disposable : 1,     /* ctx can be free at ulp release time */
                stale : 1,          /* unable to snd/rcv data, do not use for xmit */
-               local_id_valid : 1, /* local_id is correctly initialized */
                valid_csum_seen : 1,        /* at least one csum validated */
                is_mptfo : 1,       /* subflow is doing TFO */
-               __unused : 9;
+               __unused : 10;
        bool    data_avail;
        bool    scheduled;
        u32     remote_nonce;
@@ -505,7 +504,7 @@ struct mptcp_subflow_context {
                u8      hmac[MPTCPOPT_HMAC_LEN]; /* MPJ subflow only */
                u64     iasn;       /* initial ack sequence number, MPC subflows only */
        };
-       u8      local_id;
+       s16     local_id;           /* if negative not initialized yet */
        u8      remote_id;
        u8      reset_seen:1;
        u8      reset_transient:1;
@@ -556,6 +555,7 @@ mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
 {
        memset(&subflow->reset, 0, sizeof(subflow->reset));
        subflow->request_mptcp = 1;
+       WRITE_ONCE(subflow->local_id, -1);
 }
 
 static inline u64
@@ -790,6 +790,16 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
               READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
 }
 
+static inline void mptcp_write_space(struct sock *sk)
+{
+       if (sk_stream_is_writeable(sk)) {
+               /* pairs with memory barrier in mptcp_poll */
+               smp_mb();
+               if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
+                       sk_stream_write_space(sk);
+       }
+}
+
 static inline void __mptcp_sync_sndbuf(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow;
@@ -808,6 +818,7 @@ static inline void __mptcp_sync_sndbuf(struct sock *sk)
 
        /* the msk max wmem limit is <nr_subflows> * tcp wmem[2] */
        WRITE_ONCE(sk->sk_sndbuf, new_sndbuf);
+       mptcp_write_space(sk);
 }
 
 /* The caller held both the msk socket and the subflow socket locks,
@@ -838,16 +849,6 @@ static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
        local_bh_enable();
 }
 
-static inline void mptcp_write_space(struct sock *sk)
-{
-       if (sk_stream_is_writeable(sk)) {
-               /* pairs with memory barrier in mptcp_poll */
-               smp_mb();
-               if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
-                       sk_stream_write_space(sk);
-       }
-}
-
 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
 
 #define MPTCP_TOKEN_MAX_RETRIES        4
@@ -1022,6 +1023,15 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
 int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
 
+static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
+{
+       int local_id = READ_ONCE(subflow->local_id);
+
+       if (local_id < 0)
+               return 0;
+       return local_id;
+}
+
 void __init mptcp_pm_nl_init(void);
 void mptcp_pm_nl_work(struct mptcp_sock *msk);
 void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
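
The local_id rework above drops the separate local_id_valid bit in favor of widening the field to s16 with -1 as an "unassigned" sentinel; subflow_get_local_id() then clamps unassigned reads to 0 for paths such as diag that may observe the field early. The sentinel pattern in miniature:

    #include <stdint.h>
    #include <stdio.h>

    struct sub { int16_t local_id; };       /* -1: not yet assigned */

    static uint8_t get_local_id(const struct sub *s)
    {
            int id = s->local_id;           /* kernel: READ_ONCE() */

            return id < 0 ? 0 : (uint8_t)id;
    }

    int main(void)
    {
            struct sub s = { .local_id = -1 };

            printf("%u\n", get_local_id(&s)); /* 0 until assigned */
            s.local_id = 5;
            printf("%u\n", get_local_id(&s)); /* 5 */
            return 0;
    }
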
index c34ecadee1200a4804ea732df0b3d8a7b4f6e174..71ba86246ff893c5bf65f77802510b52c3d68fd4 100644 (file)
@@ -535,7 +535,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
                subflow->backup = mp_opt.backup;
                subflow->thmac = mp_opt.thmac;
                subflow->remote_nonce = mp_opt.nonce;
-               subflow->remote_id = mp_opt.join_id;
+               WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
                         subflow, subflow->thmac, subflow->remote_nonce,
                         subflow->backup);
@@ -577,8 +577,8 @@ do_reset:
 
 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
 {
-       subflow->local_id = local_id;
-       subflow->local_id_valid = 1;
+       WARN_ON_ONCE(local_id < 0 || local_id > 255);
+       WRITE_ONCE(subflow->local_id, local_id);
 }
 
 static int subflow_chk_local_id(struct sock *sk)
@@ -587,7 +587,7 @@ static int subflow_chk_local_id(struct sock *sk)
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
        int err;
 
-       if (likely(subflow->local_id_valid))
+       if (likely(subflow->local_id >= 0))
                return 0;
 
        err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
@@ -1567,7 +1567,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
        pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
                 remote_token, local_id, remote_id);
        subflow->remote_token = remote_token;
-       subflow->remote_id = remote_id;
+       WRITE_ONCE(subflow->remote_id, remote_id);
        subflow->request_join = 1;
        subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
        subflow->subflow_id = msk->subflow_id++;
@@ -1731,6 +1731,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
        pr_debug("subflow=%p", ctx);
 
        ctx->tcp_sock = sk;
+       WRITE_ONCE(ctx->local_id, -1);
 
        return ctx;
 }
@@ -1966,14 +1967,14 @@ static void subflow_ulp_clone(const struct request_sock *req,
                new_ctx->idsn = subflow_req->idsn;
 
                /* this is the first subflow, id is always 0 */
-               new_ctx->local_id_valid = 1;
+               subflow_set_local_id(new_ctx, 0);
        } else if (subflow_req->mp_join) {
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->mp_join = 1;
                new_ctx->fully_established = 1;
                new_ctx->remote_key_valid = 1;
                new_ctx->backup = subflow_req->backup;
-               new_ctx->remote_id = subflow_req->remote_id;
+               WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
                new_ctx->token = subflow_req->token;
                new_ctx->thmac = subflow_req->thmac;
 
index 2e5f3864d353a39cfde138b725e790d7290b82c9..5b876fa7f9af9e5dfe950929b29f0fc92daf9bab 100644 (file)
@@ -2756,6 +2756,7 @@ static const struct nf_ct_hook nf_conntrack_hook = {
        .get_tuple_skb  = nf_conntrack_get_tuple_skb,
        .attach         = nf_conntrack_attach,
        .set_closing    = nf_conntrack_set_closing,
+       .confirm        = __nf_conntrack_confirm,
 };
 
 void nf_conntrack_init_end(void)
index e697a824b0018e1f1e26e3d547c1e80c6ca49e39..540d97715bd23d6f53f29fc7df39f09cd6b2f5c0 100644 (file)
@@ -533,6 +533,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
        /* Get fields bitmap */
        if (nf_h323_error_boundary(bs, 0, f->sz))
                return H323_ERROR_BOUND;
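+       /* get_bitmap() reads at most 32 bits; reject oversized fields. */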
+       if (f->sz > 32)
+               return H323_ERROR_RANGE;
        bmp = get_bitmap(bs, f->sz);
        if (base)
                *(unsigned int *)base = bmp;
@@ -589,6 +591,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
        bmp2_len = get_bits(bs, 7) + 1;
        if (nf_h323_error_boundary(bs, 0, bmp2_len))
                return H323_ERROR_BOUND;
+       if (bmp2_len > 32)
+               return H323_ERROR_RANGE;
        bmp2 = get_bitmap(bs, bmp2_len);
        bmp |= bmp2 >> f->sz;
        if (base)
index 920a5a29ae1dceba6849aaad6d62701567d3ec99..a0571339239c40ded96c4a9466d53d5de2887ed5 100644 (file)
@@ -87,12 +87,22 @@ static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
        return 0;
 }
 
+static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route,
+                                            enum flow_offload_tuple_dir dir)
+{
+       struct dst_entry *dst = route->tuple[dir].dst;
+
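+       /* Clear the slot: the caller takes over the dst reference and the
+        * matching dst_release().
+        */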
+       route->tuple[dir].dst = NULL;
+
+       return dst;
+}
+
 static int flow_offload_fill_route(struct flow_offload *flow,
-                                  const struct nf_flow_route *route,
+                                  struct nf_flow_route *route,
                                   enum flow_offload_tuple_dir dir)
 {
        struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
-       struct dst_entry *dst = route->tuple[dir].dst;
+       struct dst_entry *dst = nft_route_dst_fetch(route, dir);
        int i, j = 0;
 
        switch (flow_tuple->l3proto) {
@@ -122,6 +132,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
                       ETH_ALEN);
                flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
                flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
+               dst_release(dst);
                break;
        case FLOW_OFFLOAD_XMIT_XFRM:
        case FLOW_OFFLOAD_XMIT_NEIGH:
@@ -146,7 +157,7 @@ static void nft_flow_dst_release(struct flow_offload *flow,
 }
 
 void flow_offload_route_init(struct flow_offload *flow,
-                           const struct nf_flow_route *route)
+                            struct nf_flow_route *route)
 {
        flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
index f8e3f70c35bd558aec8a2b2149a7ff7db0eb3d1f..1683dc196b5921da91c8dd81b99ac9106c7493fe 100644 (file)
@@ -684,15 +684,16 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
        return err;
 }
 
-static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
-                                  struct nft_flowtable *flowtable)
+static struct nft_trans *
+nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+                       struct nft_flowtable *flowtable)
 {
        struct nft_trans *trans;
 
        trans = nft_trans_alloc(ctx, msg_type,
                                sizeof(struct nft_trans_flowtable));
        if (trans == NULL)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        if (msg_type == NFT_MSG_NEWFLOWTABLE)
                nft_activate_next(ctx->net, flowtable);
@@ -701,22 +702,22 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
        nft_trans_flowtable(trans) = flowtable;
        nft_trans_commit_list_add_tail(ctx->net, trans);
 
-       return 0;
+       return trans;
 }
 
 static int nft_delflowtable(struct nft_ctx *ctx,
                            struct nft_flowtable *flowtable)
 {
-       int err;
+       struct nft_trans *trans;
 
-       err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
-       if (err < 0)
-               return err;
+       trans = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
        nft_deactivate_next(ctx->net, flowtable);
        nft_use_dec(&ctx->table->use);
 
-       return err;
+       return 0;
 }
 
 static void __nft_reg_track_clobber(struct nft_regs_track *track, u8 dreg)
@@ -1251,6 +1252,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
        return 0;
 
 err_register_hooks:
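+       /* Hook registration failed: put the table back into dormant state. */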
+       ctx->table->flags |= NFT_TABLE_F_DORMANT;
        nft_trans_destroy(trans);
        return ret;
 }
@@ -2080,7 +2082,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
        struct nft_hook *hook;
        int err;
 
-       hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
+       hook = kzalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
        if (!hook) {
                err = -ENOMEM;
                goto err_hook_alloc;
@@ -2503,19 +2505,15 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        RCU_INIT_POINTER(chain->blob_gen_0, blob);
        RCU_INIT_POINTER(chain->blob_gen_1, blob);
 
-       err = nf_tables_register_hook(net, table, chain);
-       if (err < 0)
-               goto err_destroy_chain;
-
        if (!nft_use_inc(&table->use)) {
                err = -EMFILE;
-               goto err_use;
+               goto err_destroy_chain;
        }
 
        trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
-               goto err_unregister_hook;
+               goto err_trans;
        }
 
        nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET;
@@ -2523,17 +2521,22 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                nft_trans_chain_policy(trans) = policy;
 
        err = nft_chain_add(table, chain);
-       if (err < 0) {
-               nft_trans_destroy(trans);
-               goto err_unregister_hook;
-       }
+       if (err < 0)
+               goto err_chain_add;
+
+       /* This must be LAST to ensure no packets are walking over this chain. */
+       err = nf_tables_register_hook(net, table, chain);
+       if (err < 0)
+               goto err_register_hook;
 
        return 0;
 
-err_unregister_hook:
+err_register_hook:
+       nft_chain_del(chain);
+err_chain_add:
+       nft_trans_destroy(trans);
+err_trans:
        nft_use_dec_restore(&table->use);
-err_use:
-       nf_tables_unregister_hook(net, table, chain);
 err_destroy_chain:
        nf_tables_chain_destroy(ctx);
 
@@ -4998,6 +5001,12 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
                if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
                             (NFT_SET_EVAL | NFT_SET_OBJECT))
                        return -EOPNOTSUPP;
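+               /* A timeout cannot be combined with an anonymous set unless
+                * the set supports dynamic updates, nor with a constant set.
+                */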
+               if ((flags & (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT | NFT_SET_EVAL)) ==
+                            (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT))
+                       return -EOPNOTSUPP;
+               if ((flags & (NFT_SET_CONSTANT | NFT_SET_TIMEOUT)) ==
+                            (NFT_SET_CONSTANT | NFT_SET_TIMEOUT))
+                       return -EOPNOTSUPP;
        }
 
        desc.dtype = 0;
@@ -5421,6 +5430,7 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 
        if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
                list_del_rcu(&set->list);
+               set->dead = 1;
                if (event)
                        nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
                                             GFP_KERNEL);
@@ -8455,9 +8465,9 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
        u8 family = info->nfmsg->nfgen_family;
        const struct nf_flowtable_type *type;
        struct nft_flowtable *flowtable;
-       struct nft_hook *hook, *next;
        struct net *net = info->net;
        struct nft_table *table;
+       struct nft_trans *trans;
        struct nft_ctx ctx;
        int err;
 
@@ -8537,34 +8547,34 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
        err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable,
                                       extack, true);
        if (err < 0)
-               goto err4;
+               goto err_flowtable_parse_hooks;
 
        list_splice(&flowtable_hook.list, &flowtable->hook_list);
        flowtable->data.priority = flowtable_hook.priority;
        flowtable->hooknum = flowtable_hook.num;
 
+       trans = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+       if (IS_ERR(trans)) {
+               err = PTR_ERR(trans);
+               goto err_flowtable_trans;
+       }
+
+       /* This must be LAST to ensure no packets are walking over this flowtable. */
        err = nft_register_flowtable_net_hooks(ctx.net, table,
                                               &flowtable->hook_list,
                                               flowtable);
-       if (err < 0) {
-               nft_hooks_destroy(&flowtable->hook_list);
-               goto err4;
-       }
-
-       err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
        if (err < 0)
-               goto err5;
+               goto err_flowtable_hooks;
 
        list_add_tail_rcu(&flowtable->list, &table->flowtables);
 
        return 0;
-err5:
-       list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
-               nft_unregister_flowtable_hook(net, flowtable, hook);
-               list_del_rcu(&hook->list);
-               kfree_rcu(hook, rcu);
-       }
-err4:
+
+err_flowtable_hooks:
+       nft_trans_destroy(trans);
+err_flowtable_trans:
+       nft_hooks_destroy(&flowtable->hook_list);
+err_flowtable_parse_hooks:
        flowtable->data.type->free(&flowtable->data);
 err3:
        module_put(type->owner);
index 1f9474fefe84923e7769efd8ea5c703f5783e37d..d3d11dede54507262022725a5e54a12f0def7f89 100644 (file)
@@ -359,10 +359,20 @@ static int nft_target_validate(const struct nft_ctx *ctx,
 
        if (ctx->family != NFPROTO_IPV4 &&
            ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET &&
            ctx->family != NFPROTO_BRIDGE &&
            ctx->family != NFPROTO_ARP)
                return -EOPNOTSUPP;
 
+       ret = nft_chain_validate_hooks(ctx->chain,
+                                      (1 << NF_INET_PRE_ROUTING) |
+                                      (1 << NF_INET_LOCAL_IN) |
+                                      (1 << NF_INET_FORWARD) |
+                                      (1 << NF_INET_LOCAL_OUT) |
+                                      (1 << NF_INET_POST_ROUTING));
+       if (ret)
+               return ret;
+
        if (nft_is_base_chain(ctx->chain)) {
                const struct nft_base_chain *basechain =
                                                nft_base_chain(ctx->chain);
@@ -610,10 +620,20 @@ static int nft_match_validate(const struct nft_ctx *ctx,
 
        if (ctx->family != NFPROTO_IPV4 &&
            ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET &&
            ctx->family != NFPROTO_BRIDGE &&
            ctx->family != NFPROTO_ARP)
                return -EOPNOTSUPP;
 
+       ret = nft_chain_validate_hooks(ctx->chain,
+                                      (1 << NF_INET_PRE_ROUTING) |
+                                      (1 << NF_INET_LOCAL_IN) |
+                                      (1 << NF_INET_FORWARD) |
+                                      (1 << NF_INET_LOCAL_OUT) |
+                                      (1 << NF_INET_POST_ROUTING));
+       if (ret)
+               return ret;
+
        if (nft_is_base_chain(ctx->chain)) {
                const struct nft_base_chain *basechain =
                                                nft_base_chain(ctx->chain);
index bfd3e5a14dab68484469bdba71af37a460822549..255640013ab84542b76026e9fc4ae4a2f61b2c99 100644 (file)
@@ -1256,14 +1256,13 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
        switch (priv->l3num) {
        case NFPROTO_IPV4:
        case NFPROTO_IPV6:
-               if (priv->l3num != ctx->family)
-                       return -EINVAL;
+               if (priv->l3num == ctx->family || ctx->family == NFPROTO_INET)
+                       break;
 
-               fallthrough;
-       case NFPROTO_INET:
-               break;
+               return -EINVAL;
+       case NFPROTO_INET: /* tuple.src.l3num supports NFPROTO_IPV4/6 only */
        default:
-               return -EOPNOTSUPP;
+               return -EAFNOSUPPORT;
        }
 
        priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
index 9c962347cf859f16fc76e4d8a2fd22cdb3d142d6..ff315351269fe643073bb2984485b3a76566b1c8 100644 (file)
@@ -167,7 +167,7 @@ static inline u32 netlink_group_mask(u32 group)
 static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
                                           gfp_t gfp_mask)
 {
-       unsigned int len = skb_end_offset(skb);
+       unsigned int len = skb->len;
        struct sk_buff *new;
 
        new = alloc_skb(len, gfp_mask);
index 0eed00184adf454d2e06bb44330c079a402a959e..104a80b75477f60199c69811ff48e65af293297c 100644 (file)
@@ -453,16 +453,16 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
        nr_init_timers(sk);
 
        nr->t1     =
-               msecs_to_jiffies(sysctl_netrom_transport_timeout);
+               msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout));
        nr->t2     =
-               msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
+               msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay));
        nr->n2     =
-               msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
+               msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries));
        nr->t4     =
-               msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
+               msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay));
        nr->idle   =
-               msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
-       nr->window = sysctl_netrom_transport_requested_window_size;
+               msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout));
+       nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size);
 
        nr->bpqext = 1;
        nr->state  = NR_STATE_0;
@@ -954,7 +954,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
                 * G8PZT's Xrouter which is sending packets with command type 7
                 * as an extension of the protocol.
                 */
-               if (sysctl_netrom_reset_circuit &&
+               if (READ_ONCE(sysctl_netrom_reset_circuit) &&
                    (frametype != NR_RESET || flags != 0))
                        nr_transmit_reset(skb, 1);
 
index 3aaac4a22b38763cd855e494b2c19fdf3bcbd54e..2c34389c3ce6f16acf669fa14c7b38a7d63dda4b 100644 (file)
@@ -81,7 +81,7 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev,
        buff[6] |= AX25_SSSID_SPARE;
        buff    += AX25_ADDR_LEN;
 
-       *buff++ = sysctl_netrom_network_ttl_initialiser;
+       *buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
 
        *buff++ = NR_PROTO_IP;
        *buff++ = NR_PROTO_IP;
index 2f084b6f69d7e05b51d1673157e8e72f3b9e2635..97944db6b5ac64387d8c8d53dc551bb11f1789f1 100644 (file)
@@ -97,7 +97,7 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
                break;
 
        case NR_RESET:
-               if (sysctl_netrom_reset_circuit)
+               if (READ_ONCE(sysctl_netrom_reset_circuit))
                        nr_disconnect(sk, ECONNRESET);
                break;
 
@@ -128,7 +128,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
                break;
 
        case NR_RESET:
-               if (sysctl_netrom_reset_circuit)
+               if (READ_ONCE(sysctl_netrom_reset_circuit))
                        nr_disconnect(sk, ECONNRESET);
                break;
 
@@ -262,7 +262,7 @@ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype
                break;
 
        case NR_RESET:
-               if (sysctl_netrom_reset_circuit)
+               if (READ_ONCE(sysctl_netrom_reset_circuit))
                        nr_disconnect(sk, ECONNRESET);
                break;
 
index 44929657f5b717de639c13334f17f42f652a78cc..5e531394a724b7f919f22ae2be42c8feaafdc22e 100644 (file)
@@ -204,7 +204,7 @@ void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
        dptr[6] |= AX25_SSSID_SPARE;
        dptr += AX25_ADDR_LEN;
 
-       *dptr++ = sysctl_netrom_network_ttl_initialiser;
+       *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
 
        if (!nr_route_frame(skb, NULL)) {
                kfree_skb(skb);
index baea3cbd76ca5bb5803974a75c3f10f87cc80140..70480869ad1c566a8ab8a28c0d39bdae056ec596 100644 (file)
@@ -153,7 +153,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
                nr_neigh->digipeat = NULL;
                nr_neigh->ax25     = NULL;
                nr_neigh->dev      = dev;
-               nr_neigh->quality  = sysctl_netrom_default_path_quality;
+               nr_neigh->quality  = READ_ONCE(sysctl_netrom_default_path_quality);
                nr_neigh->locked   = 0;
                nr_neigh->count    = 0;
                nr_neigh->number   = nr_neigh_no++;
@@ -728,7 +728,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
        nr_neigh->ax25 = NULL;
        ax25_cb_put(ax25);
 
-       if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
+       if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
                nr_neigh_put(nr_neigh);
                return;
        }
@@ -766,7 +766,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
        if (ax25 != NULL) {
                ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
                                  ax25->ax25_dev->dev, 0,
-                                 sysctl_netrom_obsolescence_count_initialiser);
+                                 READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
                if (ret)
                        return ret;
        }
@@ -780,7 +780,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
                return ret;
        }
 
-       if (!sysctl_netrom_routing_control && ax25 != NULL)
+       if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
                return 0;
 
        /* Its Time-To-Live has expired */
index e2d2af924cff4a4103e59e04a6efe69c6fcca23e..c3bbd5880850bb047c7375e2c1b8acf6e80e0231 100644 (file)
@@ -182,7 +182,8 @@ void nr_write_internal(struct sock *sk, int frametype)
                *dptr++ = nr->my_id;
                *dptr++ = frametype;
                *dptr++ = nr->window;
-               if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
+               if (nr->bpqext)
+                       *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
                break;
 
        case NR_DISCREQ:
@@ -236,7 +237,7 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
        dptr[6] |= AX25_SSSID_SPARE;
        dptr += AX25_ADDR_LEN;
 
-       *dptr++ = sysctl_netrom_network_ttl_initialiser;
+       *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
 
        if (mine) {
                *dptr++ = 0;
index 3aa50dc7535b7761c77652d2f38826419b57c26a..976fe250b50955ec51b0c5d73f2dfa132990b60b 100644 (file)
@@ -34,10 +34,10 @@ static int pn_ioctl(struct sock *sk, int cmd, int *karg)
 
        switch (cmd) {
        case SIOCINQ:
-               lock_sock(sk);
+               spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                *karg = skb ? skb->len : 0;
-               release_sock(sk);
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
                return 0;
 
        case SIOCPNADDRESOURCE:
index faba31f2eff2903bee7082b295f137ff848a1e10..3dd5f52bc1b58e3f1ee4e235126438c723f1f73c 100644 (file)
@@ -917,6 +917,37 @@ static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
        return 0;
 }
 
+static unsigned int pep_first_packet_length(struct sock *sk)
+{
+       struct pep_sock *pn = pep_sk(sk);
+       struct sk_buff_head *q;
+       struct sk_buff *skb;
+       unsigned int len = 0;
+       bool found = false;
+
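+       /* Peek the queue heads under their own locks instead of lock_sock();
+        * urgent-inline data on the control queue takes precedence.
+        */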
+       if (sock_flag(sk, SOCK_URGINLINE)) {
+               q = &pn->ctrlreq_queue;
+               spin_lock_bh(&q->lock);
+               skb = skb_peek(q);
+               if (skb) {
+                       len = skb->len;
+                       found = true;
+               }
+               spin_unlock_bh(&q->lock);
+       }
+
+       if (likely(!found)) {
+               q = &sk->sk_receive_queue;
+               spin_lock_bh(&q->lock);
+               skb = skb_peek(q);
+               if (skb)
+                       len = skb->len;
+               spin_unlock_bh(&q->lock);
+       }
+
+       return len;
+}
+
 static int pep_ioctl(struct sock *sk, int cmd, int *karg)
 {
        struct pep_sock *pn = pep_sk(sk);
@@ -929,15 +960,7 @@ static int pep_ioctl(struct sock *sk, int cmd, int *karg)
                        break;
                }
 
-               lock_sock(sk);
-               if (sock_flag(sk, SOCK_URGINLINE) &&
-                   !skb_queue_empty(&pn->ctrlreq_queue))
-                       *karg = skb_peek(&pn->ctrlreq_queue)->len;
-               else if (!skb_queue_empty(&sk->sk_receive_queue))
-                       *karg = skb_peek(&sk->sk_receive_queue)->len;
-               else
-                       *karg = 0;
-               release_sock(sk);
+               *karg = pep_first_packet_length(sk);
                ret = 0;
                break;
 
index fba82d36593add3317e89104aabbd69a0281cb33..a4e3c5de998be4c756cb0dc423ee9a7e7fa3e1a9 100644 (file)
@@ -301,6 +301,9 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
                        kfree(sg);
                }
                ret = PTR_ERR(trans_private);
+               /* Trigger connection so that it's ready for the next retry */
+               if (ret == -ENODEV)
+                       rds_conn_connect_if_down(cp->cp_conn);
                goto out;
        }
 
index 5e57a1581dc60571406e2faeeeba63af1e8aa29c..2899def23865fa47ce55faa7c7eb72fb52bc432b 100644 (file)
@@ -1313,12 +1313,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
-       if (ret) {
-               /* Trigger connection so that its ready for the next retry */
-               if (ret ==  -EAGAIN)
-                       rds_conn_connect_if_down(conn);
+       if (ret)
                goto out;
-       }
 
        if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
index 0a1a9e40f237012ecaa561bd563162bbc1802f9b..6faa7d00da09771ae130581604c3b14c50472966 100644 (file)
@@ -232,18 +232,14 @@ release_idr:
        return err;
 }
 
-static bool is_mirred_nested(void)
-{
-       return unlikely(__this_cpu_read(mirred_nest_level) > 1);
-}
-
-static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+static int
+tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
 {
        int err;
 
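+       /* Deliver to the qdisc for egress targets; for ingress targets,
+        * receive directly only when we are already in the ingress path,
+        * otherwise go through netif_rx().
+        */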
        if (!want_ingress)
                err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
-       else if (is_mirred_nested())
+       else if (!at_ingress)
                err = netif_rx(skb);
        else
                err = netif_receive_skb(skb);
@@ -270,8 +266,7 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
        if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
                net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
                                       dev->name);
-               err = -ENODEV;
-               goto out;
+               goto err_cant_do;
        }
 
        /* we could easily avoid the clone only if called by ingress and clsact;
@@ -283,10 +278,8 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
                tcf_mirred_can_reinsert(retval);
        if (!dont_clone) {
                skb_to_send = skb_clone(skb, GFP_ATOMIC);
-               if (!skb_to_send) {
-                       err =  -ENOMEM;
-                       goto out;
-               }
+               if (!skb_to_send)
+                       goto err_cant_do;
        }
 
        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
@@ -319,19 +312,20 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
 
                skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
 
-               err = tcf_mirred_forward(want_ingress, skb_to_send);
+               err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
        } else {
-               err = tcf_mirred_forward(want_ingress, skb_to_send);
+               err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
        }
-
-       if (err) {
-out:
+       if (err)
                tcf_action_inc_overlimit_qstats(&m->common);
-               if (is_redirect)
-                       retval = TC_ACT_SHOT;
-       }
 
        return retval;
+
+err_cant_do:
+       if (is_redirect)
+               retval = TC_ACT_SHOT;
+       tcf_action_inc_overlimit_qstats(&m->common);
+       return retval;
 }
 
 static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
index efb9d2811b73d18862f824b0b7a8b4e6b905271d..6ee7064c82fcc3bdb7596e2ad8fe33bc6456102d 100644 (file)
@@ -2460,8 +2460,11 @@ unbind_filter:
        }
 
 errout_idr:
-       if (!fold)
+       if (!fold) {
+               spin_lock(&tp->lock);
                idr_remove(&head->handle_idr, fnew->handle);
+               spin_unlock(&tp->lock);
+       }
        __fl_put(fnew);
 errout_tb:
        kfree(tb);
index 5b045284849e03151b172cf55248492aed2b3472..c9189a970eec317745a06c27064f504a6ff2e3d2 100644 (file)
 #include <linux/rtnetlink.h>
 #include <net/switchdev.h>
 
+static bool switchdev_obj_eq(const struct switchdev_obj *a,
+                            const struct switchdev_obj *b)
+{
+       const struct switchdev_obj_port_vlan *va, *vb;
+       const struct switchdev_obj_port_mdb *ma, *mb;
+
+       if (a->id != b->id || a->orig_dev != b->orig_dev)
+               return false;
+
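+       /* Only object types that can sit on the deferred list are compared;
+        * anything else is a caller bug, hence the BUG() below.
+        */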
+       switch (a->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               va = SWITCHDEV_OBJ_PORT_VLAN(a);
+               vb = SWITCHDEV_OBJ_PORT_VLAN(b);
+               return va->flags == vb->flags &&
+                       va->vid == vb->vid &&
+                       va->changed == vb->changed;
+       case SWITCHDEV_OBJ_ID_PORT_MDB:
+       case SWITCHDEV_OBJ_ID_HOST_MDB:
+               ma = SWITCHDEV_OBJ_PORT_MDB(a);
+               mb = SWITCHDEV_OBJ_PORT_MDB(b);
+               return ma->vid == mb->vid &&
+                       ether_addr_equal(ma->addr, mb->addr);
+       default:
+               break;
+       }
+
+       BUG();
+}
+
 static LIST_HEAD(deferred);
 static DEFINE_SPINLOCK(deferred_lock);
 
@@ -307,6 +336,50 @@ int switchdev_port_obj_del(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
 
+/**
+ *     switchdev_port_obj_act_is_deferred - Is object action pending?
+ *
+ *     @dev: port device
+ *     @nt: type of action; add or delete
+ *     @obj: object to test
+ *
+ *     Returns true if a deferred item equivalent to performing
+ *     action @nt on object @obj is pending.
+ *
+ *     rtnl_lock must be held.
+ */
+bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+                                       enum switchdev_notifier_type nt,
+                                       const struct switchdev_obj *obj)
+{
+       struct switchdev_deferred_item *dfitem;
+       bool found = false;
+
+       ASSERT_RTNL();
+
+       spin_lock_bh(&deferred_lock);
+
+       list_for_each_entry(dfitem, &deferred, list) {
+               if (dfitem->dev != dev)
+                       continue;
+
+               if ((dfitem->func == switchdev_port_obj_add_deferred &&
+                    nt == SWITCHDEV_PORT_OBJ_ADD) ||
+                   (dfitem->func == switchdev_port_obj_del_deferred &&
+                    nt == SWITCHDEV_PORT_OBJ_DEL)) {
+                       if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
+                               found = true;
+                               break;
+                       }
+               }
+       }
+
+       spin_unlock_bh(&deferred_lock);
+
+       return found;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
+
 static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
 static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
 
index 1c2c6800949dd4c2800f76326b7500f9743c4720..b4674f03d71a9fb9a5526555d7aca9b9cc5e665c 100644 (file)
@@ -1003,7 +1003,7 @@ static u16 tls_user_config(struct tls_context *ctx, bool tx)
        return 0;
 }
 
-static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
+static int tls_get_info(struct sock *sk, struct sk_buff *skb)
 {
        u16 version, cipher_type;
        struct tls_context *ctx;
index 9fbc70200cd0f9057b2e34ea3170c001fc4f972c..211f57164cb611fd2665f682906be96aa35463ed 100644 (file)
@@ -52,6 +52,7 @@ struct tls_decrypt_arg {
        struct_group(inargs,
        bool zc;
        bool async;
+       bool async_done;
        u8 tail;
        );
 
@@ -274,22 +275,30 @@ static int tls_do_decryption(struct sock *sk,
                DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
                atomic_inc(&ctx->decrypt_pending);
        } else {
+               DECLARE_CRYPTO_WAIT(wait);
+
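+               /* Synchronous decrypt: wait on an on-stack completion rather
+                * than the shared ctx->async_wait.
+                */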
                aead_request_set_callback(aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                         crypto_req_done, &ctx->async_wait);
+                                         crypto_req_done, &wait);
+               ret = crypto_aead_decrypt(aead_req);
+               if (ret == -EINPROGRESS || ret == -EBUSY)
+                       ret = crypto_wait_req(ret, &wait);
+               return ret;
        }
 
        ret = crypto_aead_decrypt(aead_req);
+       if (ret == -EINPROGRESS)
+               return 0;
+
        if (ret == -EBUSY) {
                ret = tls_decrypt_async_wait(ctx);
-               ret = ret ?: -EINPROGRESS;
+               darg->async_done = true;
+               /* all completions have run, we're not doing async anymore */
+               darg->async = false;
+               return ret;
        }
-       if (ret == -EINPROGRESS) {
-               if (darg->async)
-                       return 0;
 
-               ret = crypto_wait_req(ret, &ctx->async_wait);
-       }
+       atomic_dec(&ctx->decrypt_pending);
        darg->async = false;
 
        return ret;
@@ -1588,8 +1597,11 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
        /* Prepare and submit AEAD request */
        err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
                                data_len + prot->tail_size, aead_req, darg);
-       if (err)
+       if (err) {
+               if (darg->async_done)
+                       goto exit_free_skb;
                goto exit_free_pages;
+       }
 
        darg->skb = clear_skb ?: tls_strp_msg(ctx);
        clear_skb = NULL;
@@ -1601,6 +1613,9 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
                return err;
        }
 
+       if (unlikely(darg->async_done))
+               return 0;
+
        if (prot->tail_size)
                darg->tail = dctx->tail;
 
@@ -1772,7 +1787,8 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
                           u8 *control,
                           size_t skip,
                           size_t len,
-                          bool is_peek)
+                          bool is_peek,
+                          bool *more)
 {
        struct sk_buff *skb = skb_peek(&ctx->rx_list);
        struct tls_msg *tlm;
@@ -1785,7 +1801,7 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
 
                err = tls_record_content_type(msg, tlm, control);
                if (err <= 0)
-                       goto out;
+                       goto more;
 
                if (skip < rxm->full_len)
                        break;
@@ -1803,12 +1819,12 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
 
                err = tls_record_content_type(msg, tlm, control);
                if (err <= 0)
-                       goto out;
+                       goto more;
 
                err = skb_copy_datagram_msg(skb, rxm->offset + skip,
                                            msg, chunk);
                if (err < 0)
-                       goto out;
+                       goto more;
 
                len = len - chunk;
                copied = copied + chunk;
@@ -1844,6 +1860,10 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
 
 out:
        return copied ? : err;
+more:
+       if (more)
+               *more = true;
+       goto out;
 }
 
 static bool
@@ -1943,10 +1963,12 @@ int tls_sw_recvmsg(struct sock *sk,
        struct strp_msg *rxm;
        struct tls_msg *tlm;
        ssize_t copied = 0;
+       ssize_t peeked = 0;
        bool async = false;
        int target, err;
        bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
        bool is_peek = flags & MSG_PEEK;
+       bool rx_more = false;
        bool released = true;
        bool bpf_strp_enabled;
        bool zc_capable;
@@ -1966,12 +1988,12 @@ int tls_sw_recvmsg(struct sock *sk,
                goto end;
 
        /* Process pending decrypted records. It must be non-zero-copy */
-       err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
+       err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
        if (err < 0)
                goto end;
 
        copied = err;
-       if (len <= copied)
+       if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
                goto end;
 
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
@@ -2064,6 +2086,8 @@ put_on_rx_list:
                                decrypted += chunk;
                                len -= chunk;
                                __skb_queue_tail(&ctx->rx_list, skb);
+                               if (unlikely(control != TLS_RECORD_TYPE_DATA))
+                                       break;
                                continue;
                        }
 
@@ -2087,8 +2111,10 @@ put_on_rx_list:
                        if (err < 0)
                                goto put_on_rx_list_err;
 
-                       if (is_peek)
+                       if (is_peek) {
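+                               /* Remember how much was copied during the peek
+                                * so the final rx_list pass does not copy it
+                                * again.
+                                */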
+                               peeked += chunk;
                                goto put_on_rx_list;
+                       }
 
                        if (partially_consumed) {
                                rxm->offset += chunk;
@@ -2127,11 +2153,11 @@ recv_end:
 
                /* Drain records from the rx_list & copy if required */
                if (is_peek || is_kvec)
-                       err = process_rx_list(ctx, msg, &control, copied,
-                                             decrypted, is_peek);
+                       err = process_rx_list(ctx, msg, &control, copied + peeked,
+                                             decrypted - peeked, is_peek, NULL);
                else
                        err = process_rx_list(ctx, msg, &control, 0,
-                                             async_copy_bytes, is_peek);
+                                             async_copy_bytes, is_peek, NULL);
        }
 
        copied += decrypted;
index 30b178ebba60aa810e8442a326a14edcee071061..0748e7ea5210e7d597acf87fc6caf1ea2156562e 100644 (file)
@@ -782,19 +782,6 @@ static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
                                  int);
 
-static int unix_set_peek_off(struct sock *sk, int val)
-{
-       struct unix_sock *u = unix_sk(sk);
-
-       if (mutex_lock_interruptible(&u->iolock))
-               return -EINTR;
-
-       WRITE_ONCE(sk->sk_peek_off, val);
-       mutex_unlock(&u->iolock);
-
-       return 0;
-}
-
 #ifdef CONFIG_PROC_FS
 static int unix_count_nr_fds(struct sock *sk)
 {
@@ -862,7 +849,7 @@ static const struct proto_ops unix_stream_ops = {
        .read_skb =     unix_stream_read_skb,
        .mmap =         sock_no_mmap,
        .splice_read =  unix_stream_splice_read,
-       .set_peek_off = unix_set_peek_off,
+       .set_peek_off = sk_set_peek_off,
        .show_fdinfo =  unix_show_fdinfo,
 };
 
@@ -886,7 +873,7 @@ static const struct proto_ops unix_dgram_ops = {
        .read_skb =     unix_read_skb,
        .recvmsg =      unix_dgram_recvmsg,
        .mmap =         sock_no_mmap,
-       .set_peek_off = unix_set_peek_off,
+       .set_peek_off = sk_set_peek_off,
        .show_fdinfo =  unix_show_fdinfo,
 };
 
@@ -909,7 +896,7 @@ static const struct proto_ops unix_seqpacket_ops = {
        .sendmsg =      unix_seqpacket_sendmsg,
        .recvmsg =      unix_seqpacket_recvmsg,
        .mmap =         sock_no_mmap,
-       .set_peek_off = unix_set_peek_off,
+       .set_peek_off = sk_set_peek_off,
        .show_fdinfo =  unix_show_fdinfo,
 };
 
index 2ff7ddbaa782e341e1614a4d5bd295c87664e7dd..2a81880dac7b7b464b5ae9443fa3b2863cd76471 100644 (file)
@@ -284,9 +284,17 @@ void unix_gc(void)
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
-       list_for_each_entry(u, &gc_candidates, link)
+       list_for_each_entry(u, &gc_candidates, link) {
                scan_children(&u->sk, inc_inflight, &hitlist);
 
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
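+               /* Free any queued OOB skb while scanning the candidates. */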
+               if (u->oob_skb) {
+                       kfree_skb(u->oob_skb);
+                       u->oob_skb = NULL;
+               }
+#endif
+       }
+
        /* not_cycle_list contains those sockets which do not make up a
         * cycle.  Restore these to the inflight list.
         */
@@ -314,18 +322,6 @@ void unix_gc(void)
        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);
 
-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
-       while (!list_empty(&gc_candidates)) {
-               u = list_entry(gc_candidates.next, struct unix_sock, link);
-               if (u->oob_skb) {
-                       struct sk_buff *skb = u->oob_skb;
-
-                       u->oob_skb = NULL;
-                       kfree_skb(skb);
-               }
-       }
-#endif
-
        spin_lock(&unix_gc_lock);
 
        /* There could be io_uring registered files, just push them back to
index b09700400d09744ee1b0c990e46806264df25e3b..bd54a928bab4120134711f54e677cb1f60c4ba7b 100644 (file)
@@ -4197,6 +4197,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
 
                if (ntype != NL80211_IFTYPE_MESH_POINT)
                        return -EINVAL;
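+               /* Changing the mesh ID requires the interface to already be
+                * a mesh point.
+                */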
+               if (otype != NL80211_IFTYPE_MESH_POINT)
+                       return -EINVAL;
                if (netif_running(dev))
                        return -EBUSY;
 
index 1eadfac03cc41d35709c001a77759a23f7dbdc39..b78c0e095e221fd775b9e1eafa4dd15485915079 100644 (file)
@@ -722,7 +722,8 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
                        memcpy(vaddr, buffer, len);
                        kunmap_local(vaddr);
 
-                       skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
+                       skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
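+                       /* Each frag pins a full page; account it to sk_wmem_alloc. */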
+                       refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
                }
 
                if (first_frag && desc->options & XDP_TX_METADATA) {
index 3784534c918552dc2db6d84b4ba00e1337d63b74..653e51ae39648da177b84c82881932e9987eaa99 100644 (file)
@@ -407,7 +407,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;
 
-       if (!x->type_offload || x->encap)
+       if (!x->type_offload)
                return false;
 
        if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
index 662c83beb345ed2037d146c976180b1b62c26794..e5722c95b8bb38c528cc518cdc3a05e08a338264 100644 (file)
@@ -704,9 +704,13 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
 {
        struct net *net = dev_net(skb_dst(skb)->dev);
        struct xfrm_state *x = skb_dst(skb)->xfrm;
+       int family;
        int err;
 
-       switch (x->outer_mode.family) {
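+       /* With packet offload, the family comes from the dst entry rather
+        * than the state's outer mode.
+        */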
+       family = (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) ? x->outer_mode.family
+               : skb_dst(skb)->ops->family;
+
+       switch (family) {
        case AF_INET:
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
index 1b7e75159727791ef5ed03299729711ed775a16e..da6ecc6b3e153db74765a500afe3b4a255fdba44 100644 (file)
@@ -2694,7 +2694,9 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                        if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
                                mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
 
-                       family = xfrm[i]->props.family;
+                       if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+                               family = xfrm[i]->props.family;
+
                        oif = fl->flowi_oif ? : fl->flowi_l3mdev;
                        dst = xfrm_dst_lookup(xfrm[i], tos, oif,
                                              &saddr, &daddr, family, mark);
@@ -3416,7 +3418,7 @@ decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reve
        }
 
        fl4->flowi4_proto = flkeys->basic.ip_proto;
-       fl4->flowi4_tos = flkeys->ip.tos;
+       fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
index f037be190baeacf8a7fc4c26240dd224a39cb984..912c1189ba41c1cdca51f0212f7ea1ae293f9370 100644 (file)
@@ -2017,6 +2017,9 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
        if (xp->xfrm_nr == 0)
                return 0;
 
+       if (xp->xfrm_nr > XFRM_MAX_DEPTH)
+               return -ENOBUFS;
+
        for (i = 0; i < xp->xfrm_nr; i++) {
                struct xfrm_user_tmpl *up = &vec[i];
                struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
index 5a84b6443875c47013348bfed85ebefe8c6da4db..3ee8ecfb8c044c3bf65461e81af5a9e95391fa44 100644 (file)
@@ -33,7 +33,7 @@ ld-option = $(success,$(LD) -v $(1))
 
 # $(as-instr,<instr>)
 # Return y if the assembler supports <instr>, n otherwise
-as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler-with-cpp -o /dev/null -)
+as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o /dev/null -)
 
 # check if $(CC) and $(LD) exist
 $(error-if,$(failure,command -v $(CC)),C compiler '$(CC)' not found)
index 8fcb427405a6f17f61655a6d0881c433f22e1dd6..92be0c9a13eeb51beca06abe15bfe22c6e72bfcb 100644 (file)
@@ -38,7 +38,7 @@ as-option = $(call try-run,\
 # Usage: aflags-y += $(call as-instr,instr,option1,option2)
 
 as-instr = $(call try-run,\
-       printf "%b\n" "$(1)" | $(CC) -Werror $(CLANG_FLAGS) $(KBUILD_AFLAGS) -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
+       printf "%b\n" "$(1)" | $(CC) -Werror $(CLANG_FLAGS) $(KBUILD_AFLAGS) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
 
 # __cc-option
 # Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
index 61b7dddedc461e2ece91a7b25bcf14987fc98886..0669bac5e900e134c45a025697bae3b6251c09b1 100755 (executable)
@@ -513,7 +513,7 @@ eBPF programs can have an associated license, passed along with the bytecode
 instructions to the kernel when the programs are loaded. The format for that
 string is identical to the one in use for kernel modules (Dual licenses, such
 as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
-programs that are compatible with the GNU Privacy License (GPL).
+programs that are compatible with the GNU General Public License (GNU GPL).
 
 In order to use such helpers, the eBPF program must be loaded with the correct
 license string passed (via **attr**) to the **bpf**\\ () system call, and this
index c8047f4441e60ea944ae6fccedf0e4d0d7632dcd..e8316beb17a714588fa5358a8b514a9d383fecbf 100644 (file)
@@ -82,7 +82,7 @@ lx-symbols command."""
         self.module_files_updated = True
 
     def _get_module_file(self, module_name):
-        module_pattern = ".*/{0}\.ko(?:.debug)?$".format(
+        module_pattern = r".*/{0}\.ko(?:.debug)?$".format(
             module_name.replace("_", r"[_\-]"))
         for name in self.module_files:
             if re.match(module_pattern, name) and os.path.exists(name):
index 98e1150bee9d0cbecb79c7e81cb05159f6160b04..9a3dcaafb5b1ee20c4d2d5d355d81302ead8d427 100644 (file)
@@ -784,7 +784,7 @@ static int apparmor_getselfattr(unsigned int attr, struct lsm_ctx __user *lx,
        int error = -ENOENT;
        struct aa_task_ctx *ctx = task_ctx(current);
        struct aa_label *label = NULL;
-       char *value;
+       char *value = NULL;
 
        switch (attr) {
        case LSM_ATTR_CURRENT:
index df387de29bfa54bf4bf0f7607a6837ec589ebfd5..45c3e5dda355e23f823086816d00e78071b1c15c 100644 (file)
@@ -179,7 +179,8 @@ static int __init integrity_add_key(const unsigned int id, const void *data,
                                   KEY_ALLOC_NOT_IN_QUOTA);
        if (IS_ERR(key)) {
                rc = PTR_ERR(key);
-               pr_err("Problem loading X.509 certificate %d\n", rc);
+               if (id != INTEGRITY_KEYRING_MACHINE)
+                       pr_err("Problem loading X.509 certificate %d\n", rc);
        } else {
                pr_notice("Loaded X.509 cert '%s'\n",
                          key_ref_to_ptr(key)->description);
index fc520a06f9af107310aa81050b8ad21accc6640d..0171f7eb6ee15d384835a6cd68a2afcff1f3b653 100644 (file)
@@ -737,8 +737,8 @@ static int current_check_refer_path(struct dentry *const old_dentry,
        bool allow_parent1, allow_parent2;
        access_mask_t access_request_parent1, access_request_parent2;
        struct path mnt_dir;
-       layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS],
-               layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS];
+       layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
+                    layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
 
        if (!dom)
                return 0;
index a6bf90ace84c74bdb11330d7bb278183dfb13275..338b023a8c3edb5918d59a2bc07e23221507ff45 100644 (file)
@@ -6559,7 +6559,7 @@ static int selinux_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
                               size_t *size, u32 flags)
 {
        int rc;
-       char *val;
+       char *val = NULL;
        int val_len;
 
        val_len = selinux_lsm_getattr(attr, current, &val);
index 57ee70ae50f24ac771a7bd74d224c17b1ff03d25..ea3140d510ecbfee06666df588a795b9f5bfc5ce 100644 (file)
@@ -2649,13 +2649,14 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
 {
        int error = buffer_len;
        size_t avail_len = buffer_len;
-       char *cp0 = head->write_buf;
+       char *cp0;
        int idx;
 
        if (!head->write)
                return -EINVAL;
        if (mutex_lock_interruptible(&head->io_sem))
                return -EINTR;
+       cp0 = head->write_buf;
        head->read_user_buf_avail = 0;
        idx = tomoyo_read_lock();
        /* Read a line and dispatch it to the policy handler. */
index 90360f8b3e81b9374680058256f59efd96ad822d..69c68d8e7a6b54fc1fcfc3fac3a73d58403071f5 100644 (file)
@@ -199,13 +199,6 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "21HY"),
                }
        },
-       {
-               .driver_data = &acp6x_card,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "21J2"),
-               }
-       },
        {
                .driver_data = &acp6x_card,
                .matches = {
@@ -318,6 +311,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "E1504FA"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "M7600RE"),
+               }
+       },
        {
                .driver_data = &acp6x_card,
                .matches = {
index 694b8e31390248b88e4bc8aba843316244f0bdb4..7af6a349b1d41fb60d9450a312927e4774889a34 100644 (file)
@@ -162,7 +162,6 @@ static int snd_acp6x_probe(struct pci_dev *pci,
        /* Yellow Carp device check */
        switch (pci->revision) {
        case 0x60:
-       case 0x63:
        case 0x6f:
                break;
        default:
index 420bbf588efeaf1c0cc1d7700fa3d572ad6e816e..e100cc9f5c1929e3fa3c1111b0757c099873e333 100644 (file)
@@ -1429,7 +1429,7 @@ err_unprepare_mclk:
        return ret;
 }
 
-static void __exit adc3xxx_i2c_remove(struct i2c_client *client)
+static void adc3xxx_i2c_remove(struct i2c_client *client)
 {
        struct adc3xxx *adc3xxx = i2c_get_clientdata(client);
 
@@ -1452,7 +1452,7 @@ static struct i2c_driver adc3xxx_i2c_driver = {
                   .of_match_table = tlv320adc3xxx_of_match,
                  },
        .probe = adc3xxx_i2c_probe,
-       .remove = __exit_p(adc3xxx_i2c_remove),
+       .remove = adc3xxx_i2c_remove,
        .id_table = adc3xxx_i2c_id,
 };
 
index 860e66ec85e8a3748c4769e691e4201fe1b12695..9fa020ef7eab9bb565b9c66527ac1f8b35b65303 100644 (file)
@@ -25,8 +25,6 @@
 #define DEFAULT_MCLK_FS                                256
 #define CH_GRP_MAX                             4  /* The max channel 8 / 2 */
 #define MULTIPLEX_CH_MAX                       10
-#define CLK_PPM_MIN                            -1000
-#define CLK_PPM_MAX                            1000
 
 #define TRCM_TXRX 0
 #define TRCM_TX 1
@@ -53,20 +51,6 @@ struct rk_i2s_tdm_dev {
        struct clk *hclk;
        struct clk *mclk_tx;
        struct clk *mclk_rx;
-       /* The mclk_tx_src is parent of mclk_tx */
-       struct clk *mclk_tx_src;
-       /* The mclk_rx_src is parent of mclk_rx */
-       struct clk *mclk_rx_src;
-       /*
-        * The mclk_root0 and mclk_root1 are root parent and supplies for
-        * the different FS.
-        *
-        * e.g:
-        * mclk_root0 is VPLL0, used for FS=48000Hz
-        * mclk_root1 is VPLL1, used for FS=44100Hz
-        */
-       struct clk *mclk_root0;
-       struct clk *mclk_root1;
        struct regmap *regmap;
        struct regmap *grf;
        struct snd_dmaengine_dai_dma_data capture_dma_data;
@@ -76,19 +60,11 @@ struct rk_i2s_tdm_dev {
        const struct rk_i2s_soc_data *soc_data;
        bool is_master_mode;
        bool io_multiplex;
-       bool mclk_calibrate;
        bool tdm_mode;
-       unsigned int mclk_rx_freq;
-       unsigned int mclk_tx_freq;
-       unsigned int mclk_root0_freq;
-       unsigned int mclk_root1_freq;
-       unsigned int mclk_root0_initial_freq;
-       unsigned int mclk_root1_initial_freq;
        unsigned int frame_width;
        unsigned int clk_trcm;
        unsigned int i2s_sdis[CH_GRP_MAX];
        unsigned int i2s_sdos[CH_GRP_MAX];
-       int clk_ppm;
        int refcount;
        spinlock_t lock; /* xfer lock */
        bool has_playback;
@@ -114,12 +90,6 @@ static void i2s_tdm_disable_unprepare_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
 {
        clk_disable_unprepare(i2s_tdm->mclk_tx);
        clk_disable_unprepare(i2s_tdm->mclk_rx);
-       if (i2s_tdm->mclk_calibrate) {
-               clk_disable_unprepare(i2s_tdm->mclk_tx_src);
-               clk_disable_unprepare(i2s_tdm->mclk_rx_src);
-               clk_disable_unprepare(i2s_tdm->mclk_root0);
-               clk_disable_unprepare(i2s_tdm->mclk_root1);
-       }
 }
 
 /**
@@ -142,29 +112,9 @@ static int i2s_tdm_prepare_enable_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
        ret = clk_prepare_enable(i2s_tdm->mclk_rx);
        if (ret)
                goto err_mclk_rx;
-       if (i2s_tdm->mclk_calibrate) {
-               ret = clk_prepare_enable(i2s_tdm->mclk_tx_src);
-               if (ret)
-                       goto err_mclk_rx;
-               ret = clk_prepare_enable(i2s_tdm->mclk_rx_src);
-               if (ret)
-                       goto err_mclk_rx_src;
-               ret = clk_prepare_enable(i2s_tdm->mclk_root0);
-               if (ret)
-                       goto err_mclk_root0;
-               ret = clk_prepare_enable(i2s_tdm->mclk_root1);
-               if (ret)
-                       goto err_mclk_root1;
-       }
 
        return 0;
 
-err_mclk_root1:
-       clk_disable_unprepare(i2s_tdm->mclk_root0);
-err_mclk_root0:
-       clk_disable_unprepare(i2s_tdm->mclk_rx_src);
-err_mclk_rx_src:
-       clk_disable_unprepare(i2s_tdm->mclk_tx_src);
 err_mclk_rx:
        clk_disable_unprepare(i2s_tdm->mclk_tx);
 err_mclk_tx:
@@ -564,159 +514,6 @@ static void rockchip_i2s_tdm_xfer_resume(struct snd_pcm_substream *substream,
                           I2S_XFER_RXS_START);
 }
 
-static int rockchip_i2s_tdm_clk_set_rate(struct rk_i2s_tdm_dev *i2s_tdm,
-                                        struct clk *clk, unsigned long rate,
-                                        int ppm)
-{
-       unsigned long rate_target;
-       int delta, ret;
-
-       if (ppm == i2s_tdm->clk_ppm)
-               return 0;
-
-       if (ppm < 0)
-               delta = -1;
-       else
-               delta = 1;
-
-       delta *= (int)div64_u64((u64)rate * (u64)abs(ppm) + 500000,
-                               1000000);
-
-       rate_target = rate + delta;
-
-       if (!rate_target)
-               return -EINVAL;
-
-       ret = clk_set_rate(clk, rate_target);
-       if (ret)
-               return ret;
-
-       i2s_tdm->clk_ppm = ppm;
-
-       return 0;
-}
-
-static int rockchip_i2s_tdm_calibrate_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
-                                          struct snd_pcm_substream *substream,
-                                          unsigned int lrck_freq)
-{
-       struct clk *mclk_root;
-       struct clk *mclk_parent;
-       unsigned int mclk_root_freq;
-       unsigned int mclk_root_initial_freq;
-       unsigned int mclk_parent_freq;
-       unsigned int div, delta;
-       u64 ppm;
-       int ret;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               mclk_parent = i2s_tdm->mclk_tx_src;
-       else
-               mclk_parent = i2s_tdm->mclk_rx_src;
-
-       switch (lrck_freq) {
-       case 8000:
-       case 16000:
-       case 24000:
-       case 32000:
-       case 48000:
-       case 64000:
-       case 96000:
-       case 192000:
-               mclk_root = i2s_tdm->mclk_root0;
-               mclk_root_freq = i2s_tdm->mclk_root0_freq;
-               mclk_root_initial_freq = i2s_tdm->mclk_root0_initial_freq;
-               mclk_parent_freq = DEFAULT_MCLK_FS * 192000;
-               break;
-       case 11025:
-       case 22050:
-       case 44100:
-       case 88200:
-       case 176400:
-               mclk_root = i2s_tdm->mclk_root1;
-               mclk_root_freq = i2s_tdm->mclk_root1_freq;
-               mclk_root_initial_freq = i2s_tdm->mclk_root1_initial_freq;
-               mclk_parent_freq = DEFAULT_MCLK_FS * 176400;
-               break;
-       default:
-               dev_err(i2s_tdm->dev, "Invalid LRCK frequency: %u Hz\n",
-                       lrck_freq);
-               return -EINVAL;
-       }
-
-       ret = clk_set_parent(mclk_parent, mclk_root);
-       if (ret)
-               return ret;
-
-       ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, mclk_root,
-                                           mclk_root_freq, 0);
-       if (ret)
-               return ret;
-
-       delta = abs(mclk_root_freq % mclk_parent_freq - mclk_parent_freq);
-       ppm = div64_u64((uint64_t)delta * 1000000, (uint64_t)mclk_root_freq);
-
-       if (ppm) {
-               div = DIV_ROUND_CLOSEST(mclk_root_initial_freq, mclk_parent_freq);
-               if (!div)
-                       return -EINVAL;
-
-               mclk_root_freq = mclk_parent_freq * round_up(div, 2);
-
-               ret = clk_set_rate(mclk_root, mclk_root_freq);
-               if (ret)
-                       return ret;
-
-               i2s_tdm->mclk_root0_freq = clk_get_rate(i2s_tdm->mclk_root0);
-               i2s_tdm->mclk_root1_freq = clk_get_rate(i2s_tdm->mclk_root1);
-       }
-
-       return clk_set_rate(mclk_parent, mclk_parent_freq);
-}
-
-static int rockchip_i2s_tdm_set_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
-                                    struct snd_pcm_substream *substream,
-                                    struct clk **mclk)
-{
-       unsigned int mclk_freq;
-       int ret;
-
-       if (i2s_tdm->clk_trcm) {
-               if (i2s_tdm->mclk_tx_freq != i2s_tdm->mclk_rx_freq) {
-                       dev_err(i2s_tdm->dev,
-                               "clk_trcm, tx: %d and rx: %d should be the same\n",
-                               i2s_tdm->mclk_tx_freq,
-                               i2s_tdm->mclk_rx_freq);
-                       return -EINVAL;
-               }
-
-               ret = clk_set_rate(i2s_tdm->mclk_tx, i2s_tdm->mclk_tx_freq);
-               if (ret)
-                       return ret;
-
-               ret = clk_set_rate(i2s_tdm->mclk_rx, i2s_tdm->mclk_rx_freq);
-               if (ret)
-                       return ret;
-
-               /* mclk_rx is also ok. */
-               *mclk = i2s_tdm->mclk_tx;
-       } else {
-               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-                       *mclk = i2s_tdm->mclk_tx;
-                       mclk_freq = i2s_tdm->mclk_tx_freq;
-               } else {
-                       *mclk = i2s_tdm->mclk_rx;
-                       mclk_freq = i2s_tdm->mclk_rx_freq;
-               }
-
-               ret = clk_set_rate(*mclk, mclk_freq);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 static int rockchip_i2s_ch_to_io(unsigned int ch, bool substream_capture)
 {
        if (substream_capture) {
@@ -853,19 +650,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
                                      struct snd_soc_dai *dai)
 {
        struct rk_i2s_tdm_dev *i2s_tdm = to_info(dai);
-       struct clk *mclk;
-       int ret = 0;
        unsigned int val = 0;
        unsigned int mclk_rate, bclk_rate, div_bclk = 4, div_lrck = 64;
+       int err;
 
        if (i2s_tdm->is_master_mode) {
-               if (i2s_tdm->mclk_calibrate)
-                       rockchip_i2s_tdm_calibrate_mclk(i2s_tdm, substream,
-                                                       params_rate(params));
+               struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+                       i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
 
-               ret = rockchip_i2s_tdm_set_mclk(i2s_tdm, substream, &mclk);
-               if (ret)
-                       return ret;
+               err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
+               if (err)
+                       return err;
 
                mclk_rate = clk_get_rate(mclk);
                bclk_rate = i2s_tdm->frame_width * params_rate(params);
@@ -973,96 +768,6 @@ static int rockchip_i2s_tdm_trigger(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
-                                      unsigned int freq, int dir)
-{
-       struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
-
-       /* Put set mclk rate into rockchip_i2s_tdm_set_mclk() */
-       if (i2s_tdm->clk_trcm) {
-               i2s_tdm->mclk_tx_freq = freq;
-               i2s_tdm->mclk_rx_freq = freq;
-       } else {
-               if (stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       i2s_tdm->mclk_tx_freq = freq;
-               else
-                       i2s_tdm->mclk_rx_freq = freq;
-       }
-
-       dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
-               stream ? "rx" : "tx", freq);
-
-       return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_info(struct snd_kcontrol *kcontrol,
-                                                 struct snd_ctl_elem_info *uinfo)
-{
-       uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-       uinfo->count = 1;
-       uinfo->value.integer.min = CLK_PPM_MIN;
-       uinfo->value.integer.max = CLK_PPM_MAX;
-       uinfo->value.integer.step = 1;
-
-       return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_get(struct snd_kcontrol *kcontrol,
-                                                struct snd_ctl_elem_value *ucontrol)
-{
-       struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
-       struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
-
-       ucontrol->value.integer.value[0] = i2s_tdm->clk_ppm;
-
-       return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_put(struct snd_kcontrol *kcontrol,
-                                                struct snd_ctl_elem_value *ucontrol)
-{
-       struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
-       struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
-       int ret = 0, ppm = 0;
-       int changed = 0;
-       unsigned long old_rate;
-
-       if (ucontrol->value.integer.value[0] < CLK_PPM_MIN ||
-           ucontrol->value.integer.value[0] > CLK_PPM_MAX)
-               return -EINVAL;
-
-       ppm = ucontrol->value.integer.value[0];
-
-       old_rate = clk_get_rate(i2s_tdm->mclk_root0);
-       ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root0,
-                                           i2s_tdm->mclk_root0_freq, ppm);
-       if (ret)
-               return ret;
-       if (old_rate != clk_get_rate(i2s_tdm->mclk_root0))
-               changed = 1;
-
-       if (clk_is_match(i2s_tdm->mclk_root0, i2s_tdm->mclk_root1))
-               return changed;
-
-       old_rate = clk_get_rate(i2s_tdm->mclk_root1);
-       ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root1,
-                                           i2s_tdm->mclk_root1_freq, ppm);
-       if (ret)
-               return ret;
-       if (old_rate != clk_get_rate(i2s_tdm->mclk_root1))
-               changed = 1;
-
-       return changed;
-}
-
-static struct snd_kcontrol_new rockchip_i2s_tdm_compensation_control = {
-       .iface = SNDRV_CTL_ELEM_IFACE_PCM,
-       .name = "PCM Clock Compensation in PPM",
-       .info = rockchip_i2s_tdm_clk_compensation_info,
-       .get = rockchip_i2s_tdm_clk_compensation_get,
-       .put = rockchip_i2s_tdm_clk_compensation_put,
-};
-
 static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
 {
        struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
@@ -1072,9 +777,6 @@ static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
        if (i2s_tdm->has_playback)
                snd_soc_dai_dma_data_set_playback(dai, &i2s_tdm->playback_dma_data);
 
-       if (i2s_tdm->mclk_calibrate)
-               snd_soc_add_dai_controls(dai, &rockchip_i2s_tdm_compensation_control, 1);
-
        return 0;
 }
 
@@ -1115,7 +817,6 @@ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
        .probe = rockchip_i2s_tdm_dai_probe,
        .hw_params = rockchip_i2s_tdm_hw_params,
        .set_bclk_ratio = rockchip_i2s_tdm_set_bclk_ratio,
-       .set_sysclk = rockchip_i2s_tdm_set_sysclk,
        .set_fmt = rockchip_i2s_tdm_set_fmt,
        .set_tdm_slot = rockchip_dai_tdm_slot,
        .trigger = rockchip_i2s_tdm_trigger,
@@ -1444,35 +1145,6 @@ static void rockchip_i2s_tdm_path_config(struct rk_i2s_tdm_dev *i2s_tdm,
                rockchip_i2s_tdm_tx_path_config(i2s_tdm, num);
 }
 
-static int rockchip_i2s_tdm_get_calibrate_mclks(struct rk_i2s_tdm_dev *i2s_tdm)
-{
-       int num_mclks = 0;
-
-       i2s_tdm->mclk_tx_src = devm_clk_get(i2s_tdm->dev, "mclk_tx_src");
-       if (!IS_ERR(i2s_tdm->mclk_tx_src))
-               num_mclks++;
-
-       i2s_tdm->mclk_rx_src = devm_clk_get(i2s_tdm->dev, "mclk_rx_src");
-       if (!IS_ERR(i2s_tdm->mclk_rx_src))
-               num_mclks++;
-
-       i2s_tdm->mclk_root0 = devm_clk_get(i2s_tdm->dev, "mclk_root0");
-       if (!IS_ERR(i2s_tdm->mclk_root0))
-               num_mclks++;
-
-       i2s_tdm->mclk_root1 = devm_clk_get(i2s_tdm->dev, "mclk_root1");
-       if (!IS_ERR(i2s_tdm->mclk_root1))
-               num_mclks++;
-
-       if (num_mclks < 4 && num_mclks != 0)
-               return -ENOENT;
-
-       if (num_mclks == 4)
-               i2s_tdm->mclk_calibrate = 1;
-
-       return 0;
-}
-
 static int rockchip_i2s_tdm_path_prepare(struct rk_i2s_tdm_dev *i2s_tdm,
                                         struct device_node *np,
                                         bool is_rx_path)
@@ -1610,11 +1282,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
        i2s_tdm->io_multiplex =
                of_property_read_bool(node, "rockchip,io-multiplex");
 
-       ret = rockchip_i2s_tdm_get_calibrate_mclks(i2s_tdm);
-       if (ret)
-               return dev_err_probe(i2s_tdm->dev, ret,
-                                    "mclk-calibrate clocks missing");
-
        regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(regs)) {
                return dev_err_probe(i2s_tdm->dev, PTR_ERR(regs),
@@ -1667,13 +1334,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
                goto err_disable_hclk;
        }
 
-       if (i2s_tdm->mclk_calibrate) {
-               i2s_tdm->mclk_root0_initial_freq = clk_get_rate(i2s_tdm->mclk_root0);
-               i2s_tdm->mclk_root1_initial_freq = clk_get_rate(i2s_tdm->mclk_root1);
-               i2s_tdm->mclk_root0_freq = i2s_tdm->mclk_root0_initial_freq;
-               i2s_tdm->mclk_root1_freq = i2s_tdm->mclk_root1_initial_freq;
-       }
-
        pm_runtime_enable(&pdev->dev);
 
        regmap_update_bits(i2s_tdm->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
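For orientation before the next file: the calibration path removed above trimmed the audio root clocks by a signed parts-per-million offset, while the surviving hw_params path simply pins mclk to DEFAULT_MCLK_FS times the sample rate. A minimal standalone sketch of both computations, assuming DEFAULT_MCLK_FS is 256 as in this driver family (the constant and the helper name ppm_adjust are assumptions for illustration, not kernel code):

#include <stdio.h>

#define DEFAULT_MCLK_FS 256	/* assumed; matches this driver family */

/* Same rounding as the deleted rockchip_i2s_tdm_clk_set_rate() helper:
 * delta = round(rate * |ppm| / 1e6), applied with the sign of ppm. */
static unsigned long ppm_adjust(unsigned long rate, int ppm)
{
	unsigned long mag = ppm < 0 ? -ppm : ppm;
	unsigned long delta = ((unsigned long long)rate * mag + 500000) / 1000000;

	return ppm < 0 ? rate - delta : rate + delta;
}

int main(void)
{
	unsigned long rate = 48000;
	unsigned long mclk = (unsigned long)DEFAULT_MCLK_FS * rate;

	printf("mclk for 48 kHz: %lu Hz\n", mclk);	/* 12288000 */
	printf("mclk at +100 ppm: %lu Hz\n", ppm_adjust(mclk, 100)); /* 12289229 */
	return 0;
}

With the calibration kcontrol gone, only the first computation survives in the driver.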
index a38fee48ee005afcbb66b8d4ead712cb5d2c5af8..e692aa3b8b22f84537a3c171e0c3936c3a5f28b8 100644 (file)
@@ -385,11 +385,15 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
 
        fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
 
+       snd_soc_dpcm_mutex_lock(fe);
        ret = dpcm_be_dai_hw_params(fe, stream);
+       snd_soc_dpcm_mutex_unlock(fe);
        if (ret < 0)
                goto out;
 
+       snd_soc_dpcm_mutex_lock(fe);
        ret = dpcm_be_dai_prepare(fe, stream);
+       snd_soc_dpcm_mutex_unlock(fe);
        if (ret < 0)
                goto out;
 
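The rule this fix enforces is that the backend hw_params and prepare steps always run under the FE's DPCM mutex. A condensed sketch of that rule in one place; fe_configure_backends() is a hypothetical helper for illustration, not part of the patch:

static int fe_configure_backends(struct snd_soc_pcm_runtime *fe, int stream)
{
	int ret;

	snd_soc_dpcm_mutex_lock(fe);
	ret = dpcm_be_dai_hw_params(fe, stream);
	snd_soc_dpcm_mutex_unlock(fe);
	if (ret < 0)
		return ret;

	snd_soc_dpcm_mutex_lock(fe);
	ret = dpcm_be_dai_prepare(fe, stream);
	snd_soc_dpcm_mutex_unlock(fe);

	return ret;
}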
index 1e94edba12eb85706d0f1f0acf7c75ad3a8249eb..2ec13d1634b636455f51ab541773e5587003e14b 100644 (file)
@@ -1219,6 +1219,9 @@ static int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
                        if (!snd_soc_is_matching_component(platform, component))
                                continue;
 
+                       if (snd_soc_component_is_dummy(component) && component->num_dai)
+                               continue;
+
                        snd_soc_rtd_add_component(rtd, component);
                }
        }
index d2d21478399e02d1fd010ad4897b08e58602fd73..aad904839b817cd74d3731d8b06e8739c27bb43e 100644 (file)
@@ -173,7 +173,7 @@ int acp_dsp_pre_fw_run(struct snd_sof_dev *sdev)
 
        adata = sdev->pdata->hw_pdata;
 
-       if (adata->signed_fw_image)
+       if (adata->quirks && adata->quirks->signed_fw_image)
                size_fw = adata->fw_bin_size - ACP_FIRMWARE_SIGNATURE;
        else
                size_fw = adata->fw_bin_size;
index 9b3c26210db38f97581bf0bb5d0346aa2d06fc49..be7dc1e02284ab62f8cbaeffdd70f26a19ff6232 100644 (file)
 #include "acp.h"
 #include "acp-dsp-offset.h"
 
-#define SECURED_FIRMWARE 1
-
 static bool enable_fw_debug;
 module_param(enable_fw_debug, bool, 0444);
 MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");
 
+static struct acp_quirk_entry quirk_valve_galileo = {
+       .signed_fw_image = true,
+       .skip_iram_dram_size_mod = true,
+};
+
 const struct dmi_system_id acp_sof_quirk_table[] = {
        {
                /* Steam Deck OLED device */
@@ -33,7 +36,7 @@ const struct dmi_system_id acp_sof_quirk_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
                },
-               .driver_data = (void *)SECURED_FIRMWARE,
+               .driver_data = &quirk_valve_galileo,
        },
        {}
 };
@@ -254,7 +257,7 @@ int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
                }
        }
 
-       if (adata->signed_fw_image)
+       if (adata->quirks && adata->quirks->signed_fw_image)
                snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_INCLUDE_HDR, ACP_SHA_HEADER);
 
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
@@ -278,7 +281,7 @@ int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
        }
 
        /* psp_send_cmd only required for vangogh platform (rev - 5) */
-       if (desc->rev == 5) {
+       if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
                /* Modify IRAM and DRAM size */
                ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
                if (ret)
@@ -738,26 +741,27 @@ skip_soundwire:
        sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
        sdev->debug_box.size = BOX_SIZE_1024;
 
-       adata->signed_fw_image = false;
        dmi_id = dmi_first_match(acp_sof_quirk_table);
-       if (dmi_id && dmi_id->driver_data) {
-               adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
-                                                   "sof-%s-code.bin",
-                                                   chip->name);
-               if (!adata->fw_code_bin) {
-                       ret = -ENOMEM;
-                       goto free_ipc_irq;
+       if (dmi_id) {
+               adata->quirks = dmi_id->driver_data;
+
+               if (adata->quirks->signed_fw_image) {
+                       adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
+                                                           "sof-%s-code.bin",
+                                                           chip->name);
+                       if (!adata->fw_code_bin) {
+                               ret = -ENOMEM;
+                               goto free_ipc_irq;
+                       }
+
+                       adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
+                                                           "sof-%s-data.bin",
+                                                           chip->name);
+                       if (!adata->fw_data_bin) {
+                               ret = -ENOMEM;
+                               goto free_ipc_irq;
+                       }
                }
-
-               adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
-                                                   "sof-%s-data.bin",
-                                                   chip->name);
-               if (!adata->fw_data_bin) {
-                       ret = -ENOMEM;
-                       goto free_ipc_irq;
-               }
-
-               adata->signed_fw_image = dmi_id->driver_data;
        }
 
        adata->enable_fw_debug = enable_fw_debug;
index 947068da39b5350e0f14d05a47b3e4ba1630a7e0..e229bb6b849d6ba51c137bb5761756f7b6ec8e3f 100644 (file)
@@ -207,6 +207,11 @@ struct sof_amd_acp_desc {
        u64 sdw_acpi_dev_addr;
 };
 
+struct acp_quirk_entry {
+       bool signed_fw_image;
+       bool skip_iram_dram_size_mod;
+};
+
 /* Common device data struct for ACP devices */
 struct acp_dev_data {
        struct snd_sof_dev  *dev;
@@ -236,7 +241,7 @@ struct acp_dev_data {
        u8 *data_buf;
        dma_addr_t sram_dma_addr;
        u8 *sram_data_buf;
-       bool signed_fw_image;
+       struct acp_quirk_entry *quirks;
        struct dma_descriptor dscr_info[ACP_MAX_DESC];
        struct acp_dsp_stream stream_buf[ACP_MAX_STREAM];
        struct acp_dsp_stream *dtrace_stream;
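With the boolean replaced by a quirk struct, covering a new machine becomes a two-step edit: define an acp_quirk_entry and point a DMI match at it. A sketch with invented vendor and product strings:

static struct acp_quirk_entry quirk_example_board = {
	.signed_fw_image = false,
	.skip_iram_dram_size_mod = true,
};

/* ...plus a matching entry in acp_sof_quirk_table[]: */
{
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
	},
	.driver_data = &quirk_example_board,
},

Consumers then test individual flags, as the adata->quirks && adata->quirks->signed_fw_image checks above do, so unrelated quirks no longer piggyback on a single value.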
index de15d21aa6d982ab218e317a0661f13ba744e982..bc6ffdb5471a58ae4b30b27ba166fd356a974edd 100644 (file)
@@ -143,6 +143,7 @@ EXPORT_SYMBOL_NS(sof_vangogh_ops, SND_SOC_SOF_AMD_COMMON);
 int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
 {
        const struct dmi_system_id *dmi_id;
+       struct acp_quirk_entry *quirks;
 
        /* common defaults */
        memcpy(&sof_vangogh_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
@@ -151,8 +152,12 @@ int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
        sof_vangogh_ops.num_drv = ARRAY_SIZE(vangogh_sof_dai);
 
        dmi_id = dmi_first_match(acp_sof_quirk_table);
-       if (dmi_id && dmi_id->driver_data)
-               sof_vangogh_ops.load_firmware = acp_sof_load_signed_firmware;
+       if (dmi_id) {
+               quirks = dmi_id->driver_data;
+
+               if (quirks->signed_fw_image)
+                       sof_vangogh_ops.load_firmware = acp_sof_load_signed_firmware;
+       }
 
        return 0;
 }
index c82a7f41b31c571c09c3dd454a7db3ebbb8a1e60..45e49671ae87b01fac48b2877f46403cf9ee1c36 100644 (file)
@@ -466,6 +466,8 @@ ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version)
 
 int ynl_recv_ack(struct ynl_sock *ys, int ret)
 {
+       struct ynl_parse_arg yarg = { .ys = ys, };
+
        if (!ret) {
                yerr(ys, YNL_ERROR_EXPECT_ACK,
                     "Expecting an ACK but nothing received");
@@ -478,7 +480,7 @@ int ynl_recv_ack(struct ynl_sock *ys, int ret)
                return ret;
        }
        return mnl_cb_run(ys->rx_buf, ret, ys->seq, ys->portid,
-                         ynl_cb_null, ys);
+                         ynl_cb_null, &yarg);
 }
 
 int ynl_cb_null(const struct nlmsghdr *nlh, void *data)
@@ -521,6 +523,7 @@ ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts)
                                ys->mcast_groups[i].name[GENL_NAMSIZ - 1] = 0;
                        }
                }
+               i++;
        }
 
        return 0;
@@ -586,7 +589,13 @@ static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
                return err;
        }
 
-       return ynl_recv_ack(ys, err);
+       err = ynl_recv_ack(ys, err);
+       if (err < 0) {
+               free(ys->mcast_groups);
+               return err;
+       }
+
+       return 0;
 }
 
 struct ynl_sock *
@@ -741,11 +750,14 @@ err_free:
 
 static int ynl_ntf_trampoline(const struct nlmsghdr *nlh, void *data)
 {
-       return ynl_ntf_parse((struct ynl_sock *)data, nlh);
+       struct ynl_parse_arg *yarg = data;
+
+       return ynl_ntf_parse(yarg->ys, nlh);
 }
 
 int ynl_ntf_check(struct ynl_sock *ys)
 {
+       struct ynl_parse_arg yarg = { .ys = ys, };
        ssize_t len;
        int err;
 
@@ -767,7 +779,7 @@ int ynl_ntf_check(struct ynl_sock *ys)
                        return len;
 
                err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
-                                 ynl_ntf_trampoline, ys,
+                                 ynl_ntf_trampoline, &yarg,
                                  ynl_cb_array, NLMSG_MIN_TYPE);
                if (err < 0)
                        return err;
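The common thread in these libynl fixes: every mnl callback the library registers now receives a struct ynl_parse_arg rather than a bare struct ynl_sock, so parse-time state travels alongside the socket pointer. A minimal sketch of the convention (example_cb is an invented name):

static int example_cb(const struct nlmsghdr *nlh, void *data)
{
	struct ynl_parse_arg *yarg = data;	/* never cast data to ynl_sock */

	return ynl_ntf_parse(yarg->ys, nlh);
}

/* caller side, as in ynl_ntf_check() above: */
struct ynl_parse_arg yarg = { .ys = ys, };

err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
		  example_cb, &yarg, ynl_cb_array, NLMSG_MIN_TYPE);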
index caff3834671f9dfb7d261d5b6633532f71ecd9f5..030b388800f05191e5b6492b97ebf96e9d45dcf3 100644 (file)
@@ -13,6 +13,7 @@ ldflags-y += --wrap=cxl_hdm_decode_init
 ldflags-y += --wrap=cxl_dvsec_rr_decode
 ldflags-y += --wrap=devm_cxl_add_rch_dport
 ldflags-y += --wrap=cxl_rcd_component_reg_phys
+ldflags-y += --wrap=cxl_endpoint_parse_cdat
 
 DRIVERS := ../../../drivers
 CXL_SRC := $(DRIVERS)/cxl
index a3cdbb2be038c45e27326925d81ba43294b56c31..908e0d0839369c2e41f090bddc2e9a9b9121b4c9 100644 (file)
@@ -15,6 +15,8 @@
 
 static int interleave_arithmetic;
 
+#define FAKE_QTG_ID    42
+
 #define NR_CXL_HOST_BRIDGES 2
 #define NR_CXL_SINGLE_HOST 1
 #define NR_CXL_RCH 1
@@ -209,7 +211,7 @@ static struct {
                        .granularity = 4,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
-                       .qtg_id = 0,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M * 4UL,
                },
                .target = { 0 },
@@ -224,7 +226,7 @@ static struct {
                        .granularity = 4,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
-                       .qtg_id = 1,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M * 8UL,
                },
                .target = { 0, 1, },
@@ -239,7 +241,7 @@ static struct {
                        .granularity = 4,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-                       .qtg_id = 2,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M * 4UL,
                },
                .target = { 0 },
@@ -254,7 +256,7 @@ static struct {
                        .granularity = 4,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-                       .qtg_id = 3,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M * 8UL,
                },
                .target = { 0, 1, },
@@ -269,7 +271,7 @@ static struct {
                        .granularity = 4,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-                       .qtg_id = 4,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M * 4UL,
                },
                .target = { 2 },
@@ -284,7 +286,7 @@ static struct {
                        .granularity = 4,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
-                       .qtg_id = 5,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M,
                },
                .target = { 3 },
@@ -301,7 +303,7 @@ static struct {
                        .granularity = 4,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-                       .qtg_id = 0,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M * 8UL,
                },
                .target = { 0, },
@@ -317,7 +319,7 @@ static struct {
                        .granularity = 0,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-                       .qtg_id = 1,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M * 8UL,
                },
                .target = { 0, 1, },
@@ -333,7 +335,7 @@ static struct {
                        .granularity = 0,
                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
                                        ACPI_CEDT_CFMWS_RESTRICT_PMEM,
-                       .qtg_id = 0,
+                       .qtg_id = FAKE_QTG_ID,
                        .window_size = SZ_256M * 16UL,
                },
                .target = { 0, 1, 0, 1, },
@@ -976,6 +978,48 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
        return 0;
 }
 
+/*
+ * Fake the cxl_dpa_perf data for the memdev when appropriate.
+ */
+static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
+                          struct cxl_dpa_perf *dpa_perf)
+{
+       dpa_perf->qos_class = FAKE_QTG_ID;
+       dpa_perf->dpa_range = *range;
+       dpa_perf->coord.read_latency = 500;
+       dpa_perf->coord.write_latency = 500;
+       dpa_perf->coord.read_bandwidth = 1000;
+       dpa_perf->coord.write_bandwidth = 1000;
+}
+
+static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+       struct cxl_root *cxl_root __free(put_cxl_root) =
+               find_cxl_root(port);
+       struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+       struct range pmem_range = {
+               .start = cxlds->pmem_res.start,
+               .end = cxlds->pmem_res.end,
+       };
+       struct range ram_range = {
+               .start = cxlds->ram_res.start,
+               .end = cxlds->ram_res.end,
+       };
+
+       if (!cxl_root)
+               return;
+
+       if (range_len(&ram_range))
+               dpa_perf_setup(port, &ram_range, &mds->ram_perf);
+
+       if (range_len(&pmem_range))
+               dpa_perf_setup(port, &pmem_range, &mds->pmem_perf);
+
+       cxl_memdev_update_perf(cxlmd);
+}
+
 static struct cxl_mock_ops cxl_mock_ops = {
        .is_mock_adev = is_mock_adev,
        .is_mock_bridge = is_mock_bridge,
@@ -989,6 +1033,7 @@ static struct cxl_mock_ops cxl_mock_ops = {
        .devm_cxl_setup_hdm = mock_cxl_setup_hdm,
        .devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
        .devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
+       .cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
        .list = LIST_HEAD_INIT(cxl_mock_ops.list),
 };
 
index 1a61e68e30950ba623b52c5920a7874e3d97b9cd..6f737941dc0e164b9611e9dac91cb9e55b69e715 100644 (file)
@@ -285,6 +285,20 @@ resource_size_t __wrap_cxl_rcd_component_reg_phys(struct device *dev,
 }
 EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcd_component_reg_phys, CXL);
 
+void __wrap_cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+       int index;
+       struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+       struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+
+       if (ops && ops->is_mock_dev(cxlmd->dev.parent))
+               ops->cxl_endpoint_parse_cdat(port);
+       else
+               cxl_endpoint_parse_cdat(port);
+       put_cxl_mock_ops(index);
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_endpoint_parse_cdat, CXL);
+
 MODULE_LICENSE("GPL v2");
 MODULE_IMPORT_NS(ACPI);
 MODULE_IMPORT_NS(CXL);
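The __wrap_cxl_endpoint_parse_cdat() above only works because of the ldflags-y += --wrap=cxl_endpoint_parse_cdat line added to the test Makefile earlier in this series: the linker redirects callers of the wrapped symbol to __wrap_<symbol> and exposes the original as __real_<symbol>. A generic userspace illustration of the idiom, with invented names (do_work is assumed to be defined in a separately linked object; build the final binary with -Wl,--wrap=do_work):

/* wrapper.c */
#include <stdio.h>

int __real_do_work(int x);	/* resolved by the linker to the original */

int __wrap_do_work(int x)	/* every call to do_work() lands here */
{
	printf("intercepted do_work(%d)\n", x);
	return __real_do_work(x);	/* fall through to the real one */
}

The CXL mock follows the same shape: intercept, check is_mock_dev(), and fall back to the genuine cxl_endpoint_parse_cdat() for non-mocked devices.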
index a94223750346c8d897197f3171091459982d29ac..d1b0271d282203b7bccac68aedef0646d1391d59 100644 (file)
@@ -25,6 +25,7 @@ struct cxl_mock_ops {
        int (*devm_cxl_add_passthrough_decoder)(struct cxl_port *port);
        int (*devm_cxl_enumerate_decoders)(
                struct cxl_hdm *hdm, struct cxl_endpoint_dvsec_info *info);
+       void (*cxl_endpoint_parse_cdat)(struct cxl_port *port);
 };
 
 void register_cxl_mock_ops(struct cxl_mock_ops *ops);
index bf84d4a1d9ae2c68ceeac9f25373fd9df01b6935..3c440370c1f0f2b9cc67a754da8087e447efa625 100644 (file)
@@ -193,6 +193,7 @@ static void subtest_task_iters(void)
        ASSERT_EQ(skel->bss->procs_cnt, 1, "procs_cnt");
        ASSERT_EQ(skel->bss->threads_cnt, thread_num + 1, "threads_cnt");
        ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 1, "proc_threads_cnt");
+       ASSERT_EQ(skel->bss->invalid_cnt, 0, "invalid_cnt");
        pthread_mutex_unlock(&do_nothing_mutex);
        for (int i = 0; i < thread_num; i++)
                ASSERT_OK(pthread_join(thread_ids[i], &ret), "pthread_join");
diff --git a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
new file mode 100644 (file)
index 0000000..3405923
--- /dev/null
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include "test_progs.h"
+#include "read_vsyscall.skel.h"
+
+#if defined(__x86_64__)
+/* For VSYSCALL_ADDR */
+#include <asm/vsyscall.h>
+#else
+/* To prevent build failure on non-x86 arch */
+#define VSYSCALL_ADDR 0UL
+#endif
+
+struct read_ret_desc {
+       const char *name;
+       int ret;
+} all_read[] = {
+       { .name = "probe_read_kernel", .ret = -ERANGE },
+       { .name = "probe_read_kernel_str", .ret = -ERANGE },
+       { .name = "probe_read", .ret = -ERANGE },
+       { .name = "probe_read_str", .ret = -ERANGE },
+       { .name = "probe_read_user", .ret = -EFAULT },
+       { .name = "probe_read_user_str", .ret = -EFAULT },
+       { .name = "copy_from_user", .ret = -EFAULT },
+       { .name = "copy_from_user_task", .ret = -EFAULT },
+};
+
+void test_read_vsyscall(void)
+{
+       struct read_vsyscall *skel;
+       unsigned int i;
+       int err;
+
+#if !defined(__x86_64__)
+       test__skip();
+       return;
+#endif
+       skel = read_vsyscall__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "read_vsyscall open_load"))
+               return;
+
+       skel->bss->target_pid = getpid();
+       err = read_vsyscall__attach(skel);
+       if (!ASSERT_EQ(err, 0, "read_vsyscall attach"))
+               goto out;
+
+       /* Userspace may not have a vsyscall page due to LEGACY_VSYSCALL_NONE,
+        * but that doesn't affect the returned error codes.
+        */
+       skel->bss->user_ptr = (void *)VSYSCALL_ADDR;
+       usleep(1);
+
+       for (i = 0; i < ARRAY_SIZE(all_read); i++)
+               ASSERT_EQ(skel->bss->read_ret[i], all_read[i].ret, all_read[i].name);
+out:
+       read_vsyscall__destroy(skel);
+}
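For context on the expected values in all_read[]: on x86-64 the vsyscall page sits at a fixed address at the very top of the address space, so kernel-side BPF probes reject it as out of range while the user-copy helpers fault. A compact restatement (the constant is the x86-64 value from <asm/vsyscall.h>, repeated here as an assumption):

/* x86-64: VSYSCALL_ADDR = -10UL << 20 */
#define VSYSCALL_ADDR 0xffffffffff600000UL

/* expected results exercised above:
 *   bpf_probe_read_kernel{,_str}, bpf_probe_read{,_str}    -> -ERANGE
 *   bpf_probe_read_user{,_str}, bpf_copy_from_user{,_task} -> -EFAULT
 */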
index 760ad96b4be099ed74779d8895165df5d212f091..d66687f1ee6a8df52cb228010a293a3d4d102216 100644 (file)
@@ -4,10 +4,29 @@
 #include "timer.skel.h"
 #include "timer_failure.skel.h"
 
+#define NUM_THR 8
+
+static void *spin_lock_thread(void *arg)
+{
+       int i, err, prog_fd = *(int *)arg;
+       LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+       for (i = 0; i < 10000; i++) {
+               err = bpf_prog_test_run_opts(prog_fd, &topts);
+               if (!ASSERT_OK(err, "test_run_opts err") ||
+                   !ASSERT_OK(topts.retval, "test_run_opts retval"))
+                       break;
+       }
+
+       pthread_exit(arg);
+}
+
 static int timer(struct timer *timer_skel)
 {
-       int err, prog_fd;
+       int i, err, prog_fd;
        LIBBPF_OPTS(bpf_test_run_opts, topts);
+       pthread_t thread_id[NUM_THR];
+       void *ret;
 
        err = timer__attach(timer_skel);
        if (!ASSERT_OK(err, "timer_attach"))
@@ -43,6 +62,20 @@ static int timer(struct timer *timer_skel)
        /* check that code paths completed */
        ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");
 
+       prog_fd = bpf_program__fd(timer_skel->progs.race);
+       for (i = 0; i < NUM_THR; i++) {
+               err = pthread_create(&thread_id[i], NULL,
+                                    &spin_lock_thread, &prog_fd);
+               if (!ASSERT_OK(err, "pthread_create"))
+                       break;
+       }
+
+       while (i) {
+               err = pthread_join(thread_id[--i], &ret);
+               if (ASSERT_OK(err, "pthread_join"))
+                       ASSERT_EQ(ret, (void *)&prog_fd, "pthread_join");
+       }
+
        return 0;
 }
 
index c3b45745cbccd71b089d23a4ee3f4d1a296c2de6..6d8b54124cb359697bcb85b369c9bcd7457e71b4 100644 (file)
@@ -511,7 +511,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
        if (!ASSERT_OK(err, "bond bpf_xdp_query"))
                goto out;
 
-       if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+       if (!ASSERT_EQ(query_opts.feature_flags, 0,
                       "bond query_opts.feature_flags"))
                goto out;
 
@@ -601,7 +601,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
        if (!ASSERT_OK(err, "bond bpf_xdp_query"))
                goto out;
 
-       ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+       ASSERT_EQ(query_opts.feature_flags, 0,
                  "bond query_opts.feature_flags");
 out:
        bpf_link__destroy(link);
index c9b4055cd410ae6378066e28e2f41c9b23d47ab1..e4d53e40ff2086112dff757581ef37f8fdcbe272 100644 (file)
@@ -10,7 +10,7 @@
 char _license[] SEC("license") = "GPL";
 
 pid_t target_pid;
-int procs_cnt, threads_cnt, proc_threads_cnt;
+int procs_cnt, threads_cnt, proc_threads_cnt, invalid_cnt;
 
 void bpf_rcu_read_lock(void) __ksym;
 void bpf_rcu_read_unlock(void) __ksym;
@@ -26,6 +26,16 @@ int iter_task_for_each_sleep(void *ctx)
        procs_cnt = threads_cnt = proc_threads_cnt = 0;
 
        bpf_rcu_read_lock();
+       bpf_for_each(task, pos, NULL, ~0U) {
+               /* The instructions below must not execute for invalid flags */
+               invalid_cnt++;
+       }
+
+       bpf_for_each(task, pos, NULL, BPF_TASK_ITER_PROC_THREADS) {
+               /* The instructions below must not execute for an invalid (NULL) task__nullable */
+               invalid_cnt++;
+       }
+
        bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS)
                if (pos->pid == target_pid)
                        procs_cnt++;
diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c
new file mode 100644 (file)
index 0000000..986f966
--- /dev/null
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+int target_pid = 0;
+void *user_ptr = 0;
+int read_ret[8];
+
+char _license[] SEC("license") = "GPL";
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int do_probe_read(void *ctx)
+{
+       char buf[8];
+
+       if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+               return 0;
+
+       read_ret[0] = bpf_probe_read_kernel(buf, sizeof(buf), user_ptr);
+       read_ret[1] = bpf_probe_read_kernel_str(buf, sizeof(buf), user_ptr);
+       read_ret[2] = bpf_probe_read(buf, sizeof(buf), user_ptr);
+       read_ret[3] = bpf_probe_read_str(buf, sizeof(buf), user_ptr);
+       read_ret[4] = bpf_probe_read_user(buf, sizeof(buf), user_ptr);
+       read_ret[5] = bpf_probe_read_user_str(buf, sizeof(buf), user_ptr);
+
+       return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int do_copy_from_user(void *ctx)
+{
+       char buf[8];
+
+       if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+               return 0;
+
+       read_ret[6] = bpf_copy_from_user(buf, sizeof(buf), user_ptr);
+       read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr,
+                                             bpf_get_current_task_btf(), 0);
+
+       return 0;
+}
index 8b946c8188c65d10ed86de886ea9aead585de86d..f615da97df26382f4dd758015dd0aaf2cd359614 100644 (file)
@@ -51,7 +51,8 @@ struct {
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct elem);
-} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps");
+} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
+       race_array SEC(".maps");
 
 __u64 bss_data;
 __u64 abs_data;
@@ -390,3 +391,34 @@ int BPF_PROG2(test5, int, a)
 
        return 0;
 }
+
+static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
+{
+       bpf_timer_start(timer, 1000000, 0);
+       return 0;
+}
+
+SEC("syscall")
+int race(void *ctx)
+{
+       struct bpf_timer *timer;
+       int err, race_key = 0;
+       struct elem init;
+
+       __builtin_memset(&init, 0, sizeof(struct elem));
+       bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);
+
+       timer = bpf_map_lookup_elem(&race_array, &race_key);
+       if (!timer)
+               return 1;
+
+       err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
+       if (err && err != -EBUSY)
+               return 1;
+
+       bpf_timer_set_callback(timer, race_timer_callback);
+       bpf_timer_start(timer, 0, 0);
+       bpf_timer_cancel(timer);
+
+       return 0;
+}
index 5905e036e0eaca6aa7c38e06412aabf0e2b1b4b9..a955a6358206eac8a4b5065b531e0171d4cc2a62 100644 (file)
@@ -239,4 +239,74 @@ int bpf_loop_iter_limit_nested(void *unused)
        return 1000 * a + b + c;
 }
 
+struct iter_limit_bug_ctx {
+       __u64 a;
+       __u64 b;
+       __u64 c;
+};
+
+static __naked void iter_limit_bug_cb(void)
+{
+       /* This is the same as C code below, but written
+        * in assembly to control which branches are fall-through.
+        *
+        *   switch (bpf_get_prandom_u32()) {
+        *   case 1:  ctx->a = 42; break;
+        *   case 2:  ctx->b = 42; break;
+        *   default: ctx->c = 42; break;
+        *   }
+        */
+       asm volatile (
+       "r9 = r2;"
+       "call %[bpf_get_prandom_u32];"
+       "r1 = r0;"
+       "r2 = 42;"
+       "r0 = 0;"
+       "if r1 == 0x1 goto 1f;"
+       "if r1 == 0x2 goto 2f;"
+       "*(u64 *)(r9 + 16) = r2;"
+       "exit;"
+       "1: *(u64 *)(r9 + 0) = r2;"
+       "exit;"
+       "2: *(u64 *)(r9 + 8) = r2;"
+       "exit;"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all
+       );
+}
+
+SEC("tc")
+__failure
+__flag(BPF_F_TEST_STATE_FREQ)
+int iter_limit_bug(struct __sk_buff *skb)
+{
+       struct iter_limit_bug_ctx ctx = { 7, 7, 7 };
+
+       bpf_loop(2, iter_limit_bug_cb, &ctx, 0);
+
+       /* This is the same as C code below,
+        * written in assembly to guarantee checks order.
+        *
+        *   if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
+        *     asm volatile("r1 /= 0;":::"r1");
+        */
+       asm volatile (
+       "r1 = *(u64 *)%[ctx_a];"
+       "if r1 != 42 goto 1f;"
+       "r1 = *(u64 *)%[ctx_b];"
+       "if r1 != 42 goto 1f;"
+       "r1 = *(u64 *)%[ctx_c];"
+       "if r1 != 7 goto 1f;"
+       "r1 /= 0;"
+       "1:"
+       :
+       : [ctx_a]"m"(ctx.a),
+         [ctx_b]"m"(ctx.b),
+         [ctx_c]"m"(ctx.c)
+       : "r1"
+       );
+       return 0;
+}
+
 char _license[] SEC("license") = "GPL";
index d508486cc0bdc2c917f9386aa2aea796f12d2c1d..9a3d3c389dadda07d1e8d499fea65e307c656056 100755 (executable)
@@ -62,6 +62,8 @@ prio_test()
 
        # create bond
        bond_reset "${param}"
+       # set active_slave to primary eth1 specifically
+       ip -n ${s_ns} link set bond0 type bond active_slave eth1
 
        # check bonding member prio value
        ip -n ${s_ns} link set eth0 type bond_slave prio 0
index 6c4f901d6fed3c200bbcb40a6ba7dd22c0b2e2bd..110d73917615d177d5d7a891f08d523619c404f3 100644 (file)
@@ -1,2 +1,3 @@
-CONFIG_IOMMUFD
-CONFIG_IOMMUFD_TEST
+CONFIG_IOMMUFD=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_IOMMUFD_TEST=y
index 1a881e7a21d1b26ce7ad19de1cc5ea07d3773ff9..edf1c99c9936c8549e8a2938a2ff11875197b3d4 100644 (file)
@@ -12,6 +12,7 @@
 static unsigned long HUGEPAGE_SIZE;
 
 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
+#define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
 
 static unsigned long get_huge_page_size(void)
 {
@@ -1716,10 +1717,12 @@ FIXTURE(iommufd_dirty_tracking)
 FIXTURE_VARIANT(iommufd_dirty_tracking)
 {
        unsigned long buffer_size;
+       bool hugepages;
 };
 
 FIXTURE_SETUP(iommufd_dirty_tracking)
 {
+       int mmap_flags;
        void *vrc;
        int rc;
 
@@ -1732,25 +1735,41 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
                           variant->buffer_size, rc);
        }
 
+       mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
+       if (variant->hugepages) {
+               /*
+                * MAP_POPULATE will cause the kernel to fail mmap if THPs are
+                * not available.
+                */
+               mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
+       }
        assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
        vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
-                  MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+                  mmap_flags, -1, 0);
        assert(vrc == self->buffer);
 
        self->page_size = MOCK_PAGE_SIZE;
        self->bitmap_size =
                variant->buffer_size / self->page_size / BITS_PER_BYTE;
 
-       /* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */
+       /* Provision with an extra (PAGE_SIZE) for the unaligned case */
        rc = posix_memalign(&self->bitmap, PAGE_SIZE,
-                           self->bitmap_size + MOCK_PAGE_SIZE);
+                           self->bitmap_size + PAGE_SIZE);
        assert(!rc);
        assert(self->bitmap);
        assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
 
        test_ioctl_ioas_alloc(&self->ioas_id);
-       test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
-                            &self->idev_id);
+       /* Enable 1M mock IOMMU hugepages */
+       if (variant->hugepages) {
+               test_cmd_mock_domain_flags(self->ioas_id,
+                                          MOCK_FLAGS_DEVICE_HUGE_IOVA,
+                                          &self->stdev_id, &self->hwpt_id,
+                                          &self->idev_id);
+       } else {
+               test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
+                                    &self->hwpt_id, &self->idev_id);
+       }
 }
 
 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
@@ -1784,12 +1803,26 @@ FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
        .buffer_size = 128UL * 1024UL * 1024UL,
 };
 
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
+{
+       /* 4K bitmap (128M IOVA range) */
+       .buffer_size = 128UL * 1024UL * 1024UL,
+       .hugepages = true,
+};
+
 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M)
 {
        /* 8K bitmap (256M IOVA range) */
        .buffer_size = 256UL * 1024UL * 1024UL,
 };
 
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge)
+{
+       /* 8K bitmap (256M IOVA range) */
+       .buffer_size = 256UL * 1024UL * 1024UL,
+       .hugepages = true,
+};
+
 TEST_F(iommufd_dirty_tracking, enforce_dirty)
 {
        uint32_t ioas_id, stddev_id, idev_id;
@@ -1849,65 +1882,80 @@ TEST_F(iommufd_dirty_tracking, device_dirty_capability)
 
 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
 {
-       uint32_t stddev_id;
+       uint32_t page_size = MOCK_PAGE_SIZE;
        uint32_t hwpt_id;
        uint32_t ioas_id;
 
+       if (variant->hugepages)
+               page_size = MOCK_HUGE_PAGE_SIZE;
+
        test_ioctl_ioas_alloc(&ioas_id);
        test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
                                     variant->buffer_size, MOCK_APERTURE_START);
 
        test_cmd_hwpt_alloc(self->idev_id, ioas_id,
                            IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
-       test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
 
        test_cmd_set_dirty_tracking(hwpt_id, true);
 
        test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
-                               MOCK_APERTURE_START, self->page_size,
+                               MOCK_APERTURE_START, self->page_size, page_size,
                                self->bitmap, self->bitmap_size, 0, _metadata);
 
        /* PAGE_SIZE unaligned bitmap */
        test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
-                               MOCK_APERTURE_START, self->page_size,
+                               MOCK_APERTURE_START, self->page_size, page_size,
                                self->bitmap + MOCK_PAGE_SIZE,
                                self->bitmap_size, 0, _metadata);
 
-       test_ioctl_destroy(stddev_id);
+       /* u64 unaligned bitmap */
+       test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
+                               MOCK_APERTURE_START, self->page_size, page_size,
+                               self->bitmap + 0xff1, self->bitmap_size, 0,
+                               _metadata);
+
        test_ioctl_destroy(hwpt_id);
 }
 
 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
 {
-       uint32_t stddev_id;
+       uint32_t page_size = MOCK_PAGE_SIZE;
        uint32_t hwpt_id;
        uint32_t ioas_id;
 
+       if (variant->hugepages)
+               page_size = MOCK_HUGE_PAGE_SIZE;
+
        test_ioctl_ioas_alloc(&ioas_id);
        test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
                                     variant->buffer_size, MOCK_APERTURE_START);
 
        test_cmd_hwpt_alloc(self->idev_id, ioas_id,
                            IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
-       test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
 
        test_cmd_set_dirty_tracking(hwpt_id, true);
 
        test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
-                               MOCK_APERTURE_START, self->page_size,
+                               MOCK_APERTURE_START, self->page_size, page_size,
                                self->bitmap, self->bitmap_size,
                                IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
                                _metadata);
 
        /* Unaligned bitmap */
        test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
-                               MOCK_APERTURE_START, self->page_size,
+                               MOCK_APERTURE_START, self->page_size, page_size,
                                self->bitmap + MOCK_PAGE_SIZE,
                                self->bitmap_size,
                                IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
                                _metadata);
 
-       test_ioctl_destroy(stddev_id);
+       /* u64 unaligned bitmap */
+       test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
+                               MOCK_APERTURE_START, self->page_size, page_size,
+                               self->bitmap + 0xff1, self->bitmap_size,
+                               IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
+                               _metadata);
+
        test_ioctl_destroy(hwpt_id);
 }
 
index c646264aa41fdc1871c60bba6dc25841767f399b..8d2b46b2114da814f75740992c0dc4b1be14d33b 100644 (file)
@@ -344,16 +344,19 @@ static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
                                                  page_size, bitmap, nr))
 
 static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
-                                   __u64 iova, size_t page_size, __u64 *bitmap,
+                                   __u64 iova, size_t page_size,
+                                   size_t pte_page_size, __u64 *bitmap,
                                    __u64 bitmap_size, __u32 flags,
                                    struct __test_metadata *_metadata)
 {
-       unsigned long i, nbits = bitmap_size * BITS_PER_BYTE;
-       unsigned long nr = nbits / 2;
+       unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
+       unsigned long nbits = bitmap_size * BITS_PER_BYTE;
+       unsigned long j, i, nr = nbits / pteset ?: 1;
        __u64 out_dirty = 0;
 
        /* Mark all even bits as dirty in the mock domain */
-       for (i = 0; i < nbits; i += 2)
+       memset(bitmap, 0, bitmap_size);
+       for (i = 0; i < nbits; i += pteset)
                set_bit(i, (unsigned long *)bitmap);
 
        test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
@@ -365,8 +368,12 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
        test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
                                  flags);
        /* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
-       for (i = 0; i < nbits; i++) {
-               ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap));
+       for (i = 0; i < nbits; i += pteset) {
+               for (j = 0; j < pteset; j++) {
+                       ASSERT_EQ(j < npte,
+                                 test_bit(i + j, (unsigned long *)bitmap));
+               }
+               ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
        }
 
        memset(bitmap, 0, bitmap_size);
@@ -374,19 +381,23 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
                                  flags);
 
        /* It was read already -- expect all zeroes */
-       for (i = 0; i < nbits; i++) {
-               ASSERT_EQ(!(i % 2) && (flags &
-                                      IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
-                         test_bit(i, (unsigned long *)bitmap));
+       for (i = 0; i < nbits; i += pteset) {
+               for (j = 0; j < pteset; j++) {
+                       ASSERT_EQ(
+                               (j < npte) &&
+                                       (flags &
+                                        IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
+                               test_bit(i + j, (unsigned long *)bitmap));
+               }
        }
 
        return 0;
 }
-#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap,      \
-                               bitmap_size, flags, _metadata)                 \
+#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\
+                               bitmap, bitmap_size, flags, _metadata)     \
        ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
-                                             page_size, bitmap, bitmap_size,  \
-                                             flags, _metadata))
+                                             page_size, pte_size, bitmap,     \
+                                             bitmap_size, flags, _metadata))
 
 static int _test_cmd_create_access(int fd, unsigned int ioas_id,
                                   __u32 *access_id, unsigned int flags)
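The npte/pteset arithmetic above is easiest to see with the mock constants plugged in. A worked example, assuming a 4 KiB PAGE_SIZE so that MOCK_PAGE_SIZE = PAGE_SIZE / 2 and MOCK_HUGE_PAGE_SIZE = 512 * MOCK_PAGE_SIZE, per the definitions in iommufd.c earlier in this series:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 2048;			/* MOCK_PAGE_SIZE */
	unsigned long pte_page_size = 512 * page_size;	/* MOCK_HUGE_PAGE_SIZE, 1 MiB */
	unsigned long npte = pte_page_size / page_size;	/* 512 bits per huge PTE */
	unsigned long pteset = 2 * npte;		/* 1024: one dirty + one clean PTE */

	/* The helper sets bits 0, 1024, 2048, ...; each set bit dirties a
	 * whole huge PTE, so the bitmap read back at small-page granularity
	 * shows npte dirty bits followed by npte clear bits, which is what
	 * the ASSERT_EQ(j < npte, test_bit(i + j, ...)) loop checks. */
	printf("npte=%lu pteset=%lu\n", npte, pteset);
	return 0;
}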
index 40337f566eebb319d07f332d4eab1e0a6d0c287f..06b43ed23580b67c060aeaadea11b06641a629c3 100644 (file)
@@ -367,11 +367,21 @@ static void test_invalid_memory_region_flags(void)
        }
 
        if (supported_flags & KVM_MEM_GUEST_MEMFD) {
+               int guest_memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0);
+
                r = __vm_set_user_memory_region2(vm, 0,
                                                 KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_GUEST_MEMFD,
-                                                0, MEM_REGION_SIZE, NULL, 0, 0);
+                                                0, MEM_REGION_SIZE, NULL, guest_memfd, 0);
                TEST_ASSERT(r && errno == EINVAL,
                            "KVM_SET_USER_MEMORY_REGION2 should have failed, dirty logging private memory is unsupported");
+
+               r = __vm_set_user_memory_region2(vm, 0,
+                                                KVM_MEM_READONLY | KVM_MEM_GUEST_MEMFD,
+                                                0, MEM_REGION_SIZE, NULL, guest_memfd, 0);
+               TEST_ASSERT(r && errno == EINVAL,
+                           "KVM_SET_USER_MEMORY_REGION2 should have failed, read-only GUEST_MEMFD memslots are unsupported");
+
+               close(guest_memfd);
        }
 }
 
index cce90a10515ad2fe78fe68147d26732d717c3bb6..2b9f8cc52639d1942238b41a1ad55edc6bd406ed 100644 (file)
@@ -1517,6 +1517,12 @@ int main(int argc, char *argv[])
                                continue;
 
                        uffd_test_start("%s on %s", test->name, mem_type->name);
+                       if ((mem_type->mem_flag == MEM_HUGETLB ||
+                           mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
+                           (default_huge_page_size() == 0)) {
+                               uffd_test_skip("huge page size is 0, feature missing?");
+                               continue;
+                       }
                        if (!uffd_feature_supported(test)) {
                                uffd_test_skip("feature missing");
                                continue;
index b0f5e55d2d0b2584aefacc135ffe6b2d2cab34fc..58962963650227bcc942354a052d8bf2bd95aa13 100755 (executable)
@@ -235,9 +235,6 @@ mirred_egress_to_ingress_tcp_test()
        check_err $? "didn't mirred redirect ICMP"
        tc_check_packets "dev $h1 ingress" 102 10
        check_err $? "didn't drop mirred ICMP"
-       local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
-       test ${overlimits} = 10
-       check_err $? "wrong overlimits, expected 10 got ${overlimits}"
 
        tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
        tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
index fe59ca3e5596bfe3abfbb477dad2d8bcbb608a56..12491850ae985a779b069662ccba312a3dc1964e 100755 (executable)
@@ -367,14 +367,12 @@ run_test()
   local desc=$2
   local node_src=$3
   local node_dst=$4
-  local ip6_src=$5
-  local ip6_dst=$6
-  local if_dst=$7
-  local trace_type=$8
-  local ioam_ns=$9
-
-  ip netns exec $node_dst ./ioam6_parser $if_dst $name $ip6_src $ip6_dst \
-         $trace_type $ioam_ns &
+  local ip6_dst=$5
+  local trace_type=$6
+  local ioam_ns=$7
+  local type=$8
+
+  ip netns exec $node_dst ./ioam6_parser $name $trace_type $ioam_ns $type &
   local spid=$!
   sleep 0.1
 
@@ -489,7 +487,7 @@ out_undef_ns()
          trace prealloc type 0x800000 ns 0 size 4 dev veth0
 
   run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
-         db01::2 db01::1 veth0 0x800000 0
+         db01::1 0x800000 0 $1
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
 }
@@ -509,7 +507,7 @@ out_no_room()
          trace prealloc type 0xc00000 ns 123 size 4 dev veth0
 
   run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
-         db01::2 db01::1 veth0 0xc00000 123
+         db01::1 0xc00000 123 $1
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
 }
@@ -543,14 +541,14 @@ out_bits()
       if [ $cmd_res != 0 ]
       then
         npassed=$((npassed+1))
-        log_test_passed "$descr"
+        log_test_passed "$descr ($1 mode)"
       else
         nfailed=$((nfailed+1))
-        log_test_failed "$descr"
+        log_test_failed "$descr ($1 mode)"
       fi
     else
        run_test "out_bit$i" "$descr ($1 mode)" $ioam_node_alpha \
-           $ioam_node_beta db01::2 db01::1 veth0 ${bit2type[$i]} 123
+           $ioam_node_beta db01::1 ${bit2type[$i]} 123 $1
     fi
   done
 
@@ -574,7 +572,7 @@ out_full_supp_trace()
          trace prealloc type 0xfff002 ns 123 size 100 dev veth0
 
   run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
-         db01::2 db01::1 veth0 0xfff002 123
+         db01::1 0xfff002 123 $1
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
 }
@@ -604,7 +602,7 @@ in_undef_ns()
          trace prealloc type 0x800000 ns 0 size 4 dev veth0
 
   run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
-         db01::2 db01::1 veth0 0x800000 0
+         db01::1 0x800000 0 $1
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
 }
@@ -624,7 +622,7 @@ in_no_room()
          trace prealloc type 0xc00000 ns 123 size 4 dev veth0
 
   run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
-         db01::2 db01::1 veth0 0xc00000 123
+         db01::1 0xc00000 123 $1
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
 }
@@ -651,7 +649,7 @@ in_bits()
            dev veth0
 
     run_test "in_bit$i" "${desc/<n>/$i} ($1 mode)" $ioam_node_alpha \
-           $ioam_node_beta db01::2 db01::1 veth0 ${bit2type[$i]} 123
+           $ioam_node_beta db01::1 ${bit2type[$i]} 123 $1
   done
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
@@ -679,7 +677,7 @@ in_oflag()
          trace prealloc type 0xc00000 ns 123 size 4 dev veth0
 
   run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
-         db01::2 db01::1 veth0 0xc00000 123
+         db01::1 0xc00000 123 $1
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
 
@@ -703,7 +701,7 @@ in_full_supp_trace()
          trace prealloc type 0xfff002 ns 123 size 80 dev veth0
 
   run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
-         db01::2 db01::1 veth0 0xfff002 123
+         db01::1 0xfff002 123 $1
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
 }
@@ -731,7 +729,7 @@ fwd_full_supp_trace()
          trace prealloc type 0xfff002 ns 123 size 244 via db01::1 dev veth0
 
   run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_gamma \
-         db01::2 db02::2 veth0 0xfff002 123
+         db02::2 0xfff002 123 $1
 
   [ "$1" = "encap" ] && ip -netns $ioam_node_gamma link set ip6tnl0 down
 }
index d9d1d41901267439aac832166e46410c85f44111..895e5bb5044bb126dc9894cf35a68d7cf1c79ec7 100644 (file)
@@ -8,7 +8,6 @@
 #include <errno.h>
 #include <limits.h>
 #include <linux/const.h>
-#include <linux/if_ether.h>
 #include <linux/ioam6.h>
 #include <linux/ipv6.h>
 #include <stdlib.h>
@@ -512,14 +511,6 @@ static int str2id(const char *tname)
        return -1;
 }
 
-static int ipv6_addr_equal(const struct in6_addr *a1, const struct in6_addr *a2)
-{
-       return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
-               (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
-               (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
-               (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
-}
-
 static int get_u32(__u32 *val, const char *arg, int base)
 {
        unsigned long res;
@@ -603,70 +594,80 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
 
 int main(int argc, char **argv)
 {
-       int fd, size, hoplen, tid, ret = 1;
-       struct in6_addr src, dst;
+       int fd, size, hoplen, tid, ret = 1, on = 1;
        struct ioam6_hdr *opt;
-       struct ipv6hdr *ip6h;
-       __u8 buffer[400], *p;
-       __u16 ioam_ns;
+       struct cmsghdr *cmsg;
+       struct msghdr msg;
+       struct iovec iov;
+       __u8 buffer[512];
        __u32 tr_type;
+       __u16 ioam_ns;
+       __u8 *ptr;
 
-       if (argc != 7)
+       if (argc != 5)
                goto out;
 
-       tid = str2id(argv[2]);
+       tid = str2id(argv[1]);
        if (tid < 0 || !func[tid])
                goto out;
 
-       if (inet_pton(AF_INET6, argv[3], &src) != 1 ||
-           inet_pton(AF_INET6, argv[4], &dst) != 1)
+       if (get_u32(&tr_type, argv[2], 16) ||
+           get_u16(&ioam_ns, argv[3], 0))
                goto out;
 
-       if (get_u32(&tr_type, argv[5], 16) ||
-           get_u16(&ioam_ns, argv[6], 0))
+       fd = socket(PF_INET6, SOCK_RAW,
+                   !strcmp(argv[4], "encap") ? IPPROTO_IPV6 : IPPROTO_ICMPV6);
+       if (fd < 0)
                goto out;
 
-       fd = socket(AF_PACKET, SOCK_DGRAM, __cpu_to_be16(ETH_P_IPV6));
-       if (!fd)
-               goto out;
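+       /* receive IPv6 hop-by-hop options as ancillary data */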
+       setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPOPTS, &on, sizeof(on));
 
-       if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
-                      argv[1], strlen(argv[1])))
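+       /* a 1-byte payload is enough: the IOAM data arrives in the cmsg buffer */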
+       iov.iov_len = 1;
+       iov.iov_base = malloc(CMSG_SPACE(sizeof(buffer)));
+       if (!iov.iov_base)
                goto close;
-
 recv:
-       size = recv(fd, buffer, sizeof(buffer), 0);
+       memset(&msg, 0, sizeof(msg));
+       msg.msg_iov = &iov;
+       msg.msg_iovlen = 1;
+       msg.msg_control = buffer;
+       msg.msg_controllen = CMSG_SPACE(sizeof(buffer));
+
+       size = recvmsg(fd, &msg, 0);
        if (size <= 0)
                goto close;
 
-       ip6h = (struct ipv6hdr *)buffer;
+       for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+               if (cmsg->cmsg_level != IPPROTO_IPV6 ||
+                   cmsg->cmsg_type != IPV6_HOPOPTS ||
+                   cmsg->cmsg_len < sizeof(struct ipv6_hopopt_hdr))
+                       continue;
 
-       if (!ipv6_addr_equal(&ip6h->saddr, &src) ||
-           !ipv6_addr_equal(&ip6h->daddr, &dst))
-               goto recv;
+               ptr = (__u8 *)CMSG_DATA(cmsg);
 
-       if (ip6h->nexthdr != IPPROTO_HOPOPTS)
-               goto close;
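+               /* Hdr Ext Len is in 8-octet units, not counting the first 8 octets */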
+               hoplen = (ptr[1] + 1) << 3;
+               ptr += sizeof(struct ipv6_hopopt_hdr);
 
-       p = buffer + sizeof(*ip6h);
-       hoplen = (p[1] + 1) << 3;
-       p += sizeof(struct ipv6_hopopt_hdr);
+               while (hoplen > 0) {
+                       opt = (struct ioam6_hdr *)ptr;
 
-       while (hoplen > 0) {
-               opt = (struct ioam6_hdr *)p;
+                       if (opt->opt_type == IPV6_TLV_IOAM &&
+                           opt->type == IOAM6_TYPE_PREALLOC) {
+                               ptr += sizeof(*opt);
+                               ret = func[tid](tid,
+                                               (struct ioam6_trace_hdr *)ptr,
+                                               tr_type, ioam_ns);
+                               goto close;
+                       }
 
-               if (opt->opt_type == IPV6_TLV_IOAM &&
-                   opt->type == IOAM6_TYPE_PREALLOC) {
-                       p += sizeof(*opt);
-                       ret = func[tid](tid, (struct ioam6_trace_hdr *)p,
-                                          tr_type, ioam_ns);
-                       break;
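+                       /* skip this TLV: 2 bytes of type/len plus opt_len of data */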
+                       ptr += opt->opt_len + 2;
+                       hoplen -= opt->opt_len + 2;
                }
-
-               p += opt->opt_len + 2;
-               hoplen -= opt->opt_len + 2;
        }
+
+       goto recv;
 close:
+       free(iov.iov_base);
        close(fd);
 out:
        return ret;
index 04fcb8a077c995c768d222171c045caf2ab3c3b3..75fc95675e2dcaa7a4593d0b9bd2ccaccdb12921 100755 (executable)
@@ -20,7 +20,7 @@ flush_pids()
 
        ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
 
-       for _ in $(seq 10); do
+       for _ in $(seq $((timeout_poll * 10))); do
                [ -z "$(ip netns pids "${ns}")" ] && break
                sleep 0.1
        done
@@ -62,14 +62,14 @@ __chk_nr()
        nr=$(eval $command)
 
        printf "%-50s" "$msg"
-       if [ $nr != $expected ]; then
-               if [ $nr = "$skip" ] && ! mptcp_lib_expect_all_features; then
+       if [ "$nr" != "$expected" ]; then
+               if [ "$nr" = "$skip" ] && ! mptcp_lib_expect_all_features; then
                        echo "[ skip ] Feature probably not supported"
                        mptcp_lib_result_skip "${msg}"
                else
                        echo "[ fail ] expected $expected found $nr"
                        mptcp_lib_result_fail "${msg}"
-                       ret=$test_cnt
+                       ret=${KSFT_FAIL}
                fi
        else
                echo "[  ok  ]"
@@ -91,6 +91,15 @@ chk_msk_nr()
        __chk_msk_nr "grep -c token:" "$@"
 }
 
+chk_listener_nr()
+{
+       local expected=$1
+       local msg="$2"
+
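+       # ss: -n numeric, -l listening, -H no header, -O one line, -N netns, -M mptcp / -t tcp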
+       __chk_nr "ss -nlHMON $ns | wc -l" "$expected" "$msg - mptcp" 0
+       __chk_nr "ss -nlHtON $ns | wc -l" "$expected" "$msg - subflows"
+}
+
 wait_msk_nr()
 {
        local condition="grep -c token:"
@@ -115,11 +124,11 @@ wait_msk_nr()
        if [ $i -ge $timeout ]; then
                echo "[ fail ] timeout while expecting $expected max $max last $nr"
                mptcp_lib_result_fail "${msg} # timeout"
-               ret=$test_cnt
+               ret=${KSFT_FAIL}
        elif [ $nr != $expected ]; then
                echo "[ fail ] expected $expected found $nr"
                mptcp_lib_result_fail "${msg} # unexpected result"
-               ret=$test_cnt
+               ret=${KSFT_FAIL}
        else
                echo "[  ok  ]"
                mptcp_lib_result_pass "${msg}"
@@ -166,9 +175,13 @@ chk_msk_listen()
 chk_msk_inuse()
 {
        local expected=$1
-       local msg="$2"
+       local msg="....chk ${2:-${expected}} msk in use"
        local listen_nr
 
+       if [ "${expected}" -eq 0 ]; then
+               msg+=" after flush"
+       fi
+
        listen_nr=$(ss -N "${ns}" -Ml | grep -c LISTEN)
        expected=$((expected + listen_nr))
 
@@ -179,16 +192,21 @@ chk_msk_inuse()
                sleep 0.1
        done
 
-       __chk_nr get_msk_inuse $expected "$msg" 0
+       __chk_nr get_msk_inuse $expected "${msg}" 0
 }
 
 # $1: cestab nr
 chk_msk_cestab()
 {
-       local cestab=$1
+       local expected=$1
+       local msg="....chk ${2:-${expected}} cestab"
+
+       if [ "${expected}" -eq 0 ]; then
+               msg+=" after flush"
+       fi
 
        __chk_nr "mptcp_lib_get_counter ${ns} MPTcpExtMPCurrEstab" \
-                "${cestab}" "....chk ${cestab} cestab" ""
+                "${expected}" "${msg}" ""
 }
 
 wait_connected()
@@ -227,12 +245,12 @@ wait_connected $ns 10000
 chk_msk_nr 2 "after MPC handshake "
 chk_msk_remote_key_nr 2 "....chk remote_key"
 chk_msk_fallback_nr 0 "....chk no fallback"
-chk_msk_inuse 2 "....chk 2 msk in use"
+chk_msk_inuse 2
 chk_msk_cestab 2
 flush_pids
 
-chk_msk_inuse 0 "....chk 0 msk in use after flush"
-chk_msk_cestab 0
+chk_msk_inuse 0 "2->0"
+chk_msk_cestab 0 "2->0"
 
 echo "a" | \
        timeout ${timeout_test} \
@@ -247,12 +265,12 @@ echo "b" | \
                                127.0.0.1 >/dev/null &
 wait_connected $ns 10001
 chk_msk_fallback_nr 1 "check fallback"
-chk_msk_inuse 1 "....chk 1 msk in use"
+chk_msk_inuse 1
 chk_msk_cestab 1
 flush_pids
 
-chk_msk_inuse 0 "....chk 0 msk in use after flush"
-chk_msk_cestab 0
+chk_msk_inuse 0 "1->0"
+chk_msk_cestab 0 "1->0"
 
 NR_CLIENTS=100
 for I in `seq 1 $NR_CLIENTS`; do
@@ -273,12 +291,28 @@ for I in `seq 1 $NR_CLIENTS`; do
 done
 
 wait_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
-chk_msk_inuse $((NR_CLIENTS*2)) "....chk many msk in use"
-chk_msk_cestab $((NR_CLIENTS*2))
+chk_msk_inuse $((NR_CLIENTS*2)) "many"
+chk_msk_cestab $((NR_CLIENTS*2)) "many"
 flush_pids
 
-chk_msk_inuse 0 "....chk 0 msk in use after flush"
-chk_msk_cestab 0
+chk_msk_inuse 0 "many->0"
+chk_msk_cestab 0 "many->0"
+
+chk_listener_nr 0 "no listener sockets"
+NR_SERVERS=100
+for I in $(seq 1 $NR_SERVERS); do
+       ip netns exec $ns ./mptcp_connect -p $((I + 20001)) \
+               -t ${timeout_poll} -l 0.0.0.0 >/dev/null 2>&1 &
+done
+mptcp_lib_wait_local_port_listen $ns $((NR_SERVERS + 20001))
+
+chk_listener_nr $NR_SERVERS "many listener sockets"
+
+# graceful termination
+for I in $(seq 1 $NR_SERVERS); do
+       echo a | ip netns exec $ns ./mptcp_connect -p $((I + 20001)) 127.0.0.1 >/dev/null 2>&1 &
+done
+flush_pids
 
 mptcp_lib_result_print_all_tap
 exit $ret
index c07386e21e0a4aa10b004cb820488f15ff18dd7a..e4581b0dfb967723e36b1847c512f02f4bc87a45 100755 (executable)
@@ -161,6 +161,11 @@ check_tools()
                exit $ksft_skip
        fi
 
+       if ! ss -h | grep -q MPTCP; then
+               echo "SKIP: ss tool does not support MPTCP"
+               exit $ksft_skip
+       fi
+
        # Use the legacy version if available to support old kernel versions
        if iptables-legacy -V &> /dev/null; then
                iptables="iptables-legacy"
@@ -3333,16 +3338,17 @@ userspace_pm_rm_sf()
 {
        local evts=$evts_ns1
        local t=${3:-1}
-       local ip=4
+       local ip
        local tk da dp sp
        local cnt
 
        [ "$1" == "$ns2" ] && evts=$evts_ns2
-       if mptcp_lib_is_v6 $2; then ip=6; fi
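+       # infer the address family from the recorded event, not from the address format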
+       [ -n "$(mptcp_lib_evts_get_info "saddr4" "$evts" $t)" ] && ip=4
+       [ -n "$(mptcp_lib_evts_get_info "saddr6" "$evts" $t)" ] && ip=6
        tk=$(mptcp_lib_evts_get_info token "$evts")
-       da=$(mptcp_lib_evts_get_info "daddr$ip" "$evts" $t)
-       dp=$(mptcp_lib_evts_get_info dport "$evts" $t)
-       sp=$(mptcp_lib_evts_get_info sport "$evts" $t)
+       da=$(mptcp_lib_evts_get_info "daddr$ip" "$evts" $t $2)
+       dp=$(mptcp_lib_evts_get_info dport "$evts" $t $2)
+       sp=$(mptcp_lib_evts_get_info sport "$evts" $t $2)
 
        cnt=$(rm_sf_count ${1})
        ip netns exec $1 ./pm_nl_ctl dsf lip $2 lport $sp \
@@ -3429,20 +3435,23 @@ userspace_tests()
        if reset_with_events "userspace pm add & remove address" &&
           continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
                set_userspace_pm $ns1
-               pm_nl_set_limits $ns2 1 1
+               pm_nl_set_limits $ns2 2 2
                speed=5 \
                        run_tests $ns1 $ns2 10.0.1.1 &
                local tests_pid=$!
                wait_mpj $ns1
                userspace_pm_add_addr $ns1 10.0.2.1 10
-               chk_join_nr 1 1 1
-               chk_add_nr 1 1
-               chk_mptcp_info subflows 1 subflows 1
-               chk_subflows_total 2 2
-               chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
+               userspace_pm_add_addr $ns1 10.0.3.1 20
+               chk_join_nr 2 2 2
+               chk_add_nr 2 2
+               chk_mptcp_info subflows 2 subflows 2
+               chk_subflows_total 3 3
+               chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
                userspace_pm_rm_addr $ns1 10
                userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $SUB_ESTABLISHED
-               chk_rm_nr 1 1 invert
+               userspace_pm_rm_addr $ns1 20
+               userspace_pm_rm_sf $ns1 10.0.3.1 $SUB_ESTABLISHED
+               chk_rm_nr 2 2 invert
                chk_mptcp_info subflows 0 subflows 0
                chk_subflows_total 1 1
                kill_events_pids
index 3a2abae5993e2b4b32ae810d080dc3d336e41e11..3777d66fc56d36a4770b164fd781af298cd4eb70 100644 (file)
@@ -213,9 +213,9 @@ mptcp_lib_get_info_value() {
        grep "${2}" | sed -n 's/.*\('"${1}"':\)\([0-9a-f:.]*\).*$/\2/p;q'
 }
 
-# $1: info name ; $2: evts_ns ; $3: event type
+# $1: info name ; $2: evts_ns ; [$3: event type; [$4: addr]]
 mptcp_lib_evts_get_info() {
-       mptcp_lib_get_info_value "${1}" "^type:${3:-1}," < "${2}"
+       grep "${4:-}" "${2}" | mptcp_lib_get_info_value "${1}" "^type:${3:-1},"
 }
 
 # $1: PID
index 8f4ff123a7eb92646845a5dea4caf28483057085..71899a3ffa7a9d7831c61f08b7f3b9c20aaed58e 100755 (executable)
@@ -183,7 +183,7 @@ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
 subflow 10.0.1.1" "          (nobackup)"
 
 # fullmesh support has been added later
-ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh
+ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh 2>/dev/null
 if ip netns exec $ns1 ./pm_nl_ctl dump | grep -q "fullmesh" ||
    mptcp_lib_expect_all_features; then
        check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
@@ -194,6 +194,12 @@ subflow 10.0.1.1" "          (nofullmesh)"
        ip netns exec $ns1 ./pm_nl_ctl set id 1 flags backup,fullmesh
        check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
 subflow,backup,fullmesh 10.0.1.1" "          (backup,fullmesh)"
+else
+       for st in fullmesh nofullmesh backup,fullmesh; do
+               st="          (${st})"
+               printf "%-50s%s\n" "${st}" "[SKIP]"
+               mptcp_lib_result_skip "${st}"
+       done
 fi
 
 mptcp_lib_result_print_all_tap
index 0cc964e6f2c1768dad6a474cffd1c521580b7741..8f9ddb3ad4fe83501f54a1ac5e62047108eea910 100755 (executable)
@@ -250,7 +250,8 @@ run_test()
                [ $bail -eq 0 ] || exit $ret
        fi
 
-       printf "%-60s" "$msg - reverse direction"
+       msg+=" - reverse direction"
+       printf "%-60s" "${msg}"
        do_transfer $large $small $time
        lret=$?
        mptcp_lib_result_code "${lret}" "${msg}"
index 6167837f48e17ef8ba0d41ba541f73da765f945e..1b94a75604fee98788ba5792b384ca4870bdafbb 100755 (executable)
@@ -75,7 +75,7 @@ print_test()
 {
        test_name="${1}"
 
-       _printf "%-63s" "${test_name}"
+       _printf "%-68s" "${test_name}"
 }
 
 print_results()
@@ -542,7 +542,7 @@ verify_subflow_events()
        local remid
        local info
 
-       info="${e_saddr} (${e_from}) => ${e_daddr} (${e_to})"
+       info="${e_saddr} (${e_from}) => ${e_daddr}:${e_dport} (${e_to})"
 
        if [ "$e_type" = "$SUB_ESTABLISHED" ]
        then
index 49c84602707f89d6f901ee60b63c4e5dfe75f8c8..b95c249f81c254dae9160b42ec595b3d2daf6679 100644 (file)
@@ -1485,6 +1485,51 @@ TEST_F(tls, control_msg)
        EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
 }
 
+TEST_F(tls, control_msg_nomerge)
+{
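+       /* two control records of the same type must stay separate, never merged */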
+       char *rec1 = "1111";
+       char *rec2 = "2222";
+       int send_len = 5;
+       char buf[15];
+
+       if (self->notls)
+               SKIP(return, "no TLS support");
+
+       EXPECT_EQ(tls_send_cmsg(self->fd, 100, rec1, send_len, 0), send_len);
+       EXPECT_EQ(tls_send_cmsg(self->fd, 100, rec2, send_len, 0), send_len);
+
+       EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, 100, buf, sizeof(buf), MSG_PEEK), send_len);
+       EXPECT_EQ(memcmp(buf, rec1, send_len), 0);
+
+       EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, 100, buf, sizeof(buf), MSG_PEEK), send_len);
+       EXPECT_EQ(memcmp(buf, rec1, send_len), 0);
+
+       EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, 100, buf, sizeof(buf), 0), send_len);
+       EXPECT_EQ(memcmp(buf, rec1, send_len), 0);
+
+       EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, 100, buf, sizeof(buf), 0), send_len);
+       EXPECT_EQ(memcmp(buf, rec2, send_len), 0);
+}
+
+TEST_F(tls, data_control_data)
+{
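+       /* a control record must not be merged with the data records around it */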
+       char *rec1 = "1111";
+       char *rec2 = "2222";
+       char *rec3 = "3333";
+       int send_len = 5;
+       char buf[15];
+
+       if (self->notls)
+               SKIP(return, "no TLS support");
+
+       EXPECT_EQ(send(self->fd, rec1, send_len, 0), send_len);
+       EXPECT_EQ(tls_send_cmsg(self->fd, 100, rec2, send_len, 0), send_len);
+       EXPECT_EQ(send(self->fd, rec3, send_len, 0), send_len);
+
+       EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_PEEK), send_len);
+       EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_PEEK), send_len);
+}
+
 TEST_F(tls, shutdown)
 {
        char const *test_str = "test_read";
index 27574bbf2d6386f770673b82684edf07b586c79a..5ae85def07395b50c07600f4a31b7ff69578bb9f 100755 (executable)
@@ -246,6 +246,20 @@ ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
 chk_gro "        - aggregation with TSO off" 1
 cleanup
 
+create_ns
+ip -n $NS_DST link set dev veth$DST up
+ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp
+chk_gro_flag "gro vs xdp while down - gro flag on" $DST on
+ip -n $NS_DST link set dev veth$DST down
+chk_gro_flag "                      - after down" $DST on
+ip -n $NS_DST link set dev veth$DST xdp off
+chk_gro_flag "                      - after xdp off" $DST off
+ip -n $NS_DST link set dev veth$DST up
+chk_gro_flag "                      - after up" $DST off
+ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp
+chk_gro_flag "                      - after peer xdp" $DST off
+cleanup
+
 create_ns
 chk_channels "default channels" $DST 1 1
 
index db27153eb4a02c1db3f0f9dc55445558fbb5d5ea..936c3085bb8373ea74036a6870cb67f5b103f0ae 100644 (file)
@@ -7,7 +7,8 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
        nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
        ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
        conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
-       conntrack_sctp_collision.sh xt_string.sh
+       conntrack_sctp_collision.sh xt_string.sh \
+       bridge_netfilter.sh
 
 HOSTPKG_CONFIG := pkg-config
 
diff --git a/tools/testing/selftests/netfilter/bridge_netfilter.sh b/tools/testing/selftests/netfilter/bridge_netfilter.sh
new file mode 100644 (file)
index 0000000..659b3ab
--- /dev/null
@@ -0,0 +1,188 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test bridge netfilter + conntrack, a combination that doesn't really work,
+# with multicast/broadcast packets racing for hash table insertion.
+
+#           eth0    br0     eth0
+# setup is: ns1 <->,ns0 <-> ns3
+#           ns2 <-'    `'-> ns4
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns0="ns0-$sfx"
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+ns3="ns3-$sfx"
+ns4="ns4-$sfx"
+
+ebtables -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ebtables"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+for i in $(seq 0 4); do
+  eval ip netns add \$ns$i
+done
+
+cleanup() {
+  for i in $(seq 0 4); do eval ip netns del \$ns$i;done
+}
+
+trap cleanup EXIT
+
+do_ping()
+{
+       fromns="$1"
+       dstip="$2"
+
+       ip netns exec $fromns ping -c 1 -q $dstip > /dev/null
+       if [ $? -ne 0 ]; then
+               echo "ERROR: ping from $fromns to $dstip"
+               ip netns exec ${ns0} nft list ruleset
+               ret=1
+       fi
+}
+
+bcast_ping()
+{
+       fromns="$1"
+       dstip="$2"
+
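+       # the clone vs. confirm race is timing dependent, so try many times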
+       for i in $(seq 1 1000); do
+               ip netns exec $fromns ping -q -f -b -c 1 $dstip > /dev/null 2>&1
+               if [ $? -ne 0 ]; then
+                       echo "ERROR: ping -b from $fromns to $dstip"
+                       ip netns exec ${ns0} nft list ruleset
+               fi
+       done
+}
+
+ip link add veth1 netns ${ns0} type veth peer name eth0 netns ${ns1}
+if [ $? -ne 0 ]; then
+       echo "SKIP: Can't create veth device"
+       exit $ksft_skip
+fi
+
+ip link add veth2 netns ${ns0} type veth peer name eth0 netns $ns2
+ip link add veth3 netns ${ns0} type veth peer name eth0 netns $ns3
+ip link add veth4 netns ${ns0} type veth peer name eth0 netns $ns4
+
+ip -net ${ns0} link set lo up
+
+for i in $(seq 1 4); do
+  ip -net ${ns0} link set veth$i up
+done
+
+ip -net ${ns0} link add br0 type bridge stp_state 0 forward_delay 0 nf_call_iptables 1 nf_call_ip6tables 1 nf_call_arptables 1
+if [ $? -ne 0 ]; then
+       echo "SKIP: Can't create bridge br0"
+       exit $ksft_skip
+fi
+
+# make veth1, veth2 and veth3 part of the bridge.
+for i in $(seq 1 3); do
+  ip -net ${ns0} link set veth$i master br0
+done
+
+# add a macvlan on top of the bridge.
+MACVLAN_ADDR=ba:f3:13:37:42:23
+ip -net ${ns0} link add link br0 name macvlan0 type macvlan mode private
+ip -net ${ns0} link set macvlan0 address ${MACVLAN_ADDR}
+ip -net ${ns0} link set macvlan0 up
+ip -net ${ns0} addr add 10.23.0.1/24 dev macvlan0
+
+# add a macvlan on top of veth4.
+MACVLAN_ADDR=ba:f3:13:37:42:24
+ip -net ${ns0} link add link veth4 name macvlan4 type macvlan mode vepa
+ip -net ${ns0} link set macvlan4 address ${MACVLAN_ADDR}
+ip -net ${ns0} link set macvlan4 up
+
+# make the macvlan part of the bridge.
+# veth4 is not a bridge port, only the macvlan on top of it.
+ip -net ${ns0} link set macvlan4 master br0
+
+ip -net ${ns0} link set br0 up
+ip -net ${ns0} addr add 10.0.0.1/24 dev br0
+ip netns exec ${ns0} sysctl -q net.bridge.bridge-nf-call-iptables=1
+ret=$?
+if [ $ret -ne 0 ] ; then
+       echo "SKIP: bridge netfilter not available"
+       ret=$ksft_skip
+fi
+
+# for testing, make the namespaces reply to ping -b probes.
+ip netns exec ${ns0} sysctl -q net.ipv4.icmp_echo_ignore_broadcasts=0
+
+# enable conntrack in ns0 and drop broadcast packets in forward to
+# keep them from getting confirmed in the postrouting hook before
+# the cloned skb is passed up the stack.
+ip netns exec ${ns0} nft -f - <<EOF
+table ip filter {
+       chain input {
+               type filter hook input priority 1; policy accept
+               iifname br0 counter
+               ct state new accept
+       }
+}
+
+table bridge filter {
+       chain forward {
+               type filter hook forward priority 0; policy accept
+               meta pkttype broadcast ip protocol icmp counter drop
+       }
+}
+EOF
+
+# ns1 and ns2 share the 10.0.0.0/24 subnet behind ns0:br0.
+# ns3 and ns4 sit in the 10.23.0.0/24 macvlan subnet, but ns4 is not
+# behind the bridge: veth4 itself is not a bridge port, only the
+# macvlan on top of it is.
+for i in $(seq 1 4); do
+  eval ip -net \$ns$i link set lo up
+  eval ip -net \$ns$i link set eth0 up
+done
+for i in $(seq 1 2); do
+  eval ip -net \$ns$i addr add 10.0.0.1$i/24 dev eth0
+done
+
+ip -net ${ns3} addr add 10.23.0.13/24 dev eth0
+ip -net ${ns4} addr add 10.23.0.14/24 dev eth0
+
+# test basic connectivity
+do_ping ${ns1} 10.0.0.12
+do_ping ${ns3} 10.23.0.1
+do_ping ${ns4} 10.23.0.1
+
+if [ $ret -eq 0 ];then
+       echo "PASS: netns connectivity: ns1 can reach ns2; ns3 and ns4 can reach ns0"
+fi
+
+bcast_ping ${ns1} 10.0.0.255
+
+# This should deliver broadcast to macvlan0, which is on top of ns0:br0.
+bcast_ping ${ns3} 10.23.0.255
+
+# same, this time via veth4:macvlan4.
+bcast_ping ${ns4} 10.23.0.255
+
+read t < /proc/sys/kernel/tainted
+
+if [ $t -eq 0 ];then
+       echo PASS: kernel not tainted
+else
+       echo ERROR: kernel is tainted
+       ret=1
+fi
+
+exit $ret
index 7b1addd504209fadb59f908a843b23a9d0218f3f..8a64f63e37ce215e4aeff2675b7114704075ae48 100644 (file)
@@ -18,6 +18,7 @@
 #include <pthread.h>
 
 #include "utils.h"
+#include "fpu.h"
 
 /* Number of times each thread should receive the signal */
 #define ITERATIONS 10
@@ -27,9 +28,7 @@
  */
 #define THREAD_FACTOR 8
 
-__thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
-                    1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0,
-                    2.1};
+__thread double darray[32];
 
 bool bad_context;
 int threads_starting;
@@ -43,9 +42,9 @@ void signal_fpu_sig(int sig, siginfo_t *info, void *context)
        ucontext_t *uc = context;
        mcontext_t *mc = &uc->uc_mcontext;
 
-       /* Only the non volatiles were loaded up */
-       for (i = 14; i < 32; i++) {
-               if (mc->fp_regs[i] != darray[i - 14]) {
+       // Don't check f30/f31, they're used as scratches in check_all_fprs()
+       for (i = 0; i < 30; i++) {
+               if (mc->fp_regs[i] != darray[i]) {
                        bad_context = true;
                        break;
                }
@@ -54,7 +53,6 @@ void signal_fpu_sig(int sig, siginfo_t *info, void *context)
 
 void *signal_fpu_c(void *p)
 {
-       int i;
        long rc;
        struct sigaction act;
        act.sa_sigaction = signal_fpu_sig;
@@ -64,9 +62,7 @@ void *signal_fpu_c(void *p)
                return p;
 
        srand(pthread_self());
-       for (i = 0; i < 21; i++)
-               darray[i] = rand();
-
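+       /* fill all 32 entries; the signal handler checks f0-f29 against darray */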
+       randomise_darray(darray, ARRAY_SIZE(darray));
        rc = preempt_fpu(darray, &threads_starting, &running);
 
        return (void *) rc;
index 10bfc88a69f72b6a0e310cca043fb04882e24eb1..0f50960b0e3a89215757163ad3b458c92670f4de 100644 (file)
@@ -1615,7 +1615,13 @@ static int check_memory_region_flags(struct kvm *kvm,
                valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
 #ifdef __KVM_HAVE_READONLY_MEM
-       valid_flags |= KVM_MEM_READONLY;
+       /*
+        * GUEST_MEMFD is incompatible with read-only memslots, as writes to
+        * read-only memslots have emulated MMIO, not page fault, semantics,
+        * and KVM doesn't allow emulated MMIO for private memory.
+        */
+       if (!(mem->flags & KVM_MEM_GUEST_MEMFD))
+               valid_flags |= KVM_MEM_READONLY;
 #endif
 
        if (mem->flags & ~valid_flags)