diff --git a/_multibuild b/_multibuild index 01794d8..7938bc7 100644 --- a/_multibuild +++ b/_multibuild @@ -1,3 +1,4 @@ raspberrypi-kernel - \ No newline at end of file + kernel-rt + diff --git a/kernel-rt.spec b/kernel-rt.spec new file mode 100644 index 0000000..eea400c --- /dev/null +++ b/kernel-rt.spec @@ -0,0 +1,7880 @@ + + +%define with_signmodules 1 + +%define with_kabichk 0 + +%define modsign_cmd %{SOURCE10} + +%global Arch $(echo %{_host_cpu} | sed -e s/i.86/x86/ -e s/x86_64/x86/ -e s/aarch64.*/arm64/) + +%global TarballVer 4.19.90 + +%global KernelVer %{version}-%{release}.%{_target_cpu} + +%global hulkrelease 2203.3.0 + +%define with_patch 0 + +%define debuginfodir /usr/lib/debug + +%define with_debuginfo 1 + +%define with_perf 1 +# Do not recompute the build-id of vmlinux in find-debuginfo.sh +%global _missing_build_ids_terminate_build 1 +%global _no_recompute_build_ids 1 +%undefine _include_minidebuginfo +%undefine _include_gdb_index +%undefine _unique_build_ids + +%define with_source 1 + +Name: kernel-rt +Version: 4.19.90 +Release: %{hulkrelease}.rt103.0141 +Summary: Linux Kernel +License: GPLv2 +URL: http://www.kernel.org/ + +Source0: kernel.tar.gz +Source10: sign-modules +Source11: x509.genkey +Source12: extra_certificates + +%if 0%{?with_kabichk} +Source18: check-kabi +Source20: Module.kabi_aarch64 +Source21: Module.kabi_x86_64 +%endif + +Source200: mkgrub-menu-aarch64.sh + +Source2000: cpupower.service +Source2001: cpupower.config + +%if 0%{?with_patch} +Source9000: apply-patches +Source9001: guards +Source9002: series.conf +Source9998: patches.tar.bz2 +%endif + +Patch0: patch-4.19.90-2203.3.0-rt103.patch +Patch1: patch-4.19.90-2203.3.0-rt103-openeuler_defconfig.patch + +#BuildRequires: +BuildRequires: module-init-tools, patch >= 2.5.4, bash >= 2.03, tar +BuildRequires: bzip2, xz, findutils, gzip, m4, perl, make >= 3.78, diffutils, gawk +BuildRequires: gcc >= 3.4.2, binutils >= 2.12 +BuildRequires: hostname, net-tools, bc +BuildRequires: xmlto, asciidoc +BuildRequires: openssl openssl-devel +BuildRequires: hmaccalc +BuildRequires: ncurses-devel +#BuildRequires: pesign >= 0.109-4 +BuildRequires: elfutils-libelf-devel +BuildRequires: rpm >= 4.14.2 +#BuildRequires: sparse >= 0.4.1 +BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel python-devel perl(ExtUtils::Embed) bison +BuildRequires: audit-libs-devel +BuildRequires: pciutils-devel gettext +BuildRequires: rpm-build, elfutils +BuildRequires: numactl-devel python3-devel glibc-static python3-docutils +BuildRequires: perl-generators perl(Carp) libunwind-devel gtk2-devel +%if 0%{?with_perf} +# libbabeltrace-devel >= 1.3.0 +BuildRequires: libbabeltrace-devel java-1.8.0-openjdk-devel perl-devel +%endif +AutoReq: no +AutoProv: yes + +Conflicts: device-mapper-libs < 1.02.63-2 e2fsprogs < 1.37-4 initscripts < 7.23 iptables < 1.3.2-1 +Conflicts: ipw2200-firmware < 2.4 isdn4k-utils < 3.2-32 iwl4965-firmware < 228.57.2 jfsutils < 1.1.7-2 +Conflicts: mdadm < 3.2.1-5 nfs-utils < 1.0.7-12 oprofile < 0.9.1-2 ppp < 2.4.3-3 procps < 3.2.5-6.3 +Conflicts: reiserfs-utils < 3.6.19-2 selinux-policy-targeted < 1.25.3-14 squashfs-tools < 4.0 +Conflicts: udev < 063-6 util-linux < 2.12 wireless-tools < 29-3 xfsprogs < 2.6.13-4 + +Provides: kernel-rt-aarch64 = %{version}-%{release} kernel-rt-drm = 4.3.0 kernel-rt-drm-nouveau = 16 kernel-rt-modeset = 1 +Provides: kernel-rt-uname-r = %{KernelVer} kernel-rt=%{KernelVer} + +Requires: dracut >= 001-7 grubby >= 8.28-2 initscripts >= 8.11.1-1 linux-firmware >= 20100806-2 module-init-tools 
>= 3.16-2 + +ExclusiveArch: noarch aarch64 i686 x86_64 +ExclusiveOS: Linux + +%description +The Linux Kernel, the operating system core itself. + +%package devel +Summary: Development package for building kernel modules to match the %{KernelVer} kernel +AutoReqProv: no +Provides: %{name}-headers +Obsoletes: %{name}-headers +Provides: glibc-kernheaders +Provides: kernel-rt-devel-uname-r = %{KernelVer} +Provides: kernel-rt-devel-aarch64 = %{version}-%{release} +Requires: perl findutils + +%description devel +This package provides kernel headers and makefiles sufficient to build modules +against the %{KernelVer} kernel package. + +%package tools +Summary: Assortment of tools for the Linux kernel +Provides: %{name}-tools-libs +Obsoletes: %{name}-tools-libs +Provides: cpufreq-utils = 1:009-0.6.p1 +Provides: cpufrequtils = 1:009-0.6.p1 +Obsoletes: cpufreq-utils < 1:009-0.6.p1 +Obsoletes: cpufrequtils < 1:009-0.6.p1 +Obsoletes: cpuspeed < 1:1.5-16 +%description tools +This package contains the tools/ directory from the kernel source +and the supporting documentation. + +%package tools-devel +Summary: Assortment of tools for the Linux kernel +Requires: kernel-rt-tools = %{version}-%{release} +Requires: kernel-rt-tools-libs = %{version}-%{release} +Provides: kernel-rt-tools-libs-devel = %{version}-%{release} +Obsoletes: kernel-rt-tools-libs-devel +%description tools-devel +This package contains the development files for the tools/ directory from +the kernel source. + +%if 0%{?with_perf} +%package -n perf +Summary: Performance monitoring for the Linux kernel +%description -n perf +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + +%package -n python2-perf +Provides: python-perf = %{version}-%{release} +Obsoletes: python-perf +Summary: Python bindings for apps which will manipulate perf events + +%description -n python2-perf +A Python module that permits applications written in the Python programming +language to use the interface to manipulate perf events. + +%package -n python3-perf +Summary: Python bindings for apps which will manipulate perf events +%description -n python3-perf +A Python module that permits applications written in the Python programming +language to use the interface to manipulate perf events. +%endif + +%package -n bpftool +Summary: Inspection and simple manipulation of eBPF programs and maps +%description -n bpftool +This package contains the bpftool, which allows inspection and simple +manipulation of eBPF programs and maps. + +%package source +Summary: The kernel source +%description source +This package contains various source files from the kernel.
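+
+# Illustrative local build notes (not part of the package build): the _multibuild
+# entry added at the top of this change makes OBS treat "kernel-rt" as its own
+# build flavor alongside raspberrypi-kernel. A rough local smoke test might look
+# like the commands below; the repository/arch names and the osc option spelling
+# are assumptions and may differ between osc versions.
+#
+#   # build only this flavor from a checked-out OBS package directory
+#   osc build --multibuild-package kernel-rt standard aarch64
+#
+#   # or a plain local build once Source0 and the patches are in _sourcedir
+#   rpmbuild -ba kernel-rt.spec --define "_sourcedir $PWD"
+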
+ +%if 0%{?with_debuginfo} +%define _debuginfo_template %{nil} +%define _debuginfo_subpackages 0 + +%define debuginfo_template(n:) \ +%package -n %{-n*}-debuginfo\ +Summary: Debug information for package %{-n*}\ +Group: Development/Debug\ +AutoReq: 0\ +AutoProv: 1\ +%description -n %{-n*}-debuginfo\ +This package provides debug information for package %{-n*}.\ +Debug information is useful when developing applications that use this\ +package or when debugging this package.\ +%{nil} + +%debuginfo_template -n kernel-rt +%files -n kernel-rt-debuginfo -f debugfiles.list + +%debuginfo_template -n bpftool +%files -n bpftool-debuginfo -f bpftool-debugfiles.list +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%{_sbindir}/bpftool.*(\.debug)?|XXX' -o bpftool-debugfiles.list} + +%debuginfo_template -n kernel-rt-tools +%files -n kernel-rt-tools-debuginfo -f kernel-tools-debugfiles.list +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%{_bindir}/centrino-decode.*(\.debug)?|.*%{_bindir}/powernow-k8-decode.*(\.debug)?|.*%{_bindir}/cpupower.*(\.debug)?|.*%{_libdir}/libcpupower.*|.*%{_libdir}/libcpupower.*|.*%{_bindir}/turbostat.(\.debug)?|.*%{_bindir}/.*gpio.*(\.debug)?|.*%{_bindir}/.*iio.*(\.debug)?|.*%{_bindir}/tmon.*(.debug)?|XXX' -o kernel-tools-debugfiles.list} + +%if 0%{?with_perf} +%debuginfo_template -n perf +%files -n perf-debuginfo -f perf-debugfiles.list +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%{_bindir}/perf.*(\.debug)?|.*%{_libexecdir}/perf-core/.*|.*%{_libdir}/traceevent/.*|XXX' -o perf-debugfiles.list} + + +%debuginfo_template -n python2-perf +%files -n python2-perf-debuginfo -f python2-perf-debugfiles.list +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%{python2_sitearch}/perf.*(.debug)?|XXX' -o python2-perf-debugfiles.list} + +%debuginfo_template -n python3-perf +%files -n python3-perf-debuginfo -f python3-perf-debugfiles.list +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%{python3_sitearch}/perf.*(.debug)?|XXX' -o python3-perf-debugfiles.list} +%endif +%endif + +%prep + +%setup -q -n kernel-%{version} -c + +%if 0%{?with_patch} +tar -xjf %{SOURCE9998} +%endif + +mv kernel linux-%{KernelVer} +cd linux-%{KernelVer} + +%if 0%{?with_patch} +cp %{SOURCE9000} . +cp %{SOURCE9001} . +cp %{SOURCE9002} . + +if [ ! -d patches ];then + mv ../patches . +fi + +Applypatches() +{ + set -e + set -o pipefail + local SERIESCONF=$1 + local PATCH_DIR=$2 + sed -i '/^#/d' $SERIESCONF + sed -i '/^[\s]*$/d' $SERIESCONF + ( + echo "trap 'echo \"*** patch \$_ failed ***\"' ERR" + echo "set -ex" + cat $SERIESCONF | \ + sed "s!^!patch -s -F0 -E -p1 --no-backup-if-mismatch -i $PATCH_DIR/!" \ + ) | sh +} + +Applypatches series.conf %{_builddir}/kernel-%{version}/linux-%{KernelVer} +%endif + +%patch0 -p1 +%patch1 -p1 + +touch .scmversion + +find . \( -name "*.orig" -o -name "*~" \) -exec rm -f {} \; >/dev/null +find . -name .gitignore -exec rm -f {} \; >/dev/null + +%if 0%{?with_signmodules} + cp %{SOURCE11} certs/. 
+%endif + +%if 0%{?with_source} +# Copy directory backup for kernel-source +cp -a ../linux-%{KernelVer} ../linux-%{KernelVer}-Source +find ../linux-%{KernelVer}-Source -type f -name "\.*" -exec rm -rf {} \; >/dev/null +%endif + +%if 0%{?with_perf} +cp -a tools/perf tools/python3-perf +%endif + +%build +cd linux-%{KernelVer} + +perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = -%{release}.%{_target_cpu}/" Makefile + +## make linux +make mrproper %{_smp_mflags} + +make ARCH=%{Arch} openeuler_defconfig +make ARCH=%{Arch} olddefconfig + +TargetImage=$(basename $(make -s image_name)) + +make ARCH=%{Arch} $TargetImage %{?_smp_mflags} +make ARCH=%{Arch} modules %{?_smp_mflags} + +%if 0%{?with_kabichk} + chmod 0755 %{SOURCE18} + if [ -e $RPM_SOURCE_DIR/Module.kabi_%{_target_cpu} ]; then + %{SOURCE18} -k $RPM_SOURCE_DIR/Module.kabi_%{_target_cpu} -s Module.symvers || exit 1 + echo "**** NOTE: now don't check Kabi. ****" + else + echo "**** NOTE: Cannot find reference Module.kabi file. ****" + fi +%endif + +# aarch64 make dtbs +%ifarch aarch64 + make ARCH=%{Arch} dtbs +%endif + +## make tools +%if 0%{?with_perf} +# perf +%global perf_make \ + make EXTRA_CFLAGS="-Wl,-z,now -g -Wall -fstack-protector-strong -fPIC" EXTRA_PERFLIBS="-fpie -pie" %{?_smp_mflags} -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_LIBNUMA=1 NO_STRLCPY=1 prefix=%{_prefix} +%global perf_python2 -C tools/perf PYTHON=%{__python2} +%global perf_python3 -C tools/python3-perf PYTHON=%{__python3} +# perf +chmod +x tools/perf/check-headers.sh +%{perf_make} %{perf_python2} all + +# make sure check-headers.sh is executable +chmod +x tools/python3-perf/check-headers.sh +%{perf_make} %{perf_python3} all + +pushd tools/perf/Documentation/ +make %{?_smp_mflags} man +popd +%endif + +# bpftool +pushd tools/bpf/bpftool +make +popd + +# cpupower +chmod +x tools/power/cpupower/utils/version-gen.sh +make %{?_smp_mflags} -C tools/power/cpupower CPUFREQ_BENCH=false +%ifarch %{ix86} + pushd tools/power/cpupower/debug/i386 + make %{?_smp_mflags} centrino-decode powernow-k8-decode + popd +%endif +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + make %{?_smp_mflags} centrino-decode powernow-k8-decode + popd +%endif +%ifarch %{ix86} x86_64 + pushd tools/power/x86/x86_energy_perf_policy/ + make + popd + pushd tools/power/x86/turbostat + make + popd +%endif +# thermal +pushd tools/thermal/tmon/ +make +popd +# iio +pushd tools/iio/ +make +popd +# gpio +pushd tools/gpio/ +make +popd +# kvm +pushd tools/kvm/kvm_stat/ +make %{?_smp_mflags} man +popd + + +%install +%define _python_bytecompile_errors_terminate_build 0 +%if 0%{?with_source} + mkdir -p $RPM_BUILD_ROOT/usr/src/ + mv linux-%{KernelVer}-Source $RPM_BUILD_ROOT/usr/src/linux-%{KernelVer} + cp linux-%{KernelVer}/.config $RPM_BUILD_ROOT/usr/src/linux-%{KernelVer}/ + cp linux-%{KernelVer}/.scmversion $RPM_BUILD_ROOT/usr/src/linux-%{KernelVer}/ +%endif + +cd linux-%{KernelVer} + +## install linux + +# deal with kernel-source, now we don't need kernel-source +#mkdir $RPM_BUILD_ROOT/usr/src/linux-%{KernelVer} +#tar cf - --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg --exclude .git --exclude=.tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=.config.old --exclude=.missing-syscalls.d --exclude=patches . 
| tar xf - -C %{buildroot}/usr/src/linux-%{KernelVer} + +mkdir -p $RPM_BUILD_ROOT/boot +dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initramfs-%{KernelVer}.img bs=1M count=20 + +install -m 755 $(make -s image_name) $RPM_BUILD_ROOT/boot/vmlinuz-%{KernelVer} +pushd $RPM_BUILD_ROOT/boot +sha512hmac ./vmlinuz-%{KernelVer} >./.vmlinuz-%{KernelVer}.hmac +popd + +install -m 644 .config $RPM_BUILD_ROOT/boot/config-%{KernelVer} +install -m 644 System.map $RPM_BUILD_ROOT/boot/System.map-%{KernelVer} + +%if 0%{?with_kabichk} + gzip -c9 < Module.symvers > $RPM_BUILD_ROOT/boot/symvers-%{KernelVer}.gz +%endif + +mkdir -p $RPM_BUILD_ROOT%{_sbindir} +install -m 755 %{SOURCE200} $RPM_BUILD_ROOT%{_sbindir}/mkgrub-menu-%{hulkrelease}.sh + + +%if 0%{?with_debuginfo} + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/%{KernelVer} + cp vmlinux $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/%{KernelVer} +%endif + +# deal with module, if not kdump +make ARCH=%{Arch} INSTALL_MOD_PATH=$RPM_BUILD_ROOT modules_install KERNELRELEASE=%{KernelVer} mod-fw= +######## to collect ko to module.filelist about netwoking. block. drm. modesetting ############### +pushd $RPM_BUILD_ROOT/lib/modules/%{KernelVer} +find -type f -name "*.ko" >modnames + +# mark modules executable so that strip-to-file can strip them +xargs --no-run-if-empty chmod u+x < modnames + +# Generate a list of modules for block and networking. + +grep -F /drivers/ modnames | xargs --no-run-if-empty nm -upA | +sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef + +collect_modules_list() +{ + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > modules.$1 + if [ ! -z "$3" ]; then + sed -r -e "/^($3)\$/d" -i modules.$1 + fi +} + +collect_modules_list networking \ + 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt2x00(pci|usb)_probe|register_netdevice' +collect_modules_list block \ + 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size|ahci_platform_get_resources' 'pktcdvd.ko|dm-mod.ko' +collect_modules_list drm \ + 'drm_open|drm_init' +collect_modules_list modesetting \ + 'drm_crtc_init' + +# detect missing or incorrect license tags +rm -f modinfo +while read i +do + echo -n "$i " >> modinfo + /sbin/modinfo -l $i >> modinfo +done < modnames + +grep -E -v \ + 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' \ + modinfo && exit 1 + +rm -f modinfo modnames drivers.undef + +for i in alias alias.bin builtin.bin ccwmap dep dep.bin ieee1394map inputmap isapnpmap ofmap pcimap seriomap symbols symbols.bin usbmap +do + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$i +done +popd +# modsign module ko;need after find-debuginfo,strip +%define __modsign_install_post \ + if [ "%{with_signmodules}" -eq "1" ];then \ + cp certs/signing_key.pem . \ + cp certs/signing_key.x509 . 
\ + chmod 0755 %{modsign_cmd} \ + %{modsign_cmd} $RPM_BUILD_ROOT/lib/modules/%{KernelVer} || exit 1 \ + fi \ +%{nil} + +# deal with header +make ARCH=%{Arch} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr KBUILD_SRC= headers_install +make ARCH=%{Arch} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_check +find $RPM_BUILD_ROOT/usr/include -name "\.*" -exec rm -rf {} \; + +# aarch64 dtbs install +%ifarch aarch64 + mkdir -p $RPM_BUILD_ROOT/boot/dtb-%{KernelVer} + install -m 644 $(find arch/%{Arch}/boot -name "*.dtb") $RPM_BUILD_ROOT/boot/dtb-%{KernelVer}/ + rm -f $(find arch/$Arch/boot -name "*.dtb") +%endif + +# deal with vdso +make -s ARCH=%{Arch} INSTALL_MOD_PATH=$RPM_BUILD_ROOT vdso_install KERNELRELEASE=%{KernelVer} +if [ ! -s ldconfig-kernel.conf ]; then + echo "# Placeholder file, no vDSO hwcap entries used in this kernel." >ldconfig-kernel.conf +fi +install -D -m 444 ldconfig-kernel.conf $RPM_BUILD_ROOT/etc/ld.so.conf.d/kernel-%{KernelVer}.conf + +# deal with /lib/module/ path- sub path: build source kernel +rm -f $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build +rm -f $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/source +mkdir -p $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build +mkdir -p $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/extra +mkdir -p $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/updates +mkdir -p $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/weak-updates +############ to do collect devel file ######### +# 1. Makefile And Kconfig, .config sysmbol +# 2. scrpits dir +# 3. .h file +find -type f \( -name "Makefile*" -o -name "Kconfig*" \) -exec cp --parents {} $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build \; +for f in Module.symvers System.map Module.markers .config;do + test -f $f || continue + cp $f $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build +done + +cp -a scripts $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build +if [ -d arch/%{Arch}/scripts ]; then + cp -a arch/%{Arch}/scripts $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/arch/%{_arch} || : +fi +if [ -f arch/%{Arch}/*lds ]; then + cp -a arch/%{Arch}/*lds $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/arch/%{_arch}/ || : +fi +find $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/scripts/ -name "*.o" -exec rm -rf {} \; + +if [ -d arch/%{Arch}/include ]; then + cp -a --parents arch/%{Arch}/include $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/ +fi +cp -a include $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/include + +%ifarch aarch64 + # Needed for systemtap + cp -a --parents arch/arm64/kernel/module.lds $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/ + cp -a --parents arch/arm/include/asm $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/ +%endif + +# copy objtool for kernel-devel (needed for building external modules) +if grep -q CONFIG_STACK_VALIDATION=y .config; then + mkdir -p $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/tools/objtool + cp -a tools/objtool/objtool $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/tools/objtool +fi + +# Make sure the Makefile and version.h have a matching timestamp so that +# external modules can be built +touch -r $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/Makefile $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/include/generated/uapi/linux/version.h +touch -r $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/.config $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/include/generated/autoconf.h +# for make prepare +if [ ! 
-f $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/include/config/auto.conf ];then + cp .config $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build/include/config/auto.conf +fi + +mkdir -p %{buildroot}/usr/src/kernels +mv $RPM_BUILD_ROOT/lib/modules/%{KernelVer}/build $RPM_BUILD_ROOT/usr/src/kernels/%{KernelVer} + +find $RPM_BUILD_ROOT/usr/src/kernels/%{KernelVer} -name ".*.cmd" -exec rm -f {} \; + +pushd $RPM_BUILD_ROOT/lib/modules/%{KernelVer} +ln -sf /usr/src/kernels/%{KernelVer} build +ln -sf build source +popd + + +# deal with doc , now we don't need + + +# deal with kernel abi whitelists. now we don't need + + +## install tools +%if 0%{?with_perf} +# perf +# perf tool binary and supporting scripts/binaries +%{perf_make} %{perf_python2} DESTDIR=%{buildroot} lib=%{_lib} install-bin install-traceevent-plugins +# remove the 'trace' symlink. +rm -f %{buildroot}%{_bindir}/trace + +# remove examples +rm -rf %{buildroot}/usr/lib/perf/examples +# remove the stray header file that somehow got packaged in examples +rm -rf %{buildroot}/usr/lib/perf/include/bpf/ + +# python-perf extension +%{perf_make} %{perf_python3} DESTDIR=%{buildroot} install-python_ext +%{perf_make} %{perf_python2} DESTDIR=%{buildroot} install-python_ext +%endif + +install -d %{buildroot}/%{_mandir}/man1 +install -pm0644 tools/kvm/kvm_stat/kvm_stat.1 %{buildroot}/%{_mandir}/man1/ +# perf man pages (note: implicit rpm magic compresses them later) +%if 0%{?with_perf} +install -pm0644 tools/perf/Documentation/*.1 %{buildroot}/%{_mandir}/man1/ +%endif + +# bpftool +pushd tools/bpf/bpftool +make DESTDIR=%{buildroot} prefix=%{_prefix} bash_compdir=%{_sysconfdir}/bash_completion.d/ mandir=%{_mandir} install doc-install +popd +# cpupower +make -C tools/power/cpupower DESTDIR=%{buildroot} libdir=%{_libdir} mandir=%{_mandir} CPUFREQ_BENCH=false install +rm -f %{buildroot}%{_libdir}/*.{a,la} +%find_lang cpupower +mv cpupower.lang ../ +%ifarch %{ix86} + pushd tools/power/cpupower/debug/i386 + install -m755 centrino-decode %{buildroot}%{_bindir}/centrino-decode + install -m755 powernow-k8-decode %{buildroot}%{_bindir}/powernow-k8-decode + popd +%endif +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + install -m755 centrino-decode %{buildroot}%{_bindir}/centrino-decode + install -m755 powernow-k8-decode %{buildroot}%{_bindir}/powernow-k8-decode + popd +%endif +chmod 0755 %{buildroot}%{_libdir}/libcpupower.so* +mkdir -p %{buildroot}%{_unitdir} %{buildroot}%{_sysconfdir}/sysconfig +install -m644 %{SOURCE2000} %{buildroot}%{_unitdir}/cpupower.service +install -m644 %{SOURCE2001} %{buildroot}%{_sysconfdir}/sysconfig/cpupower +%ifarch %{ix86} x86_64 + mkdir -p %{buildroot}%{_mandir}/man8 + pushd tools/power/x86/x86_energy_perf_policy + make DESTDIR=%{buildroot} install + popd + pushd tools/power/x86/turbostat + make DESTDIR=%{buildroot} install + popd +%endif +# thermal +pushd tools/thermal/tmon +make INSTALL_ROOT=%{buildroot} install +popd +# iio +pushd tools/iio +make DESTDIR=%{buildroot} install +popd +# gpio +pushd tools/gpio +make DESTDIR=%{buildroot} install +popd +# kvm +pushd tools/kvm/kvm_stat +make INSTALL_ROOT=%{buildroot} install-tools +popd + +%define __spec_install_post\ +%{?__debug_package:%{__debug_install_post}}\ +%{__arch_install_post}\ +%{__os_install_post}\ +%{__modsign_install_post}\ +%{nil} + + +%post +%{_sbindir}/new-kernel-pkg --package kernel --install %{KernelVer} || exit $? 
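+
+# Ordering note (standard RPM upgrade behavior): on upgrade the new package's
+# %post above runs before the old package's %preun/%postun, and %posttrans runs
+# last, after the old kernel's files have been removed. That is why %post only
+# registers the boot entry and the initramfs generation
+# (--mkinitrd --dracut --depmod) is deferred to %posttrans further below.
+#
+# Illustrative post-install checks (not executed by rpm; <kver> is a placeholder):
+#   lsinitrd /boot/initramfs-<kver>.img | head    # dracut produced a real image
+#   grubby --info=/boot/vmlinuz-<kver>            # boot entry was registered
+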
+ +%preun +if [ `uname -i` == "aarch64" ] && + [ -f /boot/EFI/grub2/grub.cfg ]; then + /usr/bin/sh %{_sbindir}/mkgrub-menu-%{hulkrelease}.sh %{version}-%{hulkrelease}.aarch64 /boot/EFI/grub2/grub.cfg remove +fi + +%postun +%{_sbindir}/new-kernel-pkg --rminitrd --rmmoddep --remove %{KernelVer} || exit $? +if [ -x %{_sbindir}/weak-modules ] +then + %{_sbindir}/weak-modules --remove-kernel %{KernelVer} || exit $? +fi +if [ -d /lib/modules/%{KernelVer} ] && [ "`ls -A /lib/modules/%{KernelVer}`" = "" ]; then + rm -rf /lib/modules/%{KernelVer} +fi + +%posttrans +%{_sbindir}/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{KernelVer} || exit $? +%{_sbindir}/new-kernel-pkg --package kernel --rpmposttrans %{KernelVer} || exit $? +if [ `uname -i` == "aarch64" ] && + [ -f /boot/EFI/grub2/grub.cfg ]; then + /usr/bin/sh %{_sbindir}/mkgrub-menu-%{hulkrelease}.sh %{version}-%{hulkrelease}.aarch64 /boot/EFI/grub2/grub.cfg update +fi +if [ -x %{_sbindir}/weak-modules ] +then + %{_sbindir}/weak-modules --add-kernel %{KernelVer} || exit $? +fi +%{_sbindir}/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{KernelVer} || exit $? +%{_sbindir}/new-kernel-pkg --package kernel --rpmposttrans %{KernelVer} || exit $? + + +%post devel +if [ -f /etc/sysconfig/kernel ] +then + . /etc/sysconfig/kernel || exit $? +fi +if [ "$HARDLINK" != "no" -a -x /usr/sbin/hardlink ] +then + (cd /usr/src/kernels/%{KernelVer} && + /usr/bin/find . -type f | while read f; do + hardlink -c /usr/src/kernels/*.oe*.*/$f $f + done) +fi + +%post -n kernel-rt-tools +/sbin/ldconfig +%systemd_post cpupower.service + +%preun -n kernel-rt-tools +%systemd_preun cpupower.service + +%postun -n kernel-rt-tools +/sbin/ldconfig +%systemd_postun cpupower.service + +%files +%defattr (-, root, root) +%doc +/boot/config-* +%ifarch aarch64 +/boot/dtb-* +%endif +%if 0%{?with_kabichk} +/boot/symvers-* +%endif +/boot/System.map-* +/boot/vmlinuz-* +%ghost /boot/initramfs-%{KernelVer}.img +/boot/.vmlinuz-*.hmac +/etc/ld.so.conf.d/* +/lib/modules/%{KernelVer}/ +%exclude /lib/modules/%{KernelVer}/source +%exclude /lib/modules/%{KernelVer}/build +%{_sbindir}/mkgrub-menu*.sh + +%files devel +%defattr (-, root, root) +%doc +/lib/modules/%{KernelVer}/source +/lib/modules/%{KernelVer}/build +/usr/src/kernels/%{KernelVer} +/usr/include/* + + +%if 0%{?with_perf} +%files -n perf +%{_libdir}/libperf* +%{_bindir}/perf +%dir %{_libdir}/traceevent +%{_libdir}/traceevent/plugins/ +%{_libexecdir}/perf-core +%{_datadir}/perf-core/ +%{_mandir}/man[1-8]/perf* +%{_sysconfdir}/bash_completion.d/perf +%doc linux-%{KernelVer}/tools/perf/Documentation/examples.txt +%dir %{_datadir}/doc/perf-tip +%{_datadir}/doc/perf-tip/* +%license linux-%{KernelVer}/COPYING + +%files -n python2-perf +%license linux-%{KernelVer}/COPYING +%{python2_sitearch}/* + +%files -n python3-perf +%license linux-%{KernelVer}/COPYING +%{python3_sitearch}/* +%endif + +%files -n kernel-rt-tools -f cpupower.lang +%{_bindir}/cpupower +%ifarch %{ix86} x86_64 +%{_bindir}/centrino-decode +%{_bindir}/powernow-k8-decode +%endif +%{_unitdir}/cpupower.service +%{_mandir}/man[1-8]/cpupower* +%config(noreplace) %{_sysconfdir}/sysconfig/cpupower +%ifarch %{ix86} x86_64 +%{_bindir}/x86_energy_perf_policy +%{_mandir}/man8/x86_energy_perf_policy* +%{_bindir}/turbostat +%{_mandir}/man8/turbostat* +%endif +%{_bindir}/tmon +%{_bindir}/iio_event_monitor +%{_bindir}/iio_generic_buffer +%{_bindir}/lsiio +%{_bindir}/lsgpio +%{_bindir}/gpio-hammer +%{_bindir}/gpio-event-mon 
+%{_mandir}/man1/kvm_stat* +%{_bindir}/kvm_stat +%{_libdir}/libcpupower.so.0 +%{_libdir}/libcpupower.so.0.0.1 +%license linux-%{KernelVer}/COPYING + +%files -n kernel-rt-tools-devel +%{_libdir}/libcpupower.so +%{_includedir}/cpufreq.h +%{_includedir}/cpuidle.h + +%files -n bpftool +%{_sbindir}/bpftool +%{_sysconfdir}/bash_completion.d/bpftool +%{_mandir}/man8/bpftool-cgroup.8.gz +%{_mandir}/man8/bpftool-map.8.gz +%{_mandir}/man8/bpftool-prog.8.gz +%{_mandir}/man8/bpftool-perf.8.gz +%{_mandir}/man8/bpftool.8.gz +%{_mandir}/man7/bpf-helpers.7.gz +%license linux-%{KernelVer}/COPYING + +%if 0%{?with_source} +%files source +%defattr(-,root,root) +/usr/src/linux-%{KernelVer}/* +/usr/src/linux-%{KernelVer}/.config +/usr/src/linux-%{KernelVer}/.scmversion +%endif + +%changelog + +* Tue Mar 15 2022 Laibin Qiu - 4.19.90-2203.3.0.0141 +- crypto: pcrypt - Fix user-after-free on module unload +- lib/iov_iter: initialize "flags" in new pipe_buffer +- mm: Count reliable shmem used based on NR_SHMEM +- mm: fix zoneref mapping problem in memory reliable +- mm: disable memory reliable when kdump is in progress +- mm: introduce "clear_freelist" kernel parameter +- mm: fix unable to use reliable memory in page cache +- nfc: st21nfca: Fix potential buffer overflows in EVT_TRANSACTION +- select: Fix indefinitely sleeping task in poll_schedule_timeout() +- mtd: nand: bbt: Fix corner case in bad block table handling +- netns: add schedule point in ops_exit_list() +- af_unix: annote lockless accesses to unix_tot_inflight & gc_in_progress +- crypto: stm32/crc32 - Fix kernel BUG triggered in probe() +- ext4: don't use the orphan list when migrating an inode +- ext4: set csum seed in tmp inode while migrating to extents +- ext4: make sure quota gets properly shutdown on error +- ext4: make sure to reset inode lockdep class when quota enabling fails +- cputime, cpuacct: Include guest time in user time in cpuacct.stat +- serial: Fix incorrect rs485 polarity on uart open +- scsi: sr: Don't use GFP_DMA +- dm space map common: add bounds check to sm_ll_lookup_bitmap() +- dm btree: add a defensive bounds check to insert_at() +- ACPICA: Executer: Fix the REFCLASS_REFOF case in acpi_ex_opcode_1A_0T_1R() +- ACPICA: Utilities: Avoid deleting the same object twice in a row +- jffs2: GC deadlock reading a page that is used in jffs2_write_begin() +- bpf: Do not WARN in bpf_warn_invalid_xdp_action() +- net: bonding: debug: avoid printing debug logs when bond is not notifying peers +- net-sysfs: update the queue counts in the unregistration path +- dmaengine: pxa/mmp: stop referencing config->slave_id +- scsi: ufs: Fix race conditions related to driver data +- iommu/io-pgtable-arm: Fix table descriptor paddr formatting +- ext4: avoid trim error on fs with small groups +- net: mcs7830: handle usb read errors properly +- tpm: add request_locality before write TPM_INT_ENABLE +- netfilter: ipt_CLUSTERIP: fix refcount leak in clusterip_tg_check() +- xfrm: state and policy should fail if XFRMA_IF_ID 0 +- xfrm: interface with if_id 0 should return error +- crypto: stm32/cryp - fix double pm exit +- xfrm: fix a small bug in xfrm_sa_len() +- sched/rt: Try to restart rt period timer when rt runtime exceeded +- serial: amba-pl011: do not request memory region twice +- tty: serial: uartlite: allow 64 bit address +- netfilter: bridge: add support for pppoe filtering +- crypto: qce - fix uaf on qce_ahash_register_one +- shmem: fix a race between shmem_unused_huge_shrink and shmem_evict_inode +- can: bcm: switch timer to HRTIMER_MODE_SOFT and 
remove hrtimer_tasklet +- ip6_vti: initialize __ip6_tnl_parm struct in vti6_siocdevprivate +- scsi: libiscsi: Fix UAF in iscsi_conn_get_param()/iscsi_conn_teardown() +- ipv6: Do cleanup if attribute validation fails in multipath route +- ipv6: Continue processing multipath route even if gateway attribute is invalid +- ipv6: Check attribute length for RTA_GATEWAY when deleting multipath route +- ipv6: Check attribute length for RTA_GATEWAY in multipath route +- tracing: Tag trace_percpu_buffer as a percpu pointer +- tracing: Fix check for trace_percpu_buffer validity in get_trace_buf() +- net: fix use-after-free in tw_timer_handler +- udp: using datalen to cap ipv6 udp max gso segments +- selinux: initialize proto variable in selinux_ip_postroute_compat() +- x86/pkey: Fix undefined behaviour with PKRU_WD_BIT +- ipmi: fix initialization when workqueue allocation fails +- ipmi: bail out if init_srcu_struct fails +- bonding: fix ad_actor_system option setting to default +- ipmi: Fix UAF when uninstall ipmi_si and ipmi_msghandler module +- net: skip virtio_net_hdr_set_proto if protocol already set +- net: hns3: update hns3 version to 22.2.1 +- net: hns3: fix RMW issue for VLAN filter switch +- net: hns3: fix pf vlan filter out of work after self test +- arm64: acpi: fix UBSAN warning +- sched: Fix sleeping in atomic context at cpu_qos_write() +- io_uring: don't re-setup vecs/iter in io_resumit_prep() is already there +- io_uring: don't double complete failed reissue request +- io_uring: remove redundant initialization of variable ret +- block: don't ignore REQ_NOWAIT for direct IO +- io_uring: re-issue block requests that failed because of resources +- dm multipath: fix missing blk_account_io_done() in error path +- block: account inflight from blk_account_io_start() if 'precise_iostat' is set +- block: add a switch for precise iostat accounting +- blk-throttle: Set BIO_THROTTLED when bio has been throttled +- bfq: fix use-after-free in bfq_dispatch_request +- hugetlbfs: fix a truncation issue in hugepages parameter + +* Tue Mar 08 2022 Laibin Qiu - 4.19.90-2203.2.0.0140 +- mm: Fix return val in khugepaged_scan_pmd() +- mm: do some clean up of accounting ReliableTaskUsed +- mm: fix statistic of ReliableTaskUsed +- mm: fix missing reclaim of low-reliable page cache +- mm: fix statistic of ReliableFileCache in /proc/meminfo +- mm: Add more gfp flag check in prepare_before_alloc() +- efi: Stub mirrored_kernelcore if CONFIG_HAVE_MEMBLOCK_NODE_MAP is not enabled +- mm: Memory reliable features can only be disabled via proc interface +- mm: Fix reliable_debug in proc not consistent with boot parameter problem +- f2fs: fix to do sanity check on inode type during garbage collection +- mm: Check page status in page_reliable() +- mm: Show ReliableTaskUsed in /proc/meminfo +- mm: Refactor code in reliable_report_meminfo() +- mm: Show correct reliable_user_used if PAGE_SIZE is not 4K +- proc: Fix reliable display err in /proc/pid/status +- Revert "mm: add page cache fallback statistic" +- mm: fix page cache use reliable memory when reliable_debug=P +- mm: add support for limiting the usage of reliable memory in pagecache +- mm: add "ReliableFileCache" item in /proc/meminfo +- mm: Introduce shmem mirrored memory limit for memory reliable +- mm: Introduce watermark check for memory reliable +- mm: Count mirrored pages in buddy system +- mm: Export mem_reliable_status() for checking memory reliable status +- mm: Make MEMORY_RELIABLE depends on HAVE_MEMBLOCK_NODE_MAP +- efi: Disable mirror feature if 
kernelcore is not spcified +- mm: Introduce proc interface to control memory reliable features +- mm: Demote warning message in vmemmap_verify() to debug level +- mm: Ratelimited mirrored memory related warning messages + +* Tue Mar 01 2022 Laibin Qiu - 4.19.90-2203.1.0.0139 +- usb: gadget: rndis: check size of RNDIS_MSG_SET command +- USB: gadget: validate interface OS descriptor requests +- mm/hwpoison: clear MF_COUNT_INCREASED before retrying get_any_page() +- udf: Restore i_lenAlloc when inode expansion fails +- udf: Fix NULL ptr deref when converting from inline format +- ext4: fix underflow in ext4_max_bitmap_size() +- bpf: Verifer, adjust_scalar_min_max_vals to always call update_reg_bounds() +- livepatch/x86: Fix incorrect use of 'strncpy' +- tipc: improve size validations for received domain records +- yam: fix a memory leak in yam_siocdevprivate() +- ipmi_si: Phytium S2500 missing timeout counter reset in intf_mem_inw +- mm,hwpoison: Fix use-after-free in memory_failure() +- dm-mpath: fix UAF in multipath_message() +- usb: gadget: clear related members when goto fail +- usb: gadget: don't release an existing dev->buf +- dm: make sure dm_table is binded before queue request +- cgroup-v1: Require capabilities to set release_agent +- NFSv4: nfs_atomic_open() can race when looking up a non-regular file +- NFSv4: Handle case where the lookup of a directory fails +- configfs: fix a race in configfs_{,un}register_subsystem() + +* Tue Feb 22 2022 Laibin Qiu - 4.19.90-2202.4.0.0138 +- tipc: improve size validations for received domain records +- yam: fix a memory leak in yam_siocdevprivate() +- ipmi_si: Phytium S2500 missing timeout counter reset in intf_mem_inw +- mm,hwpoison: Fix use-after-free in memory_failure() +- dm-mpath: fix UAF in multipath_message() +- usb: gadget: clear related members when goto fail +- usb: gadget: don't release an existing dev->buf +- dm: make sure dm_table is binded before queue request +- cgroup-v1: Require capabilities to set release_agent +- NFSv4: nfs_atomic_open() can race when looking up a non-regular file +- NFSv4: Handle case where the lookup of a directory fails +- configfs: fix a race in configfs_{,un}register_subsystem() +- fs/filesystems.c: downgrade user-reachable WARN_ONCE() to pr_warn_once() +- drm/i915: Flush TLBs before releasing backing store +- moxart: fix potential use-after-free on remove path +- memstick: rtsx_usb_ms: fix UAF + +* Tue Feb 15 2022 Laibin Qiu - 4.19.90-2202.3.0.0137 +- fs/filesystems.c: downgrade user-reachable WARN_ONCE() to pr_warn_once() +- drm/i915: Flush TLBs before releasing backing store +- moxart: fix potential use-after-free on remove path +- memstick: rtsx_usb_ms: fix UAF +- ext4: fix file system corrupted when rmdir non empty directory with IO error +- bpf, doc: Remove references to warning message when using bpf_trace_printk() +- bpf: Remove inline from bpf_do_trace_printk +- bpf: Use dedicated bpf_trace_printk event instead of trace_printk() +- net: cipso: fix warnings in netlbl_cipsov4_add_std +- xsk: Use struct_size() helper +- mm/page_alloc: fix counting of free pages after take off from buddy +- mm,hwpoison: drop unneeded pcplist draining +- mm,hwpoison: take free pages off the buddy freelists +- mm,hwpoison: drain pcplists before bailing out for non-buddy zero-refcount page +- mm,hwpoison: Try to narrow window race for free pages +- mm,hwpoison: introduce MF_MSG_UNSPLIT_THP +- mm,hwpoison: return 0 if the page is already poisoned in soft-offline +- mm,hwpoison: refactor soft_offline_huge_page and 
__soft_offline_page +- mm,hwpoison: rework soft offline for in-use pages +- mm,hwpoison: rework soft offline for free pages +- mm,hwpoison: unify THP handling for hard and soft offline +- mm,hwpoison: kill put_hwpoison_page +- mm,hwpoison: refactor madvise_inject_error +- mm,hwpoison-inject: don't pin for hwpoison_filter +- mm, hwpoison: remove recalculating hpage +- mm,hwpoison: cleanup unused PageHuge() check +- scsi: Revert "target: iscsi: Wait for all commands to finish before freeing a session" +- uce: get_user scenario support kernel recovery +- uce: copy_from_user scenario support kernel recovery +- mm: Modify sharepool sp_mmap() page_offset +- support multiple node for getting phys interface +- share_pool: Accept device_id in k2u flags +- share_pool: Clear the usage of node_id and device_id +- share_pool: Make multi-device support extendable +- share_pool: Fix flags conflict +- config: enable MEMORY_RELIABLE by default +- mm: add sysctl to clear free list pages +- workqueue: Provide queue_work_node to queue work near a given NUMA node +- mm:vmscan: add the missing check of page_cache_over_limit +- sysctl: add proc interface to set page cache limit +- mm/vmscan: dont do shrink_slab in reclaim page cache +- mm/vmscan: dont reclaim anon page when shrink page cache +- filemap: dont shrink_page_cache in add_to_page_cache +- mm/vmscan: fix unexpected shrinking page cache with vm_cache_reclaim_enable disable +- mm/vmscan: fix frequent call of shrink_page_cache_work +- proc/meminfo: add "FileCache" item in /proc/meminfo +- mm: add page cache fallback statistic +- mm: add cmdline for the reliable memory usage of page cache +- mm: make page cache use reliable memory by default +- shmem: Show reliable shmem info +- shmem: Introduce shmem reliable +- mm: Introduce fallback mechanism for memory reliable +- mm: Add reliable memory use limit for user tasks +- mm: thp: Add memory reliable support for hugepaged collapse +- proc: Count reliable memory usage of reliable tasks +- mm: Add reliable_nr_page for accounting reliable memory +- mm: Introduce reliable flag for user task +- meminfo: Show reliable memory info +- mm: Introduce memory reliable +- efi: Find mirrored memory ranges for arm64 +- efi: Make efi_find_mirror() public +- arm64: efi: Add fake memory support +- efi: Make efi_print_memmap() public +- mm/memory_hotplug: allow to specify a default online_type +- mm/memory_hotplug: convert memhp_auto_online to store an online_type +- hv_balloon: don't check for memhp_auto_online manually +- drivers/base/memory: store mapping between MMOP_* and string in an array +- drivers/base/memory: map MMOP_OFFLINE to 0 +- drivers/base/memory: rename MMOP_ONLINE_KEEP to MMOP_ONLINE +- drivers/base/memory.c: Use DEVICE_ATTR_RO and friends +- mm/memory_hotplug: drop "online" parameter from add_memory_resource() + +* Tue Feb 08 2022 Laibin Qiu - 4.19.90-2202.1.0.0136 +- config: enable CONFIG_MEMCG_MEMFS_INFO by default +- mm/memcg_memfs_info: show files that having pages charged in mem_cgroup +- ext4: fix e2fsprogs checksum failure for mounted filesystem +- drm/vmwgfx: Fix stale file descriptors on failed usercopy +- perf vendor events amd: Fix broken L2 Cache Hits from L2 HWPF metric +- perf vendor events amd: Add recommended events +- perf vendor events amd: Add L2 Prefetch events for zen1 +- perf/amd/uncore: Fix sysfs type mismatch +- perf/x86/amd: Don't touch the AMD64_EVENTSEL_HOSTONLY bit inside the guest +- tools/power turbostat: Support AMD Family 19h +- perf/x86/amd/ibs: Support 27-bit extended 
Op/cycle counter +- perf vendor events amd: Enable Family 19h users by matching Zen2 events +- perf vendor events amd: Update Zen1 events to V2 +- perf vendor events amd: Add Zen2 events +- perf vendor events amd: Restrict model detection for zen1 based processors +- perf vendor events amd: Remove redundant '[' +- perf vendor events intel: Add Tremontx event file v1.02 +- perf vendor events intel: Add Icelake V1.00 event file +- perf vendor events amd: Add L3 cache events for Family 17h +- perf vendor events intel: Add uncore_upi JSON support +- perf vendor events amd: perf PMU events for AMD Family 17h +- perf/amd/uncore: Allow F19h user coreid, threadmask, and sliceid specification +- perf/amd/uncore: Allow F17h user threadmask and slicemask specification +- perf/amd/uncore: Prepare to scale for more attributes that vary per family +- perf/x86/amd/ibs: Don't include randomized bits in get_ibs_op_count() +- perf/amd/uncore: Set all slices and threads to restore perf stat -a behaviour +- perf/x86/amd/ibs: Fix raw sample data accumulation +- arch/x86/amd/ibs: Fix re-arming IBS Fetch +- perf/amd/uncore: Add support for Family 19h L3 PMU +- perf/amd/uncore: Make L3 thread mask code more readable +- perf/amd/uncore: Prepare L3 thread mask code for Family 19h +- EDAC/amd64: Handle three rank interleaving mode +- EDAC/amd64: Add family ops for Family 19h Models 00h-0Fh +- EDAC/amd64: Save max number of controllers to family type +- EDAC/amd64: Gather hardware information early +- EDAC/amd64: Make struct amd64_family_type global +- EDAC/amd64: Set grain per DIMM +- EDAC/amd64: Support asymmetric dual-rank DIMMs +- EDAC/amd64: Cache secondary Chip Select registers +- EDAC/amd64: Add PCI device IDs for family 17h, model 70h +- EDAC/amd64: Find Chip Select memory size using Address Mask +- EDAC/amd64: Adjust printed chip select sizes when interleaved +- EDAC/amd64: Recognize x16 symbol size +- EDAC/amd64: Set maximum channel layer size depending on family +- EDAC/amd64: Support more than two Unified Memory Controllers +- EDAC/amd64: Add Family 17h Model 30h PCI IDs +- EDAC/amd64: Initialize DIMM info for systems with more than two channels +- EDAC/amd64: Support more than two controllers for chip selects handling +- EDAC/amd64: Use a macro for iterating over Unified Memory Controllers +- x86/mce: Fix use of uninitialized MCE message string +- x86/MCE/AMD, EDAC/mce_amd: Add new Load Store unit McaType +- x86/MCE/AMD, EDAC/mce_amd: Add new error descriptions for some SMCA bank types +- x86/MCE/AMD, EDAC/mce_amd: Add new McaTypes for CS, PSP, and SMU units +- x86/MCE/AMD, EDAC/mce_amd: Add new MP5, NBIO, and PCIE SMCA bank types +- EDAC/mce_amd: Always load on SMCA systems +- x86/cpu/amd: Call init_amd_zn() om Family 19h processors too +- x86/amd_nb: Add Family 19h PCI IDs +- x86/amd_nb: Add PCI device IDs for family 17h, model 70h +- x86/amd_nb: Add PCI device IDs for family 17h, model 30h +- hwmon/k10temp, x86/amd_nb: Consolidate shared device IDs +- EDAC/amd64: Drop some family checks for newer systems +- x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE +- KVM: mmu: Fix SPTE encoding of MMIO generation upper half +- build_bug.h: add wrapper for _Static_assert +- KVM: x86: fix overlap between SPTE_MMIO_MASK and generation +- KVM: x86: assign two bits to track SPTE kinds +- KVM: Move the memslot update in-progress flag to bit 63 +- KVM: Remove the hack to trigger memslot generation wraparound +- KVM: x86: clflushopt should be treated as a no-op by emulation +- KVM: SVM: Clear the CR4 register on 
reset +- KVM: SVM: Replace hard-coded value with #define +- KVM: x86/mmu: Set mmio_value to '0' if reserved #PF can't be generated +- KVM: x86/mmu: Apply max PA check for MMIO sptes to 32-bit KVM +- KVM: x86: only do L1TF workaround on affected processors +- kvm: x86: Fix L1TF mitigation for shadow MMU +- KVM: x86/mmu: Consolidate "is MMIO SPTE" code +- KVM: SVM: Override default MMIO mask if memory encryption is enabled +- KVM: x86/mmu: Add explicit access mask for MMIO SPTEs +- kvm: x86: Fix reserved bits related calculation errors caused by MKTME +- KVM: x86: Rename access permissions cache member in struct kvm_vcpu_arch +- kvm: x86: Move kvm_set_mmio_spte_mask() from x86.c to mmu.c +- kvm/svm: PKU not currently supported +- kvm: x86: Expose RDPID in KVM_GET_SUPPORTED_CPUID +- KVM: x86: Refactor the MMIO SPTE generation handling +- KVM: Explicitly define the "memslot update in-progress" bit +- KVM: x86: Use a u64 when passing the MMIO gen around +- KVM: x86: expose MOVDIR64B CPU feature into VM. +- KVM: x86: expose MOVDIRI CPU feature into VM. +- KVM: x86: Add requisite includes to hyperv.h +- KVM: x86: Add requisite includes to kvm_cache_regs.h +- KVM: nVMX: Allocate and configure VM{READ,WRITE} bitmaps iff enable_shadow_vmcs +- x86/cpufeatures: Enumerate MOVDIR64B instruction +- x86/cpufeatures: Enumerate MOVDIRI instruction +- x86/pkeys: Don't check if PKRU is zero before writing it +- x86/fpu: Only write PKRU if it is different from current +- x86/pkeys: Provide *pkru() helpers +- sysctl: returns -EINVAL when a negative value is passed to proc_doulongvec_minmax +- arm64: move jump_label_init() before parse_early_param() +- tcp: fix memleak when tcp internal pacing is used +- scsi: scsi_debug: Sanity check block descriptor length in resp_mode_select() +- ovl: fix warning in ovl_create_real() +- fuse: annotate lock in fuse_reverse_inval_entry() +- PCI/MSI: Clear PCI_MSIX_FLAGS_MASKALL on error +- sit: do not call ipip6_dev_free() from sit_init_net() +- net/packet: rx_owner_map depends on pg_vec +- x86/sme: Explicitly map new EFI memmap table as encrypted +- dm btree remove: fix use after free in rebalance_children() +- net: netlink: af_netlink: Prevent empty skb by adding a check on len. 
+- irqchip/irq-gic-v3-its.c: Force synchronisation when issuing INVALL +- net, neigh: clear whole pneigh_entry at alloc time +- aio: fix use-after-free due to missing POLLFREE handling +- aio: keep poll requests on waitqueue until completed +- signalfd: use wake_up_pollfree() +- wait: add wake_up_pollfree() +- tracefs: Have new files inherit the ownership of their parent +- mm: bdi: initialize bdi_min_ratio when bdi is unregistered +- udp: using datalen to cap max gso segments +- bpf: Fix the off-by-two error in range markings +- ipmi: msghandler: Make symbol 'remove_work_wq' static +- serial: core: fix transmit-buffer reset and memleak +- serial: pl011: Add ACPI SBSA UART match id +- net: annotate data-races on txq->xmit_lock_owner +- ipmi: Move remove_work to dedicated workqueue +- vrf: Reset IPCB/IP6CB when processing outbound pkts in vrf dev xmit +- scsi: iscsi: Unblock session then wake up error handler +- shm: extend forced shm destroy to support objects from several IPC nses +- fuse: release pipe buf after last use +- tracing: Check pid filtering when creating events +- ipv6: fix typos in __ip6_finish_output() +- proc/vmcore: fix clearing user buffer by properly using clear_user() +- tracing: Fix pid filtering when triggers are attached +- fuse: fix page stealing +- ipmi_si: Phytium S2500 workaround for MMIO-based IPMI +- etmem: Add a scan flag to support specified page swap-out +- etmem: add swapcache reclaim to etmem +- etmem: add original kernel swap enabled options + +* Tue Jan 25 2022 Laibin Qiu - 4.19.90-2201.4.0.0135 +- net: bridge: clear bridge's private skb space on xmit +- audit: bugfix for infinite loop when flush the hold queue +- blk-throttle: enable hierarchical throttle in cgroup v1 +- xfs: map unwritten blocks in XFS_IOC_{ALLOC,FREE}SP just like fallocate + +* Tue Jan 18 2022 Laibin Qiu - 4.19.90-2201.3.0.0134 +- ip_gre: validate csum_start only on pull +- hugetlbfs: fix issue of preallocation of gigantic pages can't work +- hugetlbfs: extend the definition of hugepages parameter to support node allocation +- mm: remove sharepool sp_unshare_uva current->mm NULL check +- share pool: use rwsem to protect sp group exit +- Add new module parameters:time out +- virtio-blk: validate num_queues during probe +- virtio-blk: Use blk_validate_block_size() to validate block size +- block: Add a helper to validate the block size +- Revert "virtio-blk: Add validation for block size in config space" +- scsi: virtio_scsi: Rescan the entire target on transport reset when LUN is 0 +- Revert "svm: Add support to get svm mpam configuration" +- Revert "svm: Add support to set svm mpam configuration" +- Revert "svm: Add svm_set_user_mpam_en to enable/disable mpam for smmu" +- cgroup: Use open-time cgroup namespace for process migration perm checks +- cgroup: Allocate cgroup_file_ctx for kernfs_open_file->priv +- cgroup: Use open-time credentials for process migraton perm checks +- NFC: add necessary privilege flags in netlink layer +- NFC: add NCI_UNREG flag to eliminate the race +- NFC: reorder the logic in nfc_{un,}register_device +- NFC: reorganize the functions in nci_request +- ext4: Fix BUG_ON in ext4_bread when write quota data +- PM: hibernate: use correct mode for swsusp_close() +- Revert "watchdog: Fix check_preemption_disabled() error" + +* Tue Jan 11 2022 Laibin Qiu - 4.19.90-2201.2.0.0133 +- arm64/mpam: fix mpam dts init arm_mpam_of_device_ids error +- arm64/mpam: fix mpam probe error for wrong init order + +* Tue Jan 04 2022 Laibin Qiu - 4.19.90-2201.1.0.0132 +- mm: export 
collect_procs() +- net: hns: update hns version to 21.12.1 +- net: hns: fix bug when two ports opened promisc mode both +- net: hns3: update hns3 version to 21.12.4 +- net: hns3: fix the concurrency between functions reading debugfs +- f2fs: fix to do sanity check on last xattr entry in __f2fs_setxattr() +- mwifiex: Fix skb_over_panic in mwifiex_usb_recv() +- tee: handle lookup of shm with reference count 0 +- tee: don't assign shm id for private shms +- tee: remove linked list of struct tee_shm +- ext4: fix an use-after-free issue about data=journal writeback mode +- ext4: Fix null-ptr-deref in '__ext4_journal_ensure_credits' +- scsi: ufs: Correct the LUN used in eh_device_reset_handler() callback +- netdevsim: Zero-initialize memory for new map's value in function nsim_bpf_map_alloc +- lib/strncpy_from_user.c: Mask out bytes after NUL terminator. +- bpf: Add probe_read_{user, kernel} and probe_read_{user, kernel}_str helpers +- bpf: Make use of probe_user_write in probe write helper +- uaccess: Add strict non-pagefault kernel-space read function +- bpf: fix script for generating man page on BPF helpers +- bpf: Backport __BPF_FUNC_MAPPER and annotation from mainline +- bpf: Fix up register-based shifts in interpreter to silence KUBSAN +- xen/netback: don't queue unlimited number of packages +- xen/netback: fix rx queue stall detection +- xen/console: harden hvc_xen against event channel storms +- xen/netfront: harden netfront against event channel storms +- xen/blkfront: harden blkfront against event channel storms +- xen/netfront: don't trust the backend response data blindly +- xen/netfront: disentangle tx_skb_freelist +- xen/netfront: don't read data from request on the ring page +- xen/netfront: read response from backend only once +- xen/blkfront: don't trust the backend response data blindly +- xen/blkfront: don't take local copy of a request from the ring page +- xen/blkfront: read response from backend only once +- xen: sync include/xen/interface/io/ring.h with Xen's newest version +- xen/netback: avoid race in xenvif_rx_ring_slots_available() +- netfilter: fix regression in looped (broad|multi)cast's MAC handling +- perf/core: Avoid put_page() when GUP fails +- perf/core: Disable page faults when getting phys address +- mm: kmemleak: slob: respect SLAB_NOLEAKTRACE flag +- ipc: WARN if trying to remove ipc object which is absent +- tun: fix bonding active backup with arp monitoring +- perf/x86/intel/uncore: Fix IIO event constraints for Skylake Server +- perf/x86/intel/uncore: Fix filter_tid mask for CHA events on Skylake Server +- sched/core: Mitigate race cpus_share_cache()/update_top_cache_domain() +- tty: tty_buffer: Fix the softlockup issue in flush_to_ldisc +- PCI/MSI: Deal with devices lying about their MSI mask capability +- PCI/MSI: Destroy sysfs before freeing entries +- ext4: fix lazy initialization next schedule time computation in more granular unit +- x86/cpu: Fix migration safety with X86_BUG_NULL_SEL +- mm, oom: do not trigger out_of_memory from the #PF +- mm, oom: pagefault_out_of_memory: don't force global OOM for dying tasks +- llc: fix out-of-bound array index in llc_sk_dev_hash() +- zram: off by one in read_block_state() +- mm/zsmalloc.c: close race window between zs_pool_dec_isolated() and zs_unregister_migration() +- dmaengine: dmaengine_desc_callback_valid(): Check for `callback_result` +- netfilter: nfnetlink_queue: fix OOB when mac header was cleared +- NFS: Fix deadlocks in nfs_scan_commit_list() +- apparmor: fix error check +- serial: 8250_dw: Drop 
wrong use of ACPI_PTR() +- crypto: pcrypt - Delay write to padata->info +- tcp: don't free a FIN sk_buff in tcp_remove_empty_skb() +- cgroup: Make rebind_subsystems() disable v2 controllers all at once +- task_stack: Fix end_of_stack() for architectures with upwards-growing stack +- gre/sit: Don't generate link-local addr if addr_gen_mode is IN6_ADDR_GEN_MODE_NONE +- smackfs: Fix use-after-free in netlbl_catmap_walk() +- signal: Remove the bogus sigkill_pending in ptrace_stop +- bpf: Prevent increasing bpf_jit_limit above max +- x86/sme: Use #define USE_EARLY_PGTABLE_L5 in mem_encrypt_identity.c +- tpm: Check for integer overflow in tpm2_map_response_body() +- scsi: core: Put LLD module refcnt after SCSI device is released +- net: Prevent infinite while loop in skb_tx_hash() + +* Thu Dec 30 2021 Laibin Qiu - 4.19.90-2112.8.0.0131 +- mm/page_alloc: Use cmdline to disable "place pages to tail" +- bpf: Remove MTU check in __bpf_skb_max_len +- sctp: account stream padding length for reconf chunk + +* Tue Dec 28 2021 Laibin Qiu - 4.19.90-2112.6.0.0130 +- watchdog: Fix check_preemption_disabled() error +- btrfs: unlock newly allocated extent buffer after error +- net/hinic: Fix call trace when the rx_buff module parameter is grater than 2 +- dt-bindings: mpam: add document for arm64 mpam +- arm64/mpam: add device tree support for mpam initialization +- arm64/mpam: remove __init macro to support driver probe +- arm64/mpam: rmid: refine allocation and release process +- arm64/mpam: resctrl: add tips when rmid modification failed +- arm64/mpam: Fix mpam corrupt when cpu online +- cpufreq: schedutil: Destroy mutex before kobject_put() frees the memory +- kprobes: Fix optimize_kprobe()/unoptimize_kprobe() cancellation logic +- kprobes: Set unoptimized flag after unoptimizing code + +* Wed Dec 22 2021 Laibin Qiu - 4.19.90-2112.5.0.0129 +- config: enable CONFIG_RAMAXEL_SPRAID by default +- scsi:spraid: support Ramaxel's spraid driver +- USB: gadget: bRequestType is a bitfield, not a enum +- phonet: refcount leak in pep_sock_accep +- USB: gadget: detect too-big endpoint 0 requests + +* Tue Dec 21 2021 Laibin Qiu - 4.19.90-2112.4.0.0128 +- block, bfq: don't move oom_bfqq +- blk-mq: fix abnormal free in single queue process +- scsi: hisi_sas: Add support for sata disk I/O errors report to libsas +- KVM: arm64: Allow vcpus running without HCR_EL2.FB +- KVM: arm64: Set kvm_vcpu::pre_pcpu properly +- KVM: arm64: Ensure I-cache isolation between vcpus of a same VM +- arm64/tlbi: mark tlbi ipi as EXPERIMENTAL +- arm64/tlb: restore no IPi code +- arm64/configs: enable TLBI_IPI +- arm64/tlbi: split disable_tlbflush_is to control flush +- arm64/tlb: add CONFIG_ARM64_TLBI_IPI +- arm64: tlb: Add boot parameter to disable TLB flush within the same inner shareable domain +- arm64: mm: Restore mm_cpumask (revert commit 38d96287504a ("arm64: mm: kill mm_cpumask usage")) +- audit: ensure userspace is penalized the same as the kernel when under pressure +- audit: improve robustness of the audit queue handling +- block/wbt: fix negative inflight counter when remove scsi device +- nbd: Fix use-after-free in blk_mq_free_rqs +- block, bfq: fix use after free in bfq_bfqq_expire +- block, bfq: fix queue removal from weights tree +- block, bfq: fix decrement of num_active_groups +- block, bfq: fix asymmetric scenarios detection +- block, bfq: improve asymmetric scenarios detection +- fget: check that the fd still exists after getting a ref to it +- config: Enable CONFIG_EXT4_PARALLEL_DIO_READ as default +- ext4: update direct 
I/O read lock pattern for IOCB_NOWAIT +- Revert "Revert "ext4: remove EXT4_STATE_DIOREAD_LOCK flag"" +- Revert "Revert "ext4: Allow parallel DIO reads"" +- net: hns3: update hns3 version to 21.12.3 +- net: hns3: fix the VLAN of a vf cannot be added problem +- net: hns3: fix pfc packet number incorrect after querying pfc parameters +- net: hns3: fix VF RSS failed problem after PF enable multi-TCs +- usb: gadget: configfs: Fix use-after-free issue with udc_name +- hugetlbfs: flush TLBs correctly after huge_pmd_unshare +- mm: share_pool: adjust sp_alloc behavior when coredump +- mm: share_pool: adjust sp_make_share_k2u behavior when coredump +- Revert "timekeeping: Fix ktime_add overflow in tk_set_wall_to_mono" +- Revert "timekeeping: Avoid undefined behaviour in 'ktime_get_with_offset()'" +- Revert "posix-cpu-timers: Avoid undefined behaviour in timespec64_to_ns()" +- time: Normalize timespec64 before timespec64_compare() +- iommu/arm-smmu-v3: remove unnecessary mpam enable procedure +- fix kabi effect by change in md_rdev +- Revert "dm space maps: don't reset space map allocation cursor when committing" +- nvme-fabrics: fix kabi broken by "reject I/O to offline device" +- nvme: fix NULL derefence in nvme_ctrl_fast_io_fail_tmo_show/store +- nvme: export fast_io_fail_tmo to sysfs +- nvme-fabrics: reject I/O to offline device +- nvme: add a Identify Namespace Identification Descriptor list quirk +- nvme: fix identify error status silent ignore +- nvme: fix possible hang when ns scanning fails during error recovery +- nvme: refactor nvme_identify_ns_descs error handling +- nvme: Namepace identification descriptor list is optional +- nvmet: use new ana_log_size instead the old one +- nvme-multipath: fix double initialization of ANA state +- nvme-core: use list_add_tail_rcu instead of list_add_tail for nvme_init_ns_head +- nvme: make nvme_report_ns_ids propagate error back +- nvme-multipath: avoid crash on invalid subsystem cntlid enumeration +- nvme-multipath: split bios with the ns_head bio_set before submitting +- nvme: add proper discard setup for the multipath device +- fix kabi change +- md: Fix undefined behaviour in is_mddev_idle +- xfs: fix up non-directory creation in SGID directories +- xfs: remove the kuid/kgid conversion wrappers +- xfs: remove the icdinode di_uid/di_gid members +- xfs: ensure that the inode uid/gid match values match the icdinode ones +- configfs: fix a use-after-free in __configfs_open_file +- share_pool: don't trace the invalid spa address +- share_pool: Remove the redundant warning message + +* Mon Dec 13 2021 Laibin Qiu - 4.19.90-2112.3.0.0127 +- ixgbe: fix large MTU request from VF +- block, bfq: move bfqq to root_group if parent group is offlined +- io_uring: use bottom half safe lock for fixed file data +- io_uring: fix soft lockup when call __io_remove_buffers +- block: Fix fsync always failed if once failed +- blk-mq: use the new flag to quiesce/unquiesce queue in block layer +- blk-mq: add a new queue flag to quiesce/unquiesce queue +- blk-mq: factor out some helps to quiesce/unquiesce queue +- blk: Fix lock inversion between ioc lock and bfqd lock +- bfq: Remove merged request already in bfq_requests_merged() +- md: fix a warning caused by a race between concurrent md_ioctl()s +- net: hns3: update hns3 version to 21.12.2 +- net: hns3: fix race condition in debugfs +- kabi: fix kabi broken in struct sock +- tracing: Have all levels of checks prevent recursion +- netfilter: Kconfig: use 'default y' instead of 'm' for bool config option +- mm, slub: fix 
mismatch between reconstructed freelist depth and cnt +- vfs: check fd has read access in kernel_read_file_from_fd() +- dma-debug: fix sg checks in debug_dma_map_sg() +- acpi/arm64: fix next_platform_timer() section mismatch error +- x86/resctrl: Free the ctrlval arrays when domain_setup_mon_state() fails +- sched: Always inline is_percpu_thread() +- perf/x86: Reset destroy callback on event init failure +- net: prevent user from passing illegal stab size +- netfilter: ip6_tables: zero-initialize fragment offset +- rtnetlink: fix if_nlmsg_stats_size() under estimation +- netlink: annotate data races around nlk->bound +- net: bridge: use nla_total_size_64bit() in br_get_linkxstats_size() +- net_sched: fix NULL deref in fifo_set_limit() +- phy: mdio: fix memory leak +- bpf, arm: Fix register clobbering in div/mod implementation +- scsi: sd: Free scsi_disk device via put_device() +- cred: allow get_cred() and put_cred() to be given NULL. +- net: udp: annotate data race around udp_sk(sk)->corkflag +- elf: don't use MAP_FIXED_NOREPLACE for elf interpreter mappings +- af_unix: fix races in sk_peer_pid and sk_peer_cred accesses +- cpufreq: schedutil: Use kobject release() method to free sugov_tunables +- tty: Fix out-of-bound vmalloc access in imageblit +- tcp: address problems caused by EDT misshaps +- arm64: Mark __stack_chk_guard as __ro_after_init +- md: fix a lock order reversal in md_alloc +- irqchip/gic-v3-its: Fix potential VPE leak on error +- scsi: iscsi: Adjust iface sysfs attr detection +- serial: mvebu-uart: fix driver's tx_empty callback +- cifs: fix incorrect check for null pointer in header_assemble + +* Tue Dec 07 2021 Laibin Qiu - 4.19.90-2112.1.0.0126 +- arm64: Fix conflict for capability when cpu hotplug +- mm: memcontrol: fix cpuhotplug statistics flushing +- mm, memcg: fix error return value of mem_cgroup_css_alloc() +- mm/memcontrol: fix a data race in scan count +- GPIO : support ascend_gpio_dwapb_enable switch +- ext4: always panic when errors=panic is specified +- config: disable CONFIG_NGBE by default in hulk_defconfig +- x86/config: Enable netswift Giga NIC driver for x86 +- net: ngbe: Add Netswift Giga NIC driver +- ras: report cpu logical index to userspace in arm event +- arm64: Avoid premature usercopy failure +- hugetlb: before freeing hugetlb page set dtor to appropriate value + +* Tue Nov 30 2021 Laibin Qiu - 4.19.90-2111.7.0.0125 +- defconfig: update the defconfigs to support 9P +- sched: Introduce handle priority reversion mechanism +- sched: unthrottle qos cfs rq when free a task group +- sched: Avoid sched entity null pointer panic +- sched: Clear idle_stamp when unthrottle offline tasks +- sched: Fix offline task can't be killed in a timely +- sched: Optimizing qos scheduler performance +- sched: Fix throttle offline task trigger panic +- sched: Remove residual checkings for qos scheduler +- sched: Change cgroup task scheduler policy +- sched: Unthrottle the throttled cfs rq when offline rq +- sched: Enable qos scheduler config +- sched: Throttle qos cfs_rq when current cpu is running online task +- sched: Introduce qos scheduler for co-location +- io_uring: return back safer resurrect +- cpufreq: Fix get_cpu_device() failed in add_cpu_dev_symlink() +- ACPI: CPPC: Fix cppc_cpufreq_init failed in CPU Hotplug situation +- lib/clear_user: ensure loop in __arch_clear_user cache-aligned v2 + +* Wed Nov 24 2021 Laibin Qiu - 4.19.90-2111.6.0.0124 +- drm/ioctl: Ditch DRM_UNLOCKED except for the legacy vblank ioctl +- config: Enable some configs for test +- 
share_pool: add mm address check when access the process's sp_group file + +* Tue Nov 23 2021 Laibin Qiu - 4.19.90-2111.5.0.0123 +- rq-qos: fix missed wake-ups in rq_qos_throttle try two +- atlantic: Fix OOB read and write in hw_atl_utils_fw_rpc_wait +- drivers : remove drivers/mtd/hisilicon/sfc +- drivers : remove drivers/soc/hisilicon/sysctl +- drivers : remove drivers/soc/hisilicon/lbc +- ipv4: fix uninitialized warnings in fnhe_remove_oldest() +- crypto: public_key: fix overflow during implicit conversion +- net: bridge: fix stale eth hdr pointer in br_dev_xmit +- x86/entry: Make entry_64_compat.S objtool clean + +* Tue Nov 16 2021 Laibin Qiu - 4.19.90-2111.4.0.0122 +- io_uring: fix ltout double free on completion race +- iommu: smmuv2: fix compile error when CONFIG_ARCH_PHYTIUM is off +- crypto: hisilicon delete invlaid api and config +- crypto: hisilicon - add CRYPTO_TFM_REQ_MAY_BACKLOG flag judge in sec_process() +- tcp: adjust rto_base in retransmits_timed_out() +- tcp: create a helper to model exponential backoff +- tcp: always set retrans_stamp on recovery +- profiling: fix shift-out-of-bounds bugs +- prctl: allow to setup brk for et_dyn executables +- dmaengine: acpi: Avoid comparison GSI with Linux vIRQ +- tracing/kprobe: Fix kprobe_on_func_entry() modification +- rcu: Fix missed wakeup of exp_wq waiters +- netfilter: socket: icmp6: fix use-after-scope +- PCI: Sync __pci_register_driver() stub for CONFIG_PCI=n +- PCI: Fix pci_dev_str_match_path() alloc while atomic bug +- block, bfq: honor already-setup queue merges +- mm/memory_hotplug: use "unsigned long" for PFN in zone_for_pfn_range() +- tcp: fix tp->undo_retrans accounting in tcp_sacktag_one() +- net/af_unix: fix a data-race in unix_dgram_poll +- events: Reuse value read using READ_ONCE instead of re-reading it +- x86/mm: Fix kern_addr_valid() to cope with existing but not present entries +- arm64/sve: Use correct size when reinitialising SVE state +- mm/hugetlb: initialize hugetlb_usage in mm_init +- scsi: BusLogic: Fix missing pr_cont() use +- ovl: fix BUG_ON() in may_delete() when called from ovl_cleanup() +- cifs: fix wrong release in sess_alloc_buffer() failed path +- bonding: 3ad: fix the concurrency between __bond_release_one() and bond_3ad_state_machine_handler() +- PCI: Use pci_update_current_state() in pci_enable_device_flags() +- userfaultfd: prevent concurrent API initialization +- PCI: Return ~0 data on pciconfig_read() CAP_SYS_ADMIN failure +- block: bfq: fix bfq_set_next_ioprio_data() +- arm64: head: avoid over-mapping in map_memory +- bpf: Fix pointer arithmetic mask tightening under state pruning +- bpf: verifier: Allocate idmap scratch in verifier env +- selftests/bpf: fix tests due to const spill/fill +- selftests/bpf: Test variable offset stack access +- bpf: Sanity check max value for var_off stack access +- bpf: Reject indirect var_off stack access in unpriv mode +- bpf: Reject indirect var_off stack access in raw mode +- bpf: Support variable offset stack access from helpers +- bpf: correct slot_type marking logic to allow more stack slot sharing +- PCI/MSI: Skip masking MSI-X on Xen PV +- tty: Fix data race between tiocsti() and flush_to_ldisc() +- net: sched: Fix qdisc_rate_table refcount leak when get tcf_block failed +- tty: serial: fsl_lpuart: fix the wrong mapbase value +- CIFS: Fix a potencially linear read overflow +- PCI: PM: Enable PME if it can be signaled from D3cold +- PCI: PM: Avoid forcing PCI_D0 for wakeup reasons inconsistently +- tcp: seq_file: Avoid skipping sk during 
tcp_seek_last_pos +- fcntl: fix potential deadlock for &fasync_struct.fa_lock +- hrtimer: Avoid double reprogramming in __hrtimer_start_range_ns() +- sched/deadline: Fix missing clock update in migrate_task_rq_dl() +- sched/deadline: Fix reset_on_fork reporting of DL tasks +- locking/mutex: Fix HANDOFF condition +- ipv4/icmp: l3mdev: Perform icmp error route lookup on source device routing table (v2) +- perf/x86/intel/pt: Fix mask of num_address_ranges +- Revert "EMMC: ascend customized emmc host" +- Revert "EMMC: add hisi_mmc_core" +- Revert "EMMC: adaption for ascend customized emmc card" +- Revert "EMMC: adaption for ascend customized sd card" +- Revert "EMMC: adaption for ascend customized host layer" +- Revert "EMMC: hisi extensions for dw mmc host controller" +- Revert "EMMC: add dts bindings documents" +- Revert "EMMC: open CONFIG_ASCEND_HISI_MMC" +- Revert "EMMC: fix ascend hisi emmc probe failed problem according to mmc_host struct" +- iommu: support phytium ft2000plus and S2500 iommu function +- arm64: Errata: fix kabi changed by cpu_errata and enable idc +- blk-mq: don't free tags if the tag_set is used by other device in queue initialztion +- nbd: add a flush_workqueue in nbd_start_device +- svm: Fix ts problem, which need the len to check out memory +- sctp: add vtag check in sctp_sf_ootb +- sctp: add vtag check in sctp_sf_do_8_5_1_E_sa +- sctp: add vtag check in sctp_sf_violation +- sctp: fix the processing for COOKIE_ECHO chunk +- sctp: fix the processing for INIT_ACK chunk +- sctp: fix the processing for INIT chunk +- sctp: use init_tag from inithdr for ABORT chunk +- openeuler_defconfig: Build HISI PMU drivers as modules. +- arm64: perf: Expose some new events via sysfs +- arm64: perf: Hook up new events +- arm64: perf: Correct the event index in sysfs +- arm64: perf: Add support for Armv8.1 PMCEID register format +- perf/smmuv3: Don't trample existing events with global filter +- drivers/perf: hisi: Add missing include of linux/module.h +- drivers/perf: Prevent forced unbinding of PMU drivers +- drivers/perf: Fix kernel panic when rmmod PMU modules during perf sampling +- drivers/perf: hisi: Fix wrong value for all counters enable +- pmu/smmuv3: Clear IRQ affinity hint on device removal +- drivers/perf: hisi: Permit modular builds of HiSilicon uncore drivers +- drivers/perf: hisi: Fix typo in events attribute array +- drivers/perf: hisi: Simplify hisi_read_sccl_and_ccl_id and its comment +- drivers/perf: hisi: update the sccl_id/ccl_id for certain HiSilicon platform +- perf/smmuv3: Validate groups for global filtering +- perf/smmuv3: Validate group size +- drivers/perf: arm_spe: Don't error on high-order pages for aux buf +- drm/hisilicon: Features to support reading resolutions from EDID +- drm/hisilicon: Support i2c driver algorithms for bit-shift adapters +- compiler.h: fix barrier_data() on clang + +* Tue Nov 09 2021 Laibin Qiu - 4.19.90-2111.3.0.0121 +- bonding: Fix a use-after-free problem when bond_sysfs_slave_add() failed +- ANDROID: staging: ion: move buffer kmap from begin/end_cpu_access() +- ath9k: Postpone key cache entry deletion for TXQ frames reference it +- ath: Modify ath_key_delete() to not need full key entry +- ath: Export ath_hw_keysetmac() +- ath9k: Clear key cache explicitly on disabling hardware +- ath: Use safer key clearing with key cache entries +- ext4: if zeroout fails fall back to splitting the extent node +- dccp: don't duplicate ccid when cloning dccp sock +- selftests/bpf: add demo for file read pattern detection +- libbpf: Support 
detecting writable tracepoint program +- ext4: add trace for the read and release of regular file +- xfs: add trace for read and release of regular file +- fs: add helper fs_file_read_do_trace() +- vfs: add bare tracepoints for vfs read and release +- bpf: Support writable context for bare tracepoint +- trace: bpf: Allow bpf to attach to bare tracepoints +- tracepoints: Add helper to test if tracepoint is enabled in a header +- Revert "xfs: add writable tracepoint for xfs file buffer read" +- Revert "selftests/bpf: add test_xfs_file.c and test_set_xfs_file.c" +- Partially revert "xfs: let writable tracepoint enable to clear flag of f_mode" +- Revert "selftests/bpf: test_xfs_file support to clear FMODE_RANDOM" +- Revert "selftests/bpf: add test_spec_readahead_xfs_file to support specail async readahead" +- EMMC: fix ascend hisi emmc probe failed problem according to mmc_host struct +- Bluetooth: cmtp: fix file refcount when cmtp_attach_device fails +- scsi: hisi_sas: print status and error when sata io abnormally completed +- Revert "scsi: hisi_sas: use threaded irq to process CQ interrupts" +- Revert "scsi: hisi_sas: replace spin_lock_irqsave/spin_unlock_restore with spin_lock/spin_unlock" +- net: hns3: update hns3 version to 21.10.5 +- net: hns3: remove an unnecessary 'goto' in hclge_init_ae_dev() +- net: hns3: fix ret not initialized problem in hclge_get_dfx_reg() +- net: hns3: refix kernel crash when unload VF while it is being reset +- net: hns3: ignore reset event before initialization process is done +- net: hns3: fix vf reset workqueue cannot exit +- net: hns3: reset DWRR of unused tc to zero +- net: hns3: fix a return value error in hclge_get_reset_status() +- net: hns3: fix the timing issue of VF clearing interrupt sources +- net: hns3: disable mac in flr process +- net: hns3: add trace event in hclge_gen_resp_to_vf() +- net: hns3: remove an unnecessary check in hclge_set_umv_space() +- net: hns3: remove unnecessary parameter 'is_alloc' in hclge_set_umv_space() +- net: hns3: remove the rss_size limitation by vector num +- net: hns3: bd_num from fireware should not be zero +- net: hns3: fix the exception when query imp info +- net: hns3: fix local variable "desc" not initialized problem +- net: hns3: limit bd numbers when getting dfx regs. 
+- s390/bpf: Fix optimizing out zero-extensions +- s390/bpf: Fix 64-bit subtraction of the -0x80000000 constant +- nbd: add sanity check for first_minor +- perf: hisi: Fix compile error if defined MODULE +- nfc: nci: fix the UAF of rf_conn_info object +- ipv6: make exception cache less predictible +- ipv6: use siphash in rt6_exception_hash() +- ipv4: make exception cache less predictible +- ipv4: use siphash instead of Jenkins in fnhe_hashfun() +- README: README optimize +- PM: hibernate: Get block device exclusively in swsusp_check() +- isdn: cpai: check ctr->cnr to avoid array index out of bound +- blk-cgroup: synchronize blkg creation against policy deactivation +- iommu/arm-smmu-v3: Add suspend and resume support +- nbd: Fix use-after-free in pid_show +- scsi: scsi_debug: Fix out-of-bound read in resp_report_tgtpgs() +- scsi: scsi_debug: Fix out-of-bound read in resp_readcap16() +- scsi: hisi_sas: unsupported DIX between OS and HBA only for SATA device +- scsi: hisi_sas: queue debugfs dump work before FLR +- mm/mempolicy: fix a race between offset_il_node and mpol_rebind_task +- jbd2: avoid transaction reuse after reformatting +- jbd2: clean up checksum verification in do_one_pass() +- ext4: check magic even the extent block bh is verified +- ext4: avoid recheck extent for EXT4_EX_FORCE_CACHE +- ext4: prevent partial update of the extent blocks +- ext4: check for inconsistent extents between index and leaf block +- ext4: check for out-of-order index extents in ext4_valid_extent_entries() +- quota: correct error number in free_dqentry() +- quota: check block number when reading the block in quota file +- nbd: fix uaf in nbd_handle_reply() +- nbd: partition nbd_read_stat() into nbd_read_reply() and nbd_handle_reply() +- nbd: clean up return value checking of sock_xmit() +- nbd: don't start request if nbd_queue_rq() failed +- nbd: check sock index in nbd_read_stat() +- nbd: make sure request completion won't concurrent +- nbd: don't handle response without a corresponding request message +- config: enable CONFIG_ASCEND_CLEAN_CDM by default +- numa/cdm: Introduce a bootarg to specify the target nodes to move to +- numa/cdm: Introduce a hbm_per_part variable +- numa: Restrict the usage of cdm_node_to_ddr_node() +- numa: Move the management structures for cdm nodes to ddr +- perf: hisi: Add support for HiSilicon SoC L3T PMU driver +- perf: hisi: Add support for HiSilicon SoC LPDDRC PMU driver +- Documentation: Add documentation for Hisilicon SoC PMU DTS binding +- perf: hisi: Add support for HiSilicon SoC PMU driver dt probe +- watchdog/corelockup: Depends on the hardlockup detection switch +- watchdog/corelockup: Add interface to control the detection sensitivity. 
+- watchdog/corelockup: Optimized core lockup detection judgment rules +- config/arm64: Enable corelockup detector for hulk defconfig +- corelockup: Add detector enable support by cmdline +- corelockup: Disable wfi/wfe mode for pmu based nmi +- corelockup: Add support of cpu core hang check +- driver/svm: used tgid when get phys +- share pool:Solving the 4G DVPP Address coexist +- share_pool: Default enable enable_share_k2u_spg +- share_pool: Export __vmalloc_node() +- share pool: Add export __get_vm_area map_vm_area for ascend driver +- share_pool: add sp_group_del_task api +- share_pool: Extract sp_check_caller_permission +- share_pool: Clear VM_SHAREPOOL when drop sp area +- share_pool: Don't allow concurrent sp_free or sp_unshare_uva calls +- share_pool: Add compatible interface for multi-group mode +- share_pool: Rename function is_k2task to sp_check_k2task +- share_pool: Add sp_k2u trace +- share_pool: Extract sp_k2u_prepare and sp_k2u_finish +- share_pool: Add sp_alloc trace +- share_pool: Show process prot in an sp_group +- share_pool: Add proc node to show process overview info +- share_pool: Apply proc_sp_group_state to multi-group-mode +- share_pool: Put the pointer of sp_proc_stat in sp_group_master +- share_pool: Free spg_node when group adding failed +- share_pool: Extract is_process_in_group +- share_pool: Apply sp_config_dvpp_range to to multi-group-mode +- share_pool: Apply sp_make_share_k2u() to multi-group-mode +- share_pool: Apply sp_group_id_by_pid() to multi-group-mode +- share_pool: Extract function get_task +- share_pool: Clean outdated DVPP pass through macros +- share_pool: Redesign sp_alloc pass through +- share_pool: Extract sp_free_get_spa +- share_pool: Extract sp_alloc_finish +- share_pool: Extract sp_alloc_mmap_populate +- share_pool: Extract sp_fallocate +- share_pool: Extract sp_alloc_prepare +- share_pool: Using pr_fmt in printing +- share_pool: Add access control for sp_unshare_uva +- ascend: share pool: Only memory of current process is allowed to u2k/k2u +- ascend: share pool: Remove unnecessary params of sp_unshare +- share_pool: k2u hugepage READONLY prot bug fix +- ascend: share pool: Add parameter prot in sp_group_add_task +- share_pool: Introduce struct sp_spg_stat +- share_pool: Introduce struct spg_proc_stat +- share_pool: Initialize sp_group_master when call k2u_task +- share_pool: Rename sp_stat_idr to sp_proc_stat_idr +- share_pool: Rename sp_spg_stat to sp_overall_stat +- share_pool: Add group max process num limitation +- share_pool: Add system max group num limitation +- ascend/config: enable share pool feature +- kabi: fix kabi broken in struct mm_struct +- ascend: sharepool: support multi-group mode +- sharepool: Fix ASLR broken +- share_pool: Adjust the position of do_mmap checker +- share_pool: share_pool: Don't allow non-sp mmap in sp address range +- share_pool: Free newly generated id only when necessary +- share_pool: Show sp vmflags in /proc/$pid/smaps +- share_pool: Free newly generated id when failed +- share_pool: Fix missing semaphore operation in error branch +- share_pool: Use pr_debug to print addresses +- share_pool: Add compact switch for vmalloc_huge* funcs +- share_pool: Don't do direct reclaim or compact for vmalloc_huge* funcs +- share_pool: Eliminate compiler warning for atomic64_t in arm32 +- share_pool: Fix memleak of concurrent sp_free and sp_group_add_task +- share_pool: Set initial value to variable node_id +- ascend/share pool: bugfix, sp exit is not atomic +- share_pool: Alloc shared memory on a specified 
memory node +- share_pool: Alloc sp memory on a specified memory node +- share_pool: Fix concurrency problem when a process adding sp_group is killed +- share_pool: Fix address checker +- share_pool: Optimize compact procedure +- shmem/ascend: charge pages to the memcg of current task +- share_pool: Update kernel-doc comments +- share_pool: Fix warning symbol was not declared +- share_pool: Fix warning missing braces around initializer +- share_pool: Waiting for the migration to complete +- share_pool: Add parameter checking +- share_pool: Fix struct sp_proc_stat memleak +- share_pool: Show k2u_to_task processes in proc_stat interface +- ascend: sharepool: calculate the correct offset of the address which is customized +- share_pool: Print info when thread is being killed +- share pool: Clean sp_mutex for sp_add_group_task +- share_pool: Rename buff_vzalloc_user and buff_vzalloc_hugepage_user +- share_pool: Support showing pid of applier process in spa_stat +- share_pool: Fix coredump hungtask +- share_pool: change printk_ratelimit to pr_level_ratelimited +- share_pool: Turn the negative statistics into zeros +- share_pool: Put relevant functions together +- share_pool: Remove redundant sysctl_share_pool_hugepage_enable +- ascend: sharepool: fix compile warning when the sharepool is turned off +- share_pool: move sysctl interface of share pool from kern_table to vm table +- share_pool: Introduce refcount for struct sp_proc_stat +- share_pool: Increase refcount of sp_group when call __sp_find_spg +- share_pool: Update the comments after removing sp_mutex +- share_pool: Rename __sp_group_drop_locked to sp_group_drop +- share_pool: Introduce an rw semaphore sp_group_sem and remove sp_mutex +- share_pool: Introduce an rw semaphore for per process stat idr +- share_pool: Use type atomic64_t for process stat +- share_pool: Add comments for fine grained locking design +- share_pool: Remove residual macro ESPGMMEXIT +- share_pool: Fix use-after-free of spa in rb_spa_stat_show +- share_pool: Fix the bug of not down_write mm->mmap_sem +- ascend: sharepool: don't enable the vmalloc to use hugepage default +- share_pool: add sysctl_share_pool_map_lock_enable to control the mapped region to be locked +- mm/vmalloc: fix pud_page compile error on arm32 +- mm, share_pool: Print share pool info of a process when oom +- ascend: share pool: optimize the big lock for memory processing +- share_pool: Fix memleak if fail in sp_make_share_u2k() +- share_pool: Free sp group id only when it is auto generated +- share_pool: Add interrupt context checker +- share_pool: Use PMD_SIZE alignment in hugepage allocation functions +- share_pool: Remove redundant null pointer check +- mm: Fix compilation error of mm_update_next_owner() +- share_pool: Fix compilation error of do_mm_populate() +- sharepool: Fix null pointer dereference on adding exiting task +- share_pool: Check tsk->mm before use it +- share_pool: Fix a potential bug branch +- x86/mm/ioremap: Fix HUGE_VMAP interface redefinition +- share_pool: Calculate sp_alloc() size for a task +- share_pool: Calculate k2u size for a task +- share_pool: Refactor sp_make_share_k2u() +- share_pool: Fix error message printing +- share_pool: Calculate non-sharepool memory usage for a task +- share_pool: Calculate sp_alloc() size for a sp_group +- share_pool: Do cleanups for statistical functions +- mm/vmalloc: Fix a double free in __vmalloc_node_range +- share_pool: Add and export buff_vzalloc_user() +- ascend: share_pool: don't share the k2u to spg by default +- ascend: 
share_pool: make the function share_k2u_to_spg work +- share pool: Try to compact when memory is insufficient +- share_pool: Fix null pointer of mm in concurrency scenes +- share pool: Roll back when sp mmap failed +- share_pool: Set errno when fail in sp_free() +- share_pool: Release spg id when fail in sp_group_add_task() +- share_pool: Remove memleak debug printing +- ascend: share_pool: enable svm to use share pool memory +- share_pool: Fix series of bugs +- ascend: share_pool: Use remap_pfn_range to share kva to uva +- ascend: share_pool: Use sharepool_no_page to alloc hugepage +- share_pool: Add dvpp size statistics +- share_pool: Fix rbtree searching bugs +- share_pool: Don't use input param pid in sp_unshare_uva() +- share pool: Solve processing errors of some abnormal branches +- share_pool: Fix spa memleak in dvpp channel destroy procedure +- share_pool: Add sp_area cache +- ascend: share_pool: support debug mode and refactor some functions +- ascend: share_pool: support share pool features for ascend platform +- ascend: share_pool: support fork() and exit() to handle the mm +- ascend: share_pool: add support proc_sharepool_init and is_vm_huge_special +- ascend: share_pool: add /proc/sys/kernel/share_pool_hugepage_enable and ac_mode +- ascend: share_pool: add /proc/<pid>/sp_group +- ascend: memory: introduce do_mm_populate and hugetlb_insert_hugepage +- ascend: mm_struct: introduce new parameter for share pool features +- ascend: vmalloc: export new function for share pool +- ascend: mm: add an owner for mm_struct +- mm/vmalloc: Hugepage vmalloc mappings +- mm/vmalloc: add vmap_range_noflush variant +- mm: Move vmap_range from mm/ioremap.c to mm/vmalloc.c +- arm64: inline huge vmap supported functions +- mm: HUGE_VMAP arch support cleanup +- mm/ioremap: rename ioremap_*_range to vmap_*_range +- mm/vmalloc: rename vmap_*_range vmap_pages_*_range +- mm: apply_to_pte_range warn and fail if a large pte is encountered +- mm/vmalloc: fix vmalloc_to_page for huge vmap mappings +- mm: move lib/ioremap.c to mm/ +- mm/ioremap: probe platform for p4d huge map support +- mm: remove map_vm_range +- mm: don't return the number of pages from map_kernel_range{, _noflush} +- mm: rename vmap_page_range to map_kernel_range +- mm: remove vmap_page_range_noflush and vunmap_page_range +- mm: pass addr as unsigned long to vb_free +- mm: only allow page table mappings for built-in zsmalloc +- mm: unexport unmap_kernel_range_noflush +- mm: remove __get_vm_area +- arm64: mm: add p?d_leaf() definitions +- mm: add generic p?d_leaf() macros +- mm/memory.c: add apply_to_existing_page_range() helper +- mm/vmalloc: Add empty <asm/vmalloc.h> headers and use them from <linux/vmalloc.h> +- lib/ioremap: ensure break-before-make is used for huge p4d mappings +- lib/ioremap: ensure phys_addr actually corresponds to a physical address +- ioremap: rework pXd_free_pYd_page() API +- mm: add do_vm_mmap +- config: update hulk_defconfig +- configs: remove euleros_defconfig +- iommu/amd: Fix incorrect PASID decoding from event log +- mm: compaction: avoid 100% CPU usage during compaction when a task is killed +- iommu/vt-d: Unlink device if failed to add to group +- iommu/arm-smmu: Prevent forced unbinding of Arm SMMU drivers +- EMMC: open CONFIG_ASCEND_HISI_MMC +- EMMC: add dts bindings documents +- EMMC: hisi extensions for dw mmc host controller +- EMMC: adaption for ascend customized host layer +- EMMC: adaption for ascend customized sd card +- EMMC: adaption for ascend customized emmc card +- EMMC: add hisi_mmc_core +- EMMC: ascend customized emmc host + +* 
Wed Oct 27 2021 Cheng Jian - 4.19.90-2110.8.0.0120 +- blk-mq: complete req in softirq context in case of single queue +- ovl: fix leaked dentry +- ovl: fix incorrect extent info in metacopy case +- ovl: warn about orphan metacopy +- ovl: fix lookup of indexed hardlinks with metacopy +- ovl: fix redirect traversal on metacopy dentries +- ovl: initialize OVL_UPPERDATA in ovl_lookup() +- ovl: use only uppermetacopy state in ovl_lookup() +- ovl: simplify setting of origin for index lookup +- net: hns3: update hns3 version to 21.10.1 +- net: hns3: fix buffer length not enough problem in debugfs +- net: hns3: use ae_dev->ops->reset_event to do reset. +- media: firewire: firedtv-avc: fix a buffer overflow in avc_ca_pmt() +- GPIO : support ascend gpio driver +- mpam: update monitor rmid and group configuration +- mpam: Add support for group rmid modify +- mpam: enable rdt_mon_capable for mbw monitor +- svm: Add svm_set_user_mpam_en to enable/disable mpam for smmu +- svm: Add support to set svm mpam configuration +- svm: Add support to get svm mpam configuration +- iommu/arm-smmu-v3: Add support to enable/disable SMMU user_mpam_en +- iommu/arm-smmu-v3: Add support to get SMMU mpam configuration +- iommu/arm-smmu-v3: Add support to configure mpam in STE/CD context +- nvme-rdma: destroy cm id before destroy qp to avoid use after free +- arm64: Errata: fix kabi changed by cpu_errata +- config: disable CONFIG_HISILICON_ERRATUM_1980005 by default +- cache: Workaround HiSilicon Taishan DC CVAU +- kabi: fix kabi broken in struct device +- virtio_pci: Support surprise removal of virtio pci device +- ip_gre: add validation for csum_start +- netfilter: nft_exthdr: fix endianness of tcp option cast +- tracing / histogram: Fix NULL pointer dereference on strcmp() on NULL event name +- scsi: core: Avoid printing an error if target_alloc() returns -ENXIO +- scsi: scsi_dh_rdac: Avoid crash during rdac_bus_attach() +- x86/fpu: Make init_fpstate correct with optimized XSAVE +- iommu/vt-d: Fix agaw for a supported 48 bit guest address width +- PCI/MSI: Enforce MSI[X] entry updates to be visible +- PCI/MSI: Enforce that MSI-X table entry is masked for update +- PCI/MSI: Mask all unused MSI-X entries +- PCI/MSI: Protect msi_desc::masked for multi-MSI +- PCI/MSI: Use msi_mask_irq() in pci_msi_shutdown() +- PCI/MSI: Correct misleading comments +- PCI/MSI: Do not set invalid bits in MSI mask +- PCI/MSI: Enable and mask MSI-X early +- genirq/msi: Ensure deactivation on teardown +- x86/ioapic: Force affinity setup before startup +- x86/msi: Force affinity setup before startup +- genirq: Provide IRQCHIP_AFFINITY_PRE_STARTUP +- tcp_bbr: fix u32 wrap bug in round logic if bbr_init() called after 2B packets +- net: bridge: fix memleak in br_add_if() +- net: igmp: fix data-race in igmp_ifc_timer_expire() +- ACPI: NFIT: Fix support for virtual SPA ranges +- ovl: prevent private clone if bind mount is not allowed +- tracing: Reject string operand in the histogram expression +- reiserfs: add check for root_inode in reiserfs_fill_super +- serial: 8250: Mask out floating 16/32-bit bus bits +- ext4: fix potential htree corruption when growing large_dir directories +- pipe: increase minimum default pipe size to 2 pages +- tracing/histogram: Rename "cpu" to "common_cpu" +- tracing / histogram: Give calculation hist_fields a size +- blk-iolatency: error out if blk_get_queue() failed in iolatency_set_limit() +- net: Fix zero-copy head len calculation. 
+- netfilter: nft_nat: allow to specify layer 4 protocol NAT only +- netfilter: conntrack: adjust stop timestamp to real expiry value +- virtio_net: Do not pull payload in skb->head +- virtio_net: Add XDP meta data support +- net: check untrusted gso_size at kernel entry +- sctp: move 198 addresses from unusable to private scope +- net: annotate data race around sk_ll_usec +- net/802/garp: fix memleak in garp_request_join() +- net/802/mrp: fix memleak in mrp_request_join() +- af_unix: fix garbage collect vs MSG_PEEK +- efi: Change down_interruptible() in virt_efi_reset_system() to down_trylock() +- svm: Use vma->vm_pgoff for the nid +- Ascend/hugetlb:support alloc normal and buddy hugepage +- Ascend/memcg: Use CONFIG_ASCEND_FEATURES for customized interfaces +- Ascend/cdm:alloc hugepage from the specified CDM node +- ascend/svm: Support pinned memory size greater than 2GB +- mm: ascend: Fix compilation error of mem_cgroup_from_css() +- fuse: truncate pagecache on atomic_o_trunc +- ext4: drop unnecessary journal handle in delalloc write +- ext4: factor out write end code of inline file +- ext4: correct the error path of ext4_write_inline_data_end() +- ext4: check and update i_disksize properly + +* Thu Oct 21 2021 Cheng Jian - 4.19.90-2110.7.0.0119 +- sched/topology: Fix sched_domain_topology_level alloc in sched_init_numa() +- uacce: misc fixes +- mm/page_alloc: place pages to tail in __free_pages_core() +- mm/page_alloc: move pages to tail in move_to_free_list() +- mm/page_alloc: place pages to tail in __putback_isolated_page() +- mm/page_alloc: convert "report" flag of __free_one_page() to a proper flag +- mm: add function __putback_isolated_page +- mm/page_alloc.c: memory hotplug: free pages as higher order +- raid1: ensure write behind bio has less than BIO_MAX_VECS sectors +- blk-wbt: fix IO hang due to negative inflight counter +- Export sysboml for bbox to use. 
+- ovl: use a private non-persistent ino pool +- ovl: simplify i_ino initialization +- ovl: factor out helper ovl_get_root() +- ovl: fix out of date comment and unreachable code + +* Tue Oct 19 2021 Cheng Jian - 4.19.90-2110.6.0.0118 +- Revert "cache: Workaround HiSilicon Taishan DC CVAU" +- Revert "config: disable CONFIG_HISILICON_ERRATUM_1980005 by default" + +* Tue Oct 19 2021 Cheng Jian - 4.19.90-2110.5.0.0117 +- soc: aspeed: lpc-ctrl: Fix boundary check for mmap +- mmap: userswap: fix some format issues +- mmap: userswap: fix memory leak in do_mmap +- arm64/mpam: fix the problem that the ret variable is not initialized +- NFS: Fix a race in __nfs_list_for_each_server() +- NFSv4: Clean up nfs_client_return_marked_delegations() +- NFS: Add a helper nfs_client_for_each_server() +- blktrace: Fix uaf in blk_trace access after removing by sysfs +- io_uring: don't take uring_lock during iowq cancel +- io_uring: hold uring_lock while completing failed polled io in io_wq_submit_work() +- block: fix UAF from race of ioc_release_fn() and __ioc_clear_queue() +- Driver/SMMUV3: Bugfix for the softlockup when the driver processes events +- net_sched: remove need_resched() from qdisc_run() +- ath10k: Fix TKIP Michael MIC verification for PCIe +- ath10k: drop fragments with multicast DA for PCIe +- ath10k: add CCMP PN replay protection for fragmented frames for PCIe +- ath10k: add struct for high latency PN replay protection +- config: disable CONFIG_HISILICON_ERRATUM_1980005 by default +- cache: Workaround HiSilicon Taishan DC CVAU +- kabi: Fix "Intel: perf/core: Add attr_groups_update into struct pmu" +- x86: Fix kabi broken for struct cpuinfo_x86 +- kabi: Fix "perf/x86/intel: Support per-thread RDPMC TopDown metrics" +- PCI: kabi: fix kabi broken for struct pci_dev +- kabi: Fix "PCI: Decode PCIe 32 GT/s link speed" +- openeuler_defconfig: Adjust some configs for Intel icelake support +- hulk_defconfig: Adjust some configs for Intel icelake support +- perf/x86/intel/uncore: Fix M2M event umask for Ice Lake server +- node: fix device cleanups in error handling code +- device-dax/core: Fix memory leak when rmmod dax.ko +- ntb: intel: Fix memleak in intel_ntb_pci_probe +- perf/x86/intel/uncore: Fix the scale of the IMC free-running events +- intel_idle: Ignore _CST if control cannot be taken from the platform +- intel_idle: Fix max_cstate for processor models without C-state tables +- perf/x86/intel/uncore: Reduce the number of CBOX counters +- powercap: RAPL: remove unused local MSR define +- PCI/ERR: Update error status after reset_link() +- PCI/ERR: Combine pci_channel_io_frozen cases +- intel_th: msu: Fix the unexpected state warning +- intel_th: msu: Fix window switching without windows +- intel_th: Fix freeing IRQs +- PCI: Do not use bus number zero from EA capability +- perf/x86/intel/uncore: Fix missing marker for snr_uncore_imc_freerunning_events +- intel_th: msu: Fix possible memory leak in mode_store() +- intel_th: msu: Fix overflow in shift of an unsigned int +- intel_th: msu: Fix missing allocation failure check on a kstrndup +- intel_th: msu: Fix an uninitialized mutex +- intel_th: gth: Fix the window switching sequence +- tools/power/x86/intel-speed-select: Fix a read overflow in isst_set_tdp_level_msr() +- intel_rapl: need linux/cpuhotplug.h for enum cpuhp_state +- device-dax: fix memory and resource leak if hotplug fails +- MAINTAINERS: Add entry for EDAC-I10NM +- MAINTAINERS: Update entry for EDAC-SKYLAKE +- tools x86 uapi asm: Sync the pt_regs.h copy with the kernel sources +- docs: 
fix numaperf.rst and add it to the doc tree +- acpi/hmat: fix an uninitialized memory_target +- acpi/hmat: Update acpi_hmat_type enum with ACPI_HMAT_TYPE_PROXIMITY +- acpi/hmat: fix memory leaks in hmat_init() +- drivers/dax: Allow to include DEV_DAX_PMEM as builtin +- doc: trace: fix reference to cpuidle documentation file +- openeuler_defconfig: Enable some Icelake support configs +- hulk_defconfig: Enable some Icelake support configs +- tools/power turbostat: Fix Haswell Core systems +- tools/power turbostat: Support Ice Lake server +- tools/power turbostat: consolidate duplicate model numbers +- tools/power turbostat: reduce debug output +- intel_th: msu-sink: An example msu buffer "sink" +- intel_th: msu: Introduce buffer interface +- intel_th: msu: Start read iterator from a non-empty window +- intel_th: msu: Split sgt array and pointer in multiwindow mode +- intel_th: msu: Support multipage blocks +- intel_th: msu: Remove set but not used variable 'last' +- intel_th: msu: Fix unused variable warning on arm64 platform +- intel_th: msu: Add current window tracking +- intel_th: msu: Add a sysfs attribute to trigger window switch +- intel_th: msu: Correct the block wrap detection +- intel_th: Add switch triggering support +- intel_th: gth: Factor out trace start/stop +- intel_th: msu: Factor out pipeline draining +- intel_th: msu: Switch over to scatterlist +- intel_th: msu: Replace open-coded list_{first,last,next}_entry variants +- intel_th: Only report useful IRQs to subdevices +- intel_th: msu: Start handling IRQs +- intel_th: pci: Use MSI interrupt signalling +- intel_th: Communicate IRQ via resource +- intel_th: Add "rtit" source device +- intel_th: Skip subdevices if their MMIO is missing +- intel_th: Rework resource passing between glue layers and core +- intel_th: pti: Use sysfs_match_string() helper +- intel_th: Only create useful device nodes +- intel_th: Mark expected switch fall-throughs +- perf/x86/amd: Fix sampling Large Increment per Cycle events +- Intel: hardirq/nmi: Allow nested nmi_enter() +- Intel: platform/x86: ISST: Increase timeout +- Intel: ICX: platform/x86: ISST: Fix wrong unregister type +- Intel: ICX: platform/x86: ISST: Allow additional core-power mailbox commands +- Intel: EDAC/i10nm: Update driver to support different bus number config register offsets +- Intel: EDAC, {skx,i10nm}: Make some configurations CPU model specific +- Intel: intel_idle: Customize IceLake server support +- Intel: x86/uaccess: Move copy_user_handle_tail() into asm +- Intel: x86/insn-eval: Add support for 64-bit kernel mode +- Intel: x86/extable: Introduce _ASM_EXTABLE_UA for uaccess fixups +- x86/traps: Stop using ist_enter/exit() in do_int3() +- Intel: EDAC, skx: Retrieve and print retry_rd_err_log registers +- Intel: EDAC, skx_common: Refactor so that we initialize "dev" in result of adxl decode. 
+- Intel: perf/x86: Fix n_metric for cancelled txn +- Intel: perf/x86/intel: Check perf metrics feature for each CPU +- Intel: perf/x86/intel: Support per-thread RDPMC TopDown metrics +- Intel: perf/x86/intel: Support TopDown metrics on Ice Lake +- Intel: perf/x86: Add a macro for RDPMC offset of fixed counters +- Intel: perf/x86/intel: Generic support for hardware TopDown metrics +- Intel: perf/core: Add a new PERF_EV_CAP_SIBLING event capability +- Intel: perf/x86/intel: Use switch in intel_pmu_disable/enable_event +- Intel: perf/x86/intel: Fix the name of perf METRICS +- Intel: perf/x86/intel: Move BTS index to 47 +- Intel: perf/x86/intel: Introduce the fourth fixed counter +- Intel: perf/x86/intel: Name the global status bit in NMI handler +- Intel: perf/x86: Use event_base_rdpmc for the RDPMC userspace support +- Intel: perf/x86: Keep LBR records unchanged in host context for guest usage +- Intel: perf/x86: Add constraint to create guest LBR event without hw counter +- Intel: perf/x86/lbr: Add interface to get LBR information +- perf/x86/core: Refactor hw->idx checks and cleanup +- Intel: perf/x86: Fix variable types for LBR registers +- perf/x86/amd: Add support for Large Increment per Cycle Events +- Intel: perf/x86/amd: Constrain Large Increment per Cycle events +- Intel: perf/x86/intel: Fix SLOTS PEBS event constraint +- Intel: perf/x86: Use update attribute groups for default attributes +- intel: perf/x86/intel: Use update attributes for skylake format +- Intel: perf/x86: Use update attribute groups for extra format +- Intel: perf/x86: Use update attribute groups for caps +- Intel: perf/x86: Add is_visible attribute_group callback for base events +- Intel: perf/x86: Use the new pmu::update_attrs attribute group +- Intel: perf/x86: Get rid of x86_pmu::event_attrs +- Intel: perf/core: Add attr_groups_update into struct pmu +- Intel: sysfs: Add sysfs_update_groups function +- perf/x86/intel: Export mem events only if there's PEBS support +- Intel: perf/x86/intel: Factor out common code of PMI handler +- PCI: pciehp: Add DMI table for in-band presence detection disabled +- Intel:PCI: pciehp: Wait for PDS if in-band presence is disabled +- Intel:PCI: pciehp: Disable in-band presence detect when possible +- Intel:PCI/AER: Fix the broken interrupt injection +- genirq: Provide interrupt injection mechanism +- Intel:PCI/DPC: Add "pcie_ports=dpc-native" to allow DPC without AER control +- Intel:PCI/AER: Fix kernel-doc warnings +- Intel:PCI/AER: Use for_each_set_bit() to simplify code +- Intel:PCI/AER: Save AER Capability for suspend/resume +- Intel:PCI: Get rid of dev->has_secondary_link flag +- Intel:PCI: Make pcie_downstream_port() available outside of access.c +- Intel:PCI: Assign bus numbers present in EA capability for bridges +- Intel:PCI/AER: Log messages with pci_dev, not pcie_device +- Intel:PCI/DPC: Log messages with pci_dev, not pcie_device +- Intel:PCI: Replace dev_printk(KERN_DEBUG) with dev_info(), etc +- Intel:PCI: Replace printk(KERN_INFO) with pr_info(), etc +- Intel:PCI: Use dev_printk() when possible +- Intel:PCI/portdrv: Support PCIe services on subtractive decode bridges +- Intel:PCI/portdrv: Use conventional Device ID table formatting +- Intel:PCI/ASPM: Save LTR Capability for suspend/resume +- Intel:PCI: Enable SERR# forwarding for all bridges +- Intel:PCI/AER: Use match_string() helper to simplify the code +- Intel:PCI/AER: Queue one GHES event, not several uninitialized ones +- Intel:PCI/AER: Abstract AER interrupt handling +- Intel:PCI/AER: Reuse existing 
pcie_port_find_device() interface +- Intel:PCI/AER: Use managed resource allocations +- Intel:PCI/AER: Use threaded IRQ for bottom half +- Intel:PCI/AER: Use kfifo_in_spinlocked() to insert locked elements +- Intel:PCI/AER: Remove unused aer_error_resume() +- Intel:PCI/ERR: Remove duplicated include from err.c +- Intel:PCI: Make link active reporting detection generic +- PCI: Unify device inaccessible +- Intel:PCI/ERR: Always report current recovery status for udev +- PCI/ERR: Simplify broadcast callouts +- PCI/ERR: Handle fatal error recovery +- Intel:PCI/DPC: Save and restore config state +- PCI: portdrv: Restore PCI config state on slot reset +- PCI: Simplify disconnected marking +- Intel: ntb: intel: add hw workaround for NTB BAR alignment +- Intel: ntb: intel: fix static declaration +- Intel: ntb: intel: Add Icelake (gen4) support for Intel NTB +- Intel: NTB: add new parameter to peer_db_addr() db_bit and db_data +- Intel: perf/x86/intel: Fix invalid Bit 13 for Icelake MSR_OFFCORE_RSP_x register +- Intel: perf/x86/intel/uncore: Add Ice Lake server uncore support +- Intel: perf/x86/intel/uncore: Add box_offsets for free-running counters +- Intel: perf/x86/intel/uncore: Factor out __snr_uncore_mmio_init_box +- Intel: perf/x86/intel/uncore: Add IMC uncore support for Snow Ridge +- Intel: perf/x86/intel/uncore: Clean up client IMC +- Intel: perf/x86/intel/uncore: Support MMIO type uncore blocks +- Intel: perf/x86/intel/uncore: Factor out box ref/unref functions +- Intel: perf/x86/intel/uncore: Add uncore support for Snow Ridge server +- Intel: perf/x86/intel: Add more Icelake CPUIDs +- Intel: Documentation: admin-guide: PM: Add intel_idle document +- Intel: ACPI: processor: Make ACPI_PROCESSOR_CSTATE depend on ACPI_PROCESSOR +- Intel: intel_idle: Use ACPI _CST on server systems +- Intel: intel_idle: Add module parameter to prevent ACPI _CST from being used +- Intel: intel_idle: Allow ACPI _CST to be used for selected known processors +- Intel: cpuidle: Allow idle states to be disabled by default +- Intel: Documentation: admin-guide: PM: Add cpuidle document +- Intel: cpuidle: use BIT() for idle state flags and remove CPUIDLE_DRIVER_FLAGS_MASK +- Intel: intel_idle: Use ACPI _CST for processor models without C-state tables +- Intel: intel_idle: Refactor intel_idle_cpuidle_driver_init() +- Intel: ACPI: processor: Export acpi_processor_evaluate_cst() +- Intel: ACPI: processor: Clean up acpi_processor_evaluate_cst() +- Intel: ACPI: processor: Introduce acpi_processor_evaluate_cst() +- Intel: ACPI: processor: Export function to claim _CST control +- Intel: tools/power/x86: A tool to validate Intel Speed Select commands +- Intel: platform/x86: ISST: Restore state on resume +- Intel: platform/x86: ISST: Add Intel Speed Select PUNIT MSR interface +- Intel: platform/x86: ISST: Add Intel Speed Select mailbox interface via MSRs +- Intel: platform/x86: ISST: Add Intel Speed Select mailbox interface via PCI +- Intel: platform/x86: ISST: Add Intel Speed Select mmio interface +- Intel: platform/x86: ISST: Add IOCTL to Translate Linux logical CPU to PUNIT CPU number +- Intel: platform/x86: ISST: Store per CPU information +- Intel: platform/x86: ISST: Add common API to register and handle ioctls +- Intel: platform/x86: ISST: Update ioctl-number.txt for Intel Speed Select interface +- Intel: EDAC, skx, i10nm: Fix source ID register offset +- Intel: EDAC, i10nm: Check ECC enabling status per channel +- Intel: EDAC, i10nm: Add Intel additional Ice-Lake support +- Intel: EDAC, skx, i10nm: Make skx_common.c a 
pure library +- Intel: EDAC, skx_common: Add code to recognise new compound error code +- Intel: EDAC, i10nm: Add a driver for Intel 10nm server processors +- EDAC, skx_edac: Delete duplicated code +- Intel: EDAC, skx_common: Separate common code out from skx_edac +- Intel: powercap/intel_rapl: add support for ICX-D +- Intel: powercap/intel_rapl: add support for ICX +- Intel: powercap/intel_rapl: add support for IceLake desktop +- Intel: intel_rapl: Fix module autoloading issue +- Intel: intel_rapl: support two power limits for every RAPL domain +- Intel: intel_rapl: support 64 bit register +- intel_rapl: abstract RAPL common code +- Intel: intel_rapl: cleanup hardcoded MSR access +- Intel: intel_rapl: cleanup some functions +- Intel: intel_rapl: abstract register access operations +- Intel: intel_rapl: abstract register address +- Intel: intel_rapl: introduce struct rapl_if_private +- Intel: intel_rapl: introduce intel_rapl.h +- Intel: intel_rapl: remove hardcoded register index +- Intel: intel_rapl: use reg instead of msr +- Intel: powercap/intel_rapl: Update RAPL domain name and debug messages +- Intel: powercap/intel_rapl: Support multi-die/package +- Intel: powercap/intel_rapl: Simplify rapl_find_package() +- Intel: x86/topology: Define topology_logical_die_id() +- Intel: x86/topology: Define topology_die_id() +- Intel: cpu/topology: Export die_id +- Intel: x86/topology: Create topology_max_die_per_package() +- Intel: x86/topology: Add CPUID.1F multi-die/package support +- Intel: topology: Simplify cputopology.txt formatting and wording +- Intel: perf/x86/regs: Use PERF_REG_EXTENDED_MASK +- Intel: perf/x86: Remove pmu->pebs_no_xmm_regs +- Intel: perf/x86: Clean up PEBS_XMM_REGS +- Intel: perf/x86/regs: Check reserved bits +- Intel: perf/x86: Disable extended registers for non-supported PMUs +- Intel: perf/core: Add PERF_PMU_CAP_NO_EXCLUDE for exclusion incapable PMUs +- Intel: perf/core: Add function to test for event exclusion flags +- Intel: perf/x86/intel/pt: Remove software double buffering PMU capability +- Intel: perf/ring_buffer: Fix AUX software double buffering +- Intel: perf regs x86: Add X86 specific arch__intr_reg_mask() +- Intel: perf parse-regs: Add generic support for arch__intr/user_reg_mask() +- Intel: perf parse-regs: Split parse_regs +- Intel: perf parse-regs: Improve error output when faced with unknown register name +- Intel: perf record: Fix suggestion to get list of registers usable with --user-regs and --intr-regs +- Intel: perf tools x86: Add support for recording and printing XMM registers +- Intel: perf/x86/intel/uncore: Add Intel Icelake uncore support +- Intel: perf/x86/lbr: Avoid reading the LBRs when adaptive PEBS handles them +- Intel: perf/x86/intel: Support adaptive PEBS v4 +- Intel: perf/x86/intel/ds: Extract code of event update in short period +- Intel: perf/x86/intel: Extract memory code PEBS parser for reuse +- Intel: perf/x86: Support outputting XMM registers +- Intel: doc/mm: New documentation for memory performance +- Intel: acpi/hmat: Register memory side cache attributes +- Intel: acpi/hmat: Register performance attributes +- Intel: acpi/hmat: Register processor domain to its memory +- Intel: node: Add memory-side caching attributes +- Intel: node: Add heterogenous memory access attributes +- node: Link memory nodes to their compute nodes +- Intel: acpi/hmat: Parse and report heterogeneous memory +- Intel: acpi: Add HMAT to generic parsing tables +- irqchip: phytium-2500: Fix compilation issues +- Intel: acpi: Create subtable parsing 
infrastructure +- Intel: ACPICA: ACPI 6.3: HMAT updates +- Intel: device-dax: "Hotplug" persistent memory for use like normal RAM +- mm/resource: Let walk_system_ram_range() search child resources +- Intel: mm/memory-hotplug: Allow memory resources to be children +- Intel: mm/resource: Move HMM pr_debug() deeper into resource code +- Intel: device-dax: Add a 'modalias' attribute to DAX 'bus' devices +- Intel: device-dax: Add a 'target_node' attribute +- Intel: device-dax: Auto-bind device after successful new_id +- Intel: acpi/nfit, device-dax: Identify differentiated memory with a unique numa-node +- Intel: device-dax: Add /sys/class/dax backwards compatibility +- Intel: device-dax: Add support for a dax override driver +- Intel: device-dax: Move resource pinning+mapping into the common driver +- Intel: device-dax: Introduce bus + driver model +- Intel: device-dax: Start defining a dax bus model +- Intel: device-dax: Remove multi-resource infrastructure +- Intel: device-dax: Kill dax_region base +- Intel: device-dax: Kill dax_region ida +- Intel: dmaengine: ioatdma: support latency tolerance report (LTR) for v3.4 +- Intel: dmaengine: ioatdma: add descriptor pre-fetch support for v3.4 +- Intel: dmaengine: ioatdma: disable DCA enabling on IOATDMA v3.4 +- Intel: dmaengine: ioatdma: Add Snow Ridge ioatdma device id +- perf/x86/intel: Add Tremont core PMU support +- perf/x86/intel: Add Icelake support +- perf/x86: Support constraint ranges +- PCI/PME: Fix kernel-doc of pcie_pme_resume() and pcie_pme_remove() +- PCI: Add PCIE_LNKCAP2_SLS2SPEED() macro +- PCI: Use pci_speed_string() for all PCI/PCI-X/PCIe strings +- PCI: Add pci_speed_string() +- PCI: Add 32 GT/s decoding in some macros +- PCI: Decode PCIe 32 GT/s link speed +- PCI/AER: Log which device prevents error recovery +- PCI/AER: Initialize aer_fifo +- PCI/AER: Use kfifo for tracking events instead of reimplementing it +- PCI/AER: Remove error source from AER struct aer_rpc +- Intel: PCI: Add support for Immediate Readiness +- ia64: ensure proper NUMA distance and possible map initialization +- sched/topology: Make sched_init_numa() use a set for the deduplicating sort +- block: don't call rq_qos_ops->done_bio if the bio isn't tracked +- block: fix blk-iolatency accounting underflow +- ovl: fix missing negative dentry check in ovl_rename() +- ext4: flush s_error_work before journal destroy in ext4_fill_super +- Revert "ext4: fix panic when mount failed with parallel flush_stashed_error_work" +- ext4: refresh the ext4_ext_path struct after dropping i_data_sem. 
+- ext4: ensure enough credits in ext4_ext_shift_path_extents +- ext4: use true,false for bool variable + +* Tue Oct 12 2021 Cheng Jian - 4.19.90-2110.3.0.0116 +- net: 6pack: fix slab-out-of-bounds in decode_data + +* Mon Oct 11 2021 Cheng Jian - 4.19.90-2110.2.0.0115 +- bpf: Fix integer overflow in prealloc_elems_and_freelist() + +* Fri Oct 08 2021 Cheng Jian - 4.19.90-2110.1.0.0114 +- timerqueue: fix kabi for struct timerqueue_head +- lib/timerqueue: Rely on rbtree semantics for next timer +- ACPI / APEI: Notify all ras err to driver +- ACPI / APEI: Add a notifier chain for unknown (vendor) CPER records +- blk-mq-sched: Fix blk_mq_sched_alloc_tags() error handling +- jbd2: protect jh by grab a ref in jbd2_journal_forget +- jbd2: Don't call __bforget() unnecessarily +- jbd2: Drop unnecessary branch from jbd2_journal_forget() +- ipc: replace costly bailout check in sysvipc_find_ipc() +- sched/topology: fix the issue groups don't span domain->span for NUMA diameter > 2 +- sched/topology: Warn when NUMA diameter > 2 +- USB: ehci: fix an interrupt calltrace error +- net: hns3: update hns3 version to 21.9.4 +- net: hns3: expand buffer len for fd tcam of debugfs +- net: hns3: fix hns3 debugfs queue info print coverage bugs +- net: hns3: fix memory override when bd_num is bigger than port info size +- scsi: hisi_sas: Optimize the code flow of setting sense data when ssp I/O abnormally completed + +* Wed Sep 29 2021 Cheng Jian - 4.19.90-2109.8.0.0113 +- Bluetooth: fix use-after-free error in lock_sock_nested() +- bpf, mips: Validate conditional branch offsets +- scsi: qla2xxx: Fix crash in qla2xxx_mqueuecommand() +- crypto: ccp - fix resource leaks in ccp_run_aes_gcm_cmd() +- bpf: Fix truncation handling for mod32 dst reg wrt zero +- bpf: Fix 32 bit src register truncation on div/mod +- bpf: Do not use ax register in interpreter on div/mod +- Revert "bpf: allocate 0x06 to new eBPF instruction class JMP32" +- Revert "bpf: refactor verifier min/max code for condition jump" +- Revert "bpf: verifier support JMP32" +- Revert "bpf: disassembler support JMP32" +- Revert "tools: bpftool: teach cfg code about JMP32" +- Revert "bpf: interpreter support for JMP32" +- Revert "bpf: JIT blinds support JMP32" +- Revert "x86_64: bpf: implement jitting of JMP32" +- Revert "arm64: bpf: implement jitting of JMP32" +- Revert "bpf: Fix 32 bit src register truncation on div/mod" +- Revert "bpf: Fix truncation handling for mod32 dst reg wrt zero" +- block: fix wrong define name +- block: fix compile error when CONFIG_BLK_DEV_THROTTLING disable +- pid: fix imbalanced calling of cgroup_threadgroup_change_begin/end() +- pid: fix return value when copy_process() failed +- block: fix NULL pointer in blkcg_drain_queue() +- block: clean up ABI breakage +- block: mark queue init done at the end of blk_register_queue +- block: fix race between adding/removing rq qos and normal IO +- scsi: hisi_sas: set sense data when the sas disk's I/O abnormally completed +- kyber: initialize 'async_depth' in kyber_queue_data_alloc() +- kyber: introduce kyber_depth_updated() +- blk-mq: handle all throttled io in blk_cleanup_queue() + +* Wed Sep 22 2021 Cheng Jian - 4.19.90-2109.7.0.0112 +- memcg: enable accounting for ldt_struct objects +- memcg: enable accounting for posix_timers_cache slab +- memcg: enable accounting for signals +- memcg: enable accounting for new namesapces and struct nsproxy +- memcg: enable accounting for fasync_cache +- memcg: enable accounting for mnt_cache entries +- memcg: enable accounting for pids in nested pid 
namespaces +- KVM: do not allow mapping valid but non-reference-counted pages +- nvme: remove the call to nvme_update_disk_info in nvme_ns_remove +- block: flush the integrity workqueue in blk_integrity_unregister +- block: check if a profile is actually registered in blk_integrity_unregister +- blk-mq: fix kabi broken in blk_mq_tags +- blk-mq: fix is_flush_rq +- blk-mq: fix kernel panic during iterating over flush request +- block: factor out a new helper from blk_rq_init() +- blk-mq: don't grab rq's refcount in blk_mq_check_expired() +- blk-mq: clearing flush request reference in tags->rqs[] +- blk-mq: clear stale request in tags->rq[] before freeing one request pool +- blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter +- Revert "blk-mq: use static_rqs instead of rqs to iterate tags" +- Revert "blk-mq: use blk_mq_queue_tag_inflight_iter() in debugfs" +- Revert "nbd: use blk_mq_queue_tag_inflight_iter()" +- blk-cgroup: fix UAF by grabbing blkcg lock before destroying blkg pd +- tasks: Fix kabi broken for struct task_struct +- tasks, sched/core: RCUify the assignment of rq->curr +- tasks, sched/core: With a grace period after finish_task_switch(), remove unnecessary code +- tasks, sched/core: Ensure tasks are available for a grace period after leaving the runqueue +- tasks: Add a count of task RCU users +- Revert "sched/membarrier: fix NULL poiner in membarrier_global_expedited" +- ext4: update last_pos for the case ext4_htree_fill_tree return fail +- blk-throttle: fix UAF by deleteing timer in blk_throtl_exit() +- nvme-rdma: don't update queue count when failing to set io queues +- scsi: hisi_sas: replace spin_lock_irqsave/spin_unlock_restore with spin_lock/spin_unlock +- scsi: hisi_sas: use threaded irq to process CQ interrupts + +* Wed Sep 15 2021 Cheng Jian - 4.19.90-2109.6.0.0111 +- ext4: fix race writing to an inline_data file while its xattrs are changing +- uce: pagecache reading scenario add shmem support +- Revert "uce: pagecache reading scenario add shmem support" +- memcg: enable accounting of ipc resources +- uce: pagecache reading scenario add shmem support +- misc/uacce: fixup out-of-bounds array write +- crypto/sec: add aead support for user-side + +* Mon Sep 13 2021 Cheng Jian - 4.19.90-2109.5.0.0110 +- nvme-pci: Use u32 for nvme_dev.q_depth and nvme_queue.q_depth +- nvme-pci: use unsigned for io queue depth +- net: hns3: update hns3 version to 21.9.2 +- net: hns3: the pointer is cast to another pointer in a different type, which is incompatible. 
+- net: hns3: cleanup for some print type miss match and blank lines +- net: hns3: remove tc enable checking +- net: hns3: Constify static structs +- net: hns3: fix kernel crash when unload VF while it is being reset +- net: hns3: fix memory override when bd_num is bigger than the ring size +- net: hns3: pad the short tunnel frame before sending to hardware +- net: hns3: check the return of skb_checksum_help() +- net: hns3: add 'QoS' support for port based VLAN configuration +- net: hns3: remove unused parameter from hclge_set_vf_vlan_common() +- net: hns3: disable port VLAN filter when support function level VLAN filter control +- net: hns3: remove redundant param mbx_event_pending +- net: hns3: remove the useless debugfs file node cmd +- net: hns3: fix get wrong pfc_en when query PFC configuration +- net: hns3: fix mixed flag HCLGE_FLAG_MQPRIO_ENABLE and HCLGE_FLAG_DCB_ENABLE +- net: hns3: add support for tc mqprio offload +- net: hns3: add debugfs support for vlan configuration +- net: hns3: add support for VF modify VLAN filter state +- net: hns3: add query basic info support for VF +- net: hns3: add support for modify VLAN filter state +- Revert: net: hns3: adds support for extended VLAN mode and 'QOS' in vlan 802.1Q protocol. +- net: hns3: change the method of getting cmd index in debugfs +- net: hns3: refactor dump mac tbl of debugfs +- net: hns3: add support for dumping MAC umv counter in debugfs +- net: hns3: refactor dump serv info of debugfs +- net: hns3: refactor dump mac tnl status of debugfs +- net: hns3: refactor dump qs shaper of debugfs +- net: hns3: refactor dump qos buf cfg of debugfs +- net: hns3: split out hclge_dbg_dump_qos_buf_cfg() +- net: hns3: refactor dump qos pri map of debugfs +- net: hns3: refactor dump qos pause cfg of debugfs +- net: hns3: refactor dump tc of debugfs +- net: hns3: refactor dump tm of debugfs +- net: hns3: refactor dump tm map of debugfs +- net: hns3: refactor dump fd tcam of debugfs +- net: hns3: refactor queue info of debugfs +- net: hns3: refactor queue map of debugfs +- net: hns3: refactor dump reg dcb info of debugfs +- net: hns3: refactor dump reg of debugfs +- net: hns3: Constify static structs +- net: hns3: refactor dump ncl config of debugfs +- net: hns3: refactor dump m7 info of debugfs +- net: hns3: refactor dump reset info of debugfs +- net: hns3: refactor dump intr of debugfs +- net: hns3: refactor dump loopback of debugfs +- net: hns3: refactor dump mng tbl of debugfs +- net: hns3: refactor dump mac list of debugfs +- net: hns3: refactor dump bd info of debugfs +- net: hns3: refactor the debugfs process +- net: hns3: add debugfs support for tm priority and qset info +- net: hns3: add interfaces to query information of tm priority/qset +- net: hns3: change the value of the SEPARATOR_VALUE macro in hclgevf_main.c +- net: hns3: fix for vxlan gpe tx checksum bug +- net: hns3: Fix for geneve tx checksum bug +- net: hns3: refine the struct hane3_tc_info +- net: hns3: VF not request link status when PF support push link status feature +- net: hns3: remove a duplicate pf reset counting +- net: hns3: remediate a potential overflow risk of bd_num_list +- net: hns3: fix query vlan mask value error for flow director +- net: hns3: fix error mask definition of flow director +- net: hns3: cleanup for endian issue for VF RSS +- net: hns3: fix incorrect handling of sctp6 rss tuple +- net: hns3: refine function hclge_set_vf_vlan_cfg() +- net: hns3: dump tqp enable status in debugfs +- hisilicon/hns3: convert comma to semicolon +- net: hns3: 
remove a misused pragma packed +- net: hns3: add debugfs of dumping pf interrupt resources +- net: hns3: Supply missing hclge_dcb.h include file +- net: hns3: print out speed info when parsing speed fails +- net: hns3: add a missing mutex destroy in hclge_init_ad_dev() +- net: hns3: add a print for initializing CMDQ when reset pending +- net: hns3: replace snprintf with scnprintf in hns3_update_strings +- net: hns3: change affinity_mask to numa node range +- net: hns3: change hclge/hclgevf workqueue to WQ_UNBOUND mode +- tcp_comp: Del compressed_data and remaining_data from tcp_comp_context_rx +- tcp_comp: Add dpkt to save decompressed skb +- tcp_comp: Fix ZSTD_decompressStream failed +- mm: downgrade the print level in do_shrink_slab +- uio: introduce UIO_MEM_IOVA +- mm/mempolicy.c: fix checking unmapped holes for mbind +- mm/mempolicy.c: check range first in queue_pages_test_walk +- net: qrtr: fix another OOB Read in qrtr_endpoint_post +- net: qrtr: fix OOB Read in qrtr_endpoint_post +- mm, slab, slub: stop taking cpu hotplug lock +- mm, slab, slub: stop taking memory hotplug lock +- mm, slub: stop freeing kmem_cache_node structures on node offline +- kernel/hung_task.c: introduce sysctl to print all traces when a hung task is detected +- vt_kdsetmode: extend console locking + +* Mon Sep 06 2021 Cheng Jian - 4.19.90-2109.2.0.0109 +- cpuidle: menu: Avoid computations when result will be discarded +- virtio_blk: fix handling single range discard request +- virtio_blk: add discard and write zeroes support +- iommu/arm-smmu-v3: add bit field SFM into GERROR_ERR_MASK +- page_alloc: consider highatomic reserve in watermark fast +- mm/filemap.c: fix a data race in filemap_fault() +- scsi/hifc: Fix memory leakage bug +- RDMA/hns: Fix wrong timer context buffer page size +- RDMA/hns: Bugfix for posting multiple srq work request +- RDMA/hns: Fix 0-length sge calculation error +- RDMA/hns: Fix configuration of ack_req_freq in QPC +- RDMA/hns: Add check for the validity of sl configuration +- RDMA/hns: Fix bug during CMDQ initialization +- RDMA/hns: Fixed wrong judgments in the goto branch +- RDMA/hns: Bugfix for checking whether the srq is full when post wr +- RDMA/hns: Fix wrong parameters when initial mtt of srq->idx_que +- RDMA/hns: Force rewrite inline flag of WQE +- RDMA/hns: Fix missing assignment of max_inline_data +- RDMA/hns: Avoid enabling RQ inline on UD +- RDMA/hns: Support to query firmware version +- RDMA/hns: Force srq_limit to 0 when creating SRQ +- RDMA/hns: Add interception for resizing SRQs +- RDMA/hns: Fix an cmd queue issue when resetting + +* Wed Sep 01 2021 Cheng Jian - 4.19.90-2109.1.0.0108 +- iommu: smmuv2: Using the SMMU_BYPASS_DEV to bypass SMMU for some SoCs +- iommu: dev_bypass: cleanup dev bypass code +- arm64: phytium: using MIDR_PHYTIUM_FT2000PLUS instead of ARM_CPU_IMP_PHYTIUM +- arm64: Add MIDR encoding for PHYTIUM CPUs +- arm64: Add MIDR encoding for HiSilicon Taishan CPUs +- sched: Fix sched_fork() access an invalid sched_task_group +- KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653) +- KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656) +- Bluetooth: switch to lock_sock in SCO +- Bluetooth: avoid circular locks in sco_sock_connect +- Bluetooth: schedule SCO timeouts with delayed_work +- Bluetooth: defer cleanup of resources in hci_unregister_dev() + +* Mon Aug 30 2021 Cheng Jian - 4.19.90-2108.9.0.0107 +- tcp_comp: Fix comp_read_size return value +- virtio-blk: Add validation for block size in config space +- 
blk-mq: fix divide by zero crash in tg_may_dispatch() +- mm, vmscan: guarantee drop_slab_node() termination +- jump_label: skip resource release if jump label is not relocated +- ext4: prevent getting empty inode buffer +- ext4: move ext4_fill_raw_inode() related functions before __ext4_get_inode_loc() +- ext4: factor out ext4_fill_raw_inode() +- ext4: make the updating inode data procedure atomic +- KVM: X86: MMU: Use the correct inherited permissions to get shadow page +- x86/config: Enable CONFIG_USERSWAP for openeuler_defconfig +- ext4: fix panic when mount failed with parallel flush_stashed_error_work +- device core: Consolidate locking and unlocking of parent and device +- Revert "ext4: flush s_error_work before journal destroy in ext4_fill_super" +- ext2: Strengthen xattr block checks +- ext2: Merge loops in ext2_xattr_set() +- ext2: introduce helper for xattr entry validation +- mm: rmap: explicitly reset vma->anon_vma in unlink_anon_vmas() + +* Tue Aug 24 2021 Cheng Jian - 4.19.90-2108.8.0.0106 +- bpf: Fix leakage due to insufficient speculative store bypass mitigation +- bpf: Introduce BPF nospec instruction for mitigating Spectre v4 +- bpf: track spill/fill of constants +- bpf/verifier: per-register parent pointers +- blk-mq: clear active_queues before clearing BLK_MQ_F_TAG_QUEUE_SHARED + +* Mon Aug 23 2021 Cheng Jian - 4.19.90-2108.7.0.0105 +- scsi: hisi_sas: Flush workqueue in hisi_sas_v3_remove() +- nvme: force complete cancelled requests +- blk-mq: blk-mq: provide forced completion method +- ext4: flush s_error_work before journal destroy in ext4_fill_super +- Revert "net: make get_net_ns return error if NET_NS is disabled" +- kthread: Fix PF_KTHREAD vs to_kthread() race +- sched/debug: Fix 'sched_debug_lock' undeclared error +- Remove MODULE_ALIAS() calls that take undefined macro +- scripts/dtc: Remove redundant YYLOC global declaration +- x86/boot/compressed: Don't declare __force_order in kaslr_64.c +- usb: hso: fix error handling code of hso_create_net_device +- hso: fix bailout in error case of probe + + +* Tue Aug 17 2021 Cheng Jian - 4.19.90-2108.5.0.0104 +- spec: fixed the mistake for dates in kernel.spec + +* Tue Aug 17 2021 Cheng Jian - 4.19.90-2108.5.0.0103 +- iscsi: Fix KABI change for 'Fix conn use after free during resets' +- iscsi: alloc iscsi_cls_session with iscsi_cls_session_warpper +- iscsi: introduce iscsi_cls_session_warapper and helper +- scsi: iscsi: Fix conn use after free during resets +- scsi: sr: Return correct event when media event code is 3 +- net: xilinx_emaclite: Do not print real IOMEM pointer +- sctp: move the active_key update after sh_keys is added +- usb: max-3421: Prevent corruption of freed memory +- net: ll_temac: Fix bug causing buffer descriptor overrun +- tcp_comp: Avoiding the null pointer problem of ctx in comp_stream_read +- nbd: add the check to prevent overflow in __nbd_ioctl() +- ext4: fix potential uninitialized access to retval in kmmpd +- blk-mq: fix kabi broken by "blk-mq: fix hang caused by freeze/unfreeze sequence" +- blk-mq: fix hang caused by freeze/unfreeze sequence +- config: Enable CONFIG_UCE_KERNEL_RECOVERY by default +- EDAC/ghes: Remove intermediate buffer pvt->detail_location +- USB: fix some clerical mistakes +- uce: pagecache reading scenario support kernel recovery +- uce: cow scenario support kernel recovery +- selinux: fix NULL dereference in policydb_destroy() +- livepatch/x86: Ignore return code of save_stack_trace_tsk_reliable() +- mm,hwpoison: return -EHWPOISON to denote that the page has already been 
poisoned
- mm/memory-failure: use a mutex to avoid memory_failure() races
- arm64: mm: account for hotplug memory when randomizing the linear region


* Fri Aug 13 2021 Cheng Jian - 4.19.90-2108.4.0.0102
- test modules directory existed when ls

* Tue Aug 10 2021 Gou Hao - 4.19.90-2108.4.0.0101
- fix rpmbuild error with patches

* Tue Aug 10 2021 Cheng Jian - 4.19.90-2108.4.0.0100
- openeuler_defconfig: Enable ARCH_PHYTIUM and ARM_GIC_PHYTIUM_2500
- config: Enable Phytium FT-2500 support configs for hulk_defconfig
- irqchip: phytium-2500: Add interrupt controller driver
- mm/vmscan: setup drop_caches_loop_limit in cmdline
- mm/memcg: optimize memory.numa_stat like memory.stat
- livepatch: Fix crash when access the global variable in hook
- timer: Use hlist_unhashed_lockless() in timer_pending()
- list: Add hlist_unhashed_lockless()
- config: Enable CONFIG_GPIO_HISI by default
- gpio: gpio-hisi: Add HiSilicon GPIO support
- config: Enable CONFIG_I2C_HISI by default
- i2c: add support for HiSilicon I2C controller
- i2c: core: add api to provide frequency mode strings
- i2c: core: add managed function for adding i2c adapters
- blk: reuse lookup_sem to serialize partition operations
- Revert "block: take bd_mutex around delete_partitions in del_gendisk"
- Revert "block: avoid creating invalid symlink file for patitions"
- Revert "block: call bdput() to avoid memleak"
- sctp: fix return value check in __sctp_rcv_asconf_lookup
- workqueue: fix UAF in pwq_unbound_release_workfn()
- exit: Move preemption fixup up, move blocking operations down
- Input: joydev - prevent use of not validated data in JSIOCSBTNMAP ioctl
- Input: joydev - prevent potential read overflow in ioctl
- srcu: Take early exit on memory-allocation failure
- Revert "modpost: add read_text_file() and get_line() helpers"
- Revert "modpost: use read_text_file() and get_line() for reading text files"
- Revert "modpost: remove use of non-standard strsep() in HOSTCC code"
- Revert "modpost: explain why we can't use strsep"
- cpuidle: fix return type err in haltpoll_switch_governor
- mm/slab: add naive detection of double free
- mm/mempool: fix a data race in mempool_free()
- mm/list_lru: fix a data race in list_lru_count_one
- mm/cma.c: fix NULL pointer dereference when cma could not be activated
- iommu/amd: Prevent NULL pointer dereference

* Mon Aug 02 2021 Cheng Jian - 4.19.90-2108.1.0.0099
- bcache: always record start time of a sample
- bcache: do not collect data insert info created by write_moving
- tcp_comp: open configs for tcp compression
- tcp_comp: implement recvmsg for tcp compression
- tcp_comp: implement sendmsg for tcp compression
- tcp_comp: add stub proto ops for tcp compression socket
- tcp_comp: allow ignore local tcp connections
- tcp_comp: only enable compression for give server ports
- tcp_comp: add sysctl for enable/disable compression
- tcp_comp: add init and cleanup hook for compression
- tcp_comp: add tcp comp option to SYN and SYN-ACK
- tcp_comp: add Kconfig for tcp payload compression
- tracing: Fix bug in rb_per_cpu_empty() that might cause deadloop.
+- proc: Avoid mixing integer types in mem_rw() +- net: sched: cls_api: Fix the the wrong parameter +- sctp: update active_key for asoc when old key is being replaced +- nvme-pci: don't WARN_ON in nvme_reset_work if ctrl.state is not RESETTING +- net/sched: act_skbmod: Skip non-Ethernet packets +- net/tcp_fastopen: fix data races around tfo_active_disable_stamp +- scsi: target: Fix protect handling in WRITE SAME(32) +- scsi: iscsi: Fix iface sysfs attr detection +- nvme-pci: do not call nvme_dev_remove_admin from nvme_remove +- ipv6: fix 'disable_policy' for fwd packets +- net: ip_tunnel: fix mtu calculation for ETHER tunnel devices +- udp: annotate data races around unix_sk(sk)->gso_size +- ipv6: tcp: drop silly ICMPv6 packet too big messages +- tcp: annotate data races around tp->mtu_info +- dma-buf/sync_file: Don't leak fences on merge failure +- net: validate lwtstate->data before returning from skb_tunnel_info() +- net: send SYNACK packet with accepted fwmark +- net: bridge: sync fdb to new unicast-filtering ports +- netfilter: ctnetlink: suspicious RCU usage in ctnetlink_dump_helpinfo +- dm writecache: fix writing beyond end of underlying device when shrinking +- dm writecache: return the exact table values that were set +- dm multipath: use updated MPATHF_QUEUE_IO on mapping for bio-based mpath +- dm writecache: fix data corruption when reloading the target +- dm verity fec: fix hash block number in verity_fec_decode +- sched/fair: Fix CFS bandwidth hrtimer expiry type +- scsi: libfc: Fix array index out of bound exception +- scsi: scsi_dh_alua: Fix signedness bug in alua_rtpg() +- net: bridge: multicast: fix PIM hello router port marking race +- NFSv4/pNFS: Don't call _nfs4_pnfs_v3_ds_connect multiple times +- virtio_net: move tx vq operation under tx queue lock +- x86/fpu: Limit xstate copy size in xstateregs_set() +- nfs: fix acl memory leak of posix_acl_create() +- NFSv4: Initialise connection to the server in nfs4_alloc_client() +- PCI/sysfs: Fix dsm_label_utf16s_to_utf8s() buffer overrun +- virtio_console: Assure used length from device is limited +- virtio_net: Fix error handling in virtnet_restore() +- virtio-blk: Fix memory leak among suspend/resume procedure +- NFS: nfs_find_open_context() may only select open files +- lib/decompress_unlz4.c: correctly handle zero-padding around initrds. +- i2c: core: Disable client irq on reboot/shutdown +- scsi: qedi: Fix null ref during abort handling +- scsi: iscsi: Fix shost->max_id use +- scsi: iscsi: Add iscsi_cls_conn refcount helpers +- scsi: scsi_dh_alua: Check for negative result value +- tracing: Do not reference char * as a string in histograms +- scsi: core: Fix bad pointer dereference when ehandler kthread is invalid +- seq_buf: Fix overflow in seq_buf_putmem_hex() +- ipmi/watchdog: Stop watchdog timer when the current action is 'none' +- net: ip: avoid OOM kills with large UDP sends over loopback +- vsock: notify server to shutdown when client has pending signal +- xfrm: Fix error reporting in xfrm_state_construct. 
+- virtio_net: Remove BUG() to avoid machine dead +- dm space maps: don't reset space map allocation cursor when committing +- ipv6: use prandom_u32() for ID generation +- mm/huge_memory.c: don't discard hugepage if other processes are mapping it +- vfio/pci: Handle concurrent vma faults +- vfio-pci: Use io_remap_pfn_range() for PCI IO memory +- writeback: fix obtain a reference to a freeing memcg css +- ipv6: fix out-of-bound access in ip6_parse_tlv() +- bpf: Do not change gso_size during bpf_skb_change_proto() +- ipv6: exthdrs: do not blindly use init_net +- net/ipv4: swap flow ports when validating source +- vxlan: add missing rcu_read_lock() in neigh_reduce() +- pkt_sched: sch_qfq: fix qfq_change_class() error path +- netfilter: nft_tproxy: restrict support to TCP and UDP transport protocols +- netfilter: nft_osf: check for TCP packet before further processing +- netfilter: nft_exthdr: check for IPv6 packet before further processing +- netlabel: Fix memory leak in netlbl_mgmt_add_common +- ACPI: sysfs: Fix a buffer overrun problem with description_show() +- evm: fix writing /evm overflow +- lib: vsprintf: Fix handling of number field widths in vsscanf +- ACPI: processor idle: Fix up C-state latency if not ordered +- fuse: check connected before queueing on fpq->io +- evm: Refuse EVM_ALLOW_METADATA_WRITES only if an HMAC key is loaded +- evm: Execute evm_inode_init_security() only when an HMAC key is loaded +- seq_buf: Make trace_seq_putmem_hex() support data longer than 8 +- ext4: use ext4_grp_locked_error in mb_find_extent +- ext4: fix avefreec in find_group_orlov +- ext4: remove check for zero nr_to_scan in ext4_es_scan() +- ext4: correct the cache_nr in tracepoint ext4_es_shrink_exit +- ext4: return error code when ext4_fill_flex_info() fails +- ext4: fix kernel infoleak via ext4_extent_header +- iov_iter_fault_in_readable() should do nothing in xarray case +- scsi: core: Retry I/O for Notify (Enable Spinup) Required error +- kthread: prevent deadlock when kthread_mod_delayed_work() races with kthread_cancel_delayed_work_sync() +- kthread_worker: split code for canceling the delayed work timer +- scsi: sr: Return appropriate error code when disk is ejected +- mm, futex: fix shared futex pgoff on shmem huge page +- mm/thp: another PVMW_SYNC fix in page_vma_mapped_walk() +- mm/thp: fix page_vma_mapped_walk() if THP mapped by ptes +- mm: page_vma_mapped_walk(): get vma_address_end() earlier +- mm: page_vma_mapped_walk(): use goto instead of while (1) +- mm: page_vma_mapped_walk(): add a level of indentation +- mm: page_vma_mapped_walk(): crossing page table boundary +- mm: page_vma_mapped_walk(): prettify PVMW_MIGRATION block +- mm: page_vma_mapped_walk(): use pmde for *pvmw->pmd +- mm: page_vma_mapped_walk(): settle PageHuge on entry +- mm: page_vma_mapped_walk(): use page for pvmw->page +- mm: thp: replace DEBUG_VM BUG with VM_WARN when unmap fails for split +- mm/thp: unmap_mapping_page() to fix THP truncate_cleanup_page() +- mm/thp: fix page_address_in_vma() on file THP tails +- mm/thp: fix vma_address() if virtual address below file offset +- mm/thp: try_to_unmap() use TTU_SYNC for safe splitting +- mm/thp: make is_huge_zero_pmd() safe and quicker +- mm/thp: fix __split_huge_pmd_locked() on shmem migration entry +- mm/rmap: use page_not_mapped in try_to_unmap() +- mm/rmap: remove unneeded semicolon in page_not_mapped() +- mm: add VM_WARN_ON_ONCE_PAGE() macro +- sctp: add param size validation for SCTP_PARAM_SET_PRIMARY +- sctp: validate chunk size in __rcv_asconf_lookup +- 
stop_machine: Avoid potential race behaviour +- KVM: PPC: Book3S: Fix H_RTAS rets buffer overflow +- can: raw: fix raw_rcv panic for sock UAF +- mm/page_isolation: do not isolate the max order page +- mm/zswap: fix passing zero to 'PTR_ERR' warning +- mm/page_alloc: speed up the iteration of max_order +- mm: hugetlb: fix type of delta parameter and related local variables in gather_surplus_pages() +- mm/vmalloc.c:__vmalloc_area_node(): avoid 32-bit overflow +- sctp: add size validation when walking chunks +- sctp: validate from_addr_param return +- jbd2: fix kabi broken in struct journal_s +- ext4: inline jbd2_journal_[un]register_shrinker() +- jbd2: export jbd2_journal_[un]register_shrinker() +- fs: remove bdev_try_to_free_page callback +- ext4: remove bdev_try_to_free_page() callback +- jbd2: simplify journal_clean_one_cp_list() +- jbd2,ext4: add a shrinker to release checkpointed buffers +- jbd2: remove redundant buffer io error checks +- jbd2: don't abort the journal when freeing buffers +- jbd2: ensure abort the journal if detect IO error when writing original buffer back +- jbd2: remove the out label in __jbd2_journal_remove_checkpoint() +- mm: vmscan: use a new flag to indicate shrinker is registered +- Revert "jbd2: remove the out label in __jbd2_journal_remove_checkpoint()" +- Revert "jbd2: ensure abort the journal if detect IO error when writing original buffer back" +- Revert "jbd2: fix kabi broken in struct journal_s" +- Revert "jbd2: don't abort the journal when freeing buffers" +- mm/vmscan: add drop_caches_loop_limit to break loop in drop_slab_node +- mm/vmscan: fix infinite loop in drop_slab_node +- userswap: add a kernel parameter to enable userswap +- userfaultfd: fix BUG_ON() in userfaultfd_release() +- kprobes: Warn if the kprobe is reregistered +- Revert "kretprobe: check re-registration of the same kretprobe earlier" + +* Tue Jul 27 2021 Cheng Jian - 4.19.90-2107.5.0.0098 +- mm: vmalloc: prevent use after free in _vm_unmap_aliases +- PCI/sysfs: Take reference on device to be removed +- seq_file: disallow extremely large seq buffer allocations +- ARM: footbridge: remove personal server platform +- mm: slab: fix kmem_cache_create failed when sysfs node not destroyed +- ARM: ensure the signal page contains defined contents +- nvme-pci: use atomic bitops to mark a queue enabled +- nvme: check the PRINFO bit before deciding the host buffer length +- nvme: fix compat address handling in several ioctls +- nvme-core: make implicit seed truncation explicit +- nvme-core: don't use NVME_NSID_ALL for command effects and supported log +- nvme-pci: fix NULL req in completion handler +- nvme-pci: cancel nvme device request before disabling +- nvme: copy MTFA field from identify controller +- nvme-pci: Unblock reset_work on IO failure +- nvme-pci: Don't disable on timeout in reset state +- nvme-pci: Fix controller freeze wait disabling +- block: error out if blk_get_queue() failed in blk_init_rl() + +* Tue Jul 20 2021 Cheng Jian - 4.19.90-2107.4.0.0097 +- Revert "smp: Fix smp_call_function_single_async prototype" +- Revert "vt: Fix character height handling with VT_RESIZEX" +- block: only call sched requeue_request() for scheduled requests +- KVM: arm: replace WARN_ON with pr_warn for UNKNOWN type +- net/packet: annotate accesses to po->ifindex +- net/packet: annotate accesses to po->bind +- inet: annotate date races around sk->sk_txhash +- ping: Check return value of function 'ping_queue_rcv_skb' +- net: ethtool: clear heap allocations for ethtool function +- x86/fpu: Reset state 
for all signal restore failures +- inet: use bigger hash table for IP ID generation +- net: bridge: fix vlan tunnel dst refcnt when egressing +- net: bridge: fix vlan tunnel dst null pointer dereference +- tracing: Do no increment trace_clock_global() by one +- tracing: Do not stop recording comms if the trace file is being read +- tracing: Do not stop recording cmdlines when tracing is off +- icmp: don't send out ICMP messages with a source address of 0.0.0.0 +- net/af_unix: fix a data-race in unix_dgram_sendmsg / unix_release_sock +- net: ipv4: fix memory leak in ip_mc_add1_src +- net: make get_net_ns return error if NET_NS is disabled +- net: add documentation to socket.c +- sch_cake: Fix out of bounds when parsing TCP options and header +- netfilter: synproxy: Fix out of bounds when parsing TCP options +- rtnetlink: Fix regression in bridge VLAN configuration +- udp: fix race between close() and udp_abort() +- net: ipv4: fix memory leak in netlbl_cipsov4_add_std +- fib: Return the correct errno code +- net: Return the correct errno code +- rtnetlink: Fix missing error code in rtnl_bridge_notify() +- net: ipconfig: Don't override command-line hostnames or domains +- nvme-loop: check for NVME_LOOP_Q_LIVE in nvme_loop_destroy_admin_queue() +- nvme-loop: clear NVME_LOOP_Q_LIVE when nvme_loop_configure_admin_queue() fails +- nvme-loop: reset queue count to 1 in nvme_loop_destroy_io_queues() +- scsi: target: core: Fix warning on realtime kernels +- proc: only require mm_struct for writing +- tracing: Correct the length check which causes memory corruption +- ftrace: Do not blindly read the ip address in ftrace_bug() +- scsi: core: Only put parent device if host state differs from SHOST_CREATED +- scsi: core: Put .shost_dev in failure path if host state changes to RUNNING +- scsi: core: Fix error handling of scsi_host_alloc() +- NFSv4: nfs4_proc_set_acl needs to restore NFS_CAP_UIDGID_NOMAP on error. 
- NFS: Fix use-after-free in nfs4_init_client()
- NFS: Fix a potential NULL dereference in nfs_get_client()
- sched/fair: Make sure to update tg contrib for blocked load
- perf: Fix data race between pin_count increment/decrement
- cgroup1: don't allow '\n' in renaming
- wq: handle VM suspension in stall detection
- cgroup: disable controllers at parse time
- net: mdiobus: get rid of a BUG_ON()
- netlink: disable IRQs for netlink_lock_table()
- bonding: init notify_work earlier to avoid uninitialized use
- proc: Track /proc/$pid/attr/ opener mm_struct
- ACPI: EC: Look for ECDT EC after calling acpi_load_tables()
- ACPI: probe ECDT before loading AML tables regardless of module-level code flag
- mm, hugetlb: fix simple resv_huge_pages underflow on UFFDIO_COPY
- x86/apic: Mark _all_ legacy interrupts when IO/APIC is missing
- pid: take a reference when initializing `cad_pid`
- netfilter: nfnetlink_cthelper: hit EBUSY on updates if size mismatches
- ipvs: ignore IP_VS_SVC_F_HASHED flag when adding service
- vfio/platform: fix module_put call in error flow
- vfio/pci: zap_vma_ptes() needs MMU
- vfio/pci: Fix error return code in vfio_ecap_init()
- efi: cper: fix snprintf() use in cper_dimm_err_location()
- efi: Allow EFI_MEMORY_XP and EFI_MEMORY_RO both to be cleared
- lib/clear_user: ensure loop in __arch_clear_user cache-aligned
- scsi: core: Treat device offline as a failure
- Revert "scsi: check the whole result for reading write protect flag"
- ext4: fix WARN_ON_ONCE(!buffer_uptodate) after an error writing the superblock
- arm64/config: Set CONFIG_TXGBE=m by default
- make bch_btree_check() to be multiple threads
- Make compile successful when CONFIG_BCACHE is not set.
- Move only dirty data when gc runnning, in order to reducing write amplification.
- Add traffic policy for low cache available.
- igmp: Add ip_mc_list lock in ip_check_mc_rcu
- memcg: fix unsuitable null check after alloc memory
- cpuidle: fix a build error when compiling haltpoll into module
- config: enable KASAN and UBSAN by default
- KVM: x86: expose AVX512_BF16 feature to guest
- KVM: cpuid: remove has_leaf_count from struct kvm_cpuid_param
- KVM: cpuid: rename do_cpuid_1_ent
- KVM: cpuid: set struct kvm_cpuid_entry2 flags in do_cpuid_1_ent
- KVM: cpuid: extract do_cpuid_7_mask and support multiple subleafs
- KVM: cpuid: do_cpuid_ent works on a whole CPUID function
- ext4: fix possible UAF when remounting r/o a mmp-protected file system
- locks: Fix UBSAN undefined behaviour in flock64_to_posix_lock
- iomap: Mark read blocks uptodate in write_begin
- iomap: Clear page error before beginning a write
- iomap: move the zeroing case out of iomap_read_page_sync
- nbd: handle device refs for DESTROY_ON_DISCONNECT properly
- cifs: Fix leak when handling lease break for cached root fid
- mm/memcontrol.c: fix kasan slab-out-of-bounds in mem_cgroup_css_alloc
- module: limit enabling module.sig_enforce
- selftests/bpf: add test_spec_readahead_xfs_file to support specail async readahead
- mm: support special async readahead
- selftests/bpf: test_xfs_file support to clear FMODE_RANDOM
- xfs: let writable tracepoint enable to clear flag of f_mode
- jbd2: fix kabi broken in struct journal_s
- btrfs: allow btrfs_truncate_block() to fallback to nocow for data space reservation
- NFSv4.1: fix kabi for struct rpc_xprt
- usb: gadget: rndis: Fix info leak of rndis
- once: Fix panic when module unload
- SUNRPC: Should wake up the privileged task firstly.
+- SUNRPC: Fix the batch tasks count wraparound. +- bpf: Fix leakage under speculation on mispredicted branches +- bpf: Do not mark insn as seen under speculative path verification +- bpf: Inherit expanded/patched seen count from old aux data +- bpf: Update selftests to reflect new error states +- bpf, test_verifier: switch bpf_get_stack's 0 s> r8 test +- bpf: Test_verifier, bpf_get_stack return value add <0 +- bpf: extend is_branch_taken to registers +- selftests/bpf: add selftest part of "bpf: improve verifier branch analysis" +- selftests/bpf: Test narrow loads with off > 0 in test_verifier +- bpf, selftests: Fix up some test_verifier cases for unprivileged +- bpf: fix up selftests after backports were fixed +- nvme-rdma: avoid request double completion for concurrent nvme_rdma_timeout +- binfmt: Move install_exec_creds after setup_new_exec to match binfmt_elf +- ext4: fix memory leak in ext4_fill_super +- RDMA/hns: Add support for addressing when hopnum is 0 +- RDMA/hns: Optimize hns buffer allocation flow +- RDMA/hns: Check if depth of qp is 0 before configure +- RDMA/hns: Optimize qp param setup flow +- RDMA/hns: Optimize qp buffer allocation flow +- RDMA/hns: Optimize qp destroy flow +- RDMA/hns: Remove asynchronic QP destroy +- RDMA/hns: Bugfix for posting a wqe with sge +- RDMA/hns: Delete unnecessary variable max_post +- RDMA/hns: optimize the duplicated code for qpc setting flow +- RDMA/hns: Prevent undefined behavior in hns_roce_set_user_sq_size() +- RDMA/umem: Add rdma_umem_for_each_dma_block() +- RDMA/verbs: Add a DMA iterator to return aligned contiguous memory blocks +- can: bcm: delay release of struct bcm_op after synchronize_rcu() +- etmem_scan: fix memleak in vm_idle_read +- x86/uprobes: Do not use prefixes.nbytes when looping over prefixes.bytes +- Revert "arm64: capabilities: Merge entries for ARM64_WORKAROUND_CLEAN_CACHE" +- Revert "arm64: capabilities: Merge duplicate Cavium erratum entries" +- Revert "arm64: capabilities: Merge duplicate entries for Qualcomm erratum 1003" +- net: hns3: update hns3 version to 1.9.40.24 +- net: hns3: remove redundant assignment to rx_index +- net: hns3: Fix potential null pointer defererence of null ae_dev +- net: hns3: not reset TQP in the DOWN while VF resetting +- net: hns3: remove redundant enum type HNAE3_RESTORE_CLIENT +- net: hns3: add stats logging when skb padding fails +- net: hns3: add tx send size handling for tso skb +- net: hns3: add handling for xmit skb with recursive fraglist +- net: hns3: use napi_consume_skb() when cleaning tx desc +- net: hns3: use writel() to optimize the barrier operation +- net: hns3: optimize the rx clean process +- net: hns3: optimize the tx clean process +- net: hns3: batch tx doorbell operation +- net: hns3: batch the page reference count updates +- net: hns3: streaming dma buffer sync between cpu and device +- net: hns3: rename buffer-related functions +- net: hns3: pointer type of buffer should be void +- net: hns3: remove unnecessary devm_kfree +- net: hns3: add suspend and resume pm_ops +- Revert "net: hns3: add suspend/resume function for hns3 driver" +- net: hns3: change flr_prepare/flr_done function names +- net: hns3: change hclge_reset_done function name +- net: hns3: configure promisc mode for VF asynchronously +- kabi: add kabi list for x86_64 +- kabi: update kabi list for arm64 +- hugetlbfs: hugetlb_fault_mutex_hash() cleanup +- ipv6: record frag_max_size in atomic fragments in input path +- scsi: libsas: Use _safe() loop in sas_resume_port() +- SMB3: incorrect file id in 
requests compounded with open +- NFSv4: Fix v4.0/v4.1 SEEK_DATA return -ENOTSUPP when set NFS_V4_2 config +- NFS: Don't corrupt the value of pg_bytes_written in nfs_do_recoalesce() +- NFS: fix an incorrect limit in filelayout_decode_layout() +- dm snapshot: properly fix a crash when an origin has no snapshots +- proc: Check /proc/$pid/attr/ writes against file opener +- iommu/vt-d: Fix sysfs leak in alloc_iommu() +- NFSv4: Fix a NULL pointer dereference in pnfs_mark_matching_lsegs_return() +- cifs: set server->cipher_type to AES-128-CCM for SMB3.0 +- tty: vt: always invoke vc->vc_sw->con_resize callback +- vt: Fix character height handling with VT_RESIZEX +- vgacon: Record video mode changes with VT_RESIZEX +- Revert "niu: fix missing checks of niu_pci_eeprom_read" +- Revert "qlcnic: Avoid potential NULL pointer dereference" +- Revert "rtlwifi: fix a potential NULL pointer dereference" +- Revert "media: rcar_drif: fix a memory disclosure" +- Revert "gdrom: fix a memory leak bug" +- Revert "scsi: ufs: fix a missing check of devm_reset_control_get" +- Revert "video: imsttfb: fix potential NULL pointer dereferences" +- Revert "hwmon: (lm80) fix a missing check of bus read in lm80 probe" +- Revert "leds: lp5523: fix a missing check of return value of lp55xx_read" +- Revert "net: stmicro: fix a missing check of clk_prepare" +- Revert "video: hgafb: fix potential NULL pointer dereference" +- dm snapshot: fix crash with transient storage and zero chunk size +- Revert "serial: mvebu-uart: Fix to avoid a potential NULL pointer dereference" +- Revert "rapidio: fix a NULL pointer dereference when create_workqueue() fails" +- Revert "ALSA: sb8: add a check for request_region" +- cifs: fix memory leak in smb2_copychunk_range +- locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal +- nvmet: seset ns->file when open fails +- ptrace: make ptrace() fail if the tracee changed its pid unexpectedly +- firmware: arm_scpi: Prevent the ternary sign expansion bug +- ipv6: remove extra dev_hold() for fallback tunnels +- ip6_tunnel: sit: proper dev_{hold|put} in ndo_[un]init methods +- sit: proper dev_{hold|put} in ndo_[un]init methods +- ip6_gre: proper dev_{hold|put} in ndo_[un]init methods +- block: reexpand iov_iter after read/write +- scsi: target: tcmu: Return from tcmu_handle_completions() if cmd_id not found +- ACPI / hotplug / PCI: Fix reference count leak in enable_slot() +- nvme: do not try to reconfigure APST when the controller is not live +- netfilter: conntrack: Make global sysctls readonly in non-init netns +- kobject_uevent: remove warning in init_uevent_argv() +- blk-mq: Swap two calls in blk_mq_exit_queue() +- userfaultfd: release page in error path to avoid BUG_ON +- netfilter: nftables: avoid overflows in nft_hash_buckets() +- kernel: kexec_file: fix error return code of kexec_calculate_store_digests() +- sched/fair: Fix unfairness caused by missing load decay +- netfilter: nfnetlink_osf: Fix a missing skb_header_pointer() NULL check +- net: fix nla_strcmp to handle more then one trailing null character +- ksm: fix potential missing rmap_item for stable_node +- mm/hugeltb: handle the error case in hugetlb_fix_reserve_counts() +- khugepaged: fix wrong result value for trace_mm_collapse_huge_page_isolate() +- netfilter: xt_SECMARK: add new revision to fix structure layout +- sctp: fix a SCTP_MIB_CURRESTAB leak in sctp_sf_do_dupcook_b +- sctp: do asoc update earlier in sctp_sf_do_dupcook_a +- NFSv4.2 fix handling of sr_eof in SEEK's reply +- pNFS/flexfiles: fix incorrect size check 
in decode_nfs_fh() +- NFS: Deal correctly with attribute generation counter overflow +- NFSv4.2: Always flush out writes in nfs42_proc_fallocate() +- PCI: Release OF node in pci_scan_device()'s error path +- ethtool: ioctl: Fix out-of-bounds warning in store_link_ksettings_for_user() +- sctp: Fix out-of-bounds warning in sctp_process_asconf_param() +- cuse: prevent clone +- ip6_vti: proper dev_{hold|put} in ndo_[un]init methods +- tpm: fix error return code in tpm2_get_cc_attrs_tbl() +- sctp: delay auto_asconf init until binding the first addr +- Revert "net/sctp: fix race condition in sctp_destroy_sock" +- smp: Fix smp_call_function_single_async prototype +- net: Only allow init netns to set default tcp cong to a restricted algo +- mm/memory-failure: unnecessary amount of unmapping +- mm/sparse: add the missing sparse_buffer_fini() in error branch +- drivers/block/null_blk/main: Fix a double free in null_init. +- sched/debug: Fix cgroup_path[] serialization +- x86/events/amd/iommu: Fix sysfs type mismatch +- vfio/mdev: Do not allow a mdev_type to have a NULL parent pointer +- ata: libahci_platform: fix IRQ check +- x86/kprobes: Fix to check non boostable prefixes correctly +- ACPI: CPPC: Replace cppc_attr with kobj_attribute +- irqchip/gic-v3: Fix OF_BAD_ADDR error handling +- x86/microcode: Check for offline CPUs before requesting new microcode +- ovl: fix missing revert_creds() on error path +- x86/cpu: Initialize MSR_TSC_AUX if RDTSCP *or* RDPID is supported +- md: Fix missing unused status line of /proc/mdstat +- md: md_open returns -EBUSY when entering racing area +- md: factor out a mddev_find_locked helper from mddev_find +- md: split mddev_find +- md-cluster: fix use-after-free issue when removing rdev +- md/bitmap: wait for external bitmap writes to complete during tear down +- dm rq: fix double free of blk_mq_tag_set in dev remove after table load fails +- dm space map common: fix division bug in sm_ll_find_free_block() +- dm persistent data: packed struct should have an aligned() attribute too +- tracing: Restructure trace_clock_global() to never block +- tracing: Map all PIDs to command lines +- tty: fix memory leak in vc_deallocate +- ext4: fix error code in ext4_commit_super +- posix-timers: Preserve return value in clock_adjtime32() +- Revert 337f13046ff0 ("futex: Allow FUTEX_CLOCK_REALTIME with FUTEX_WAIT op") +- dm raid: fix inconclusive reshape layout on fast raid4/5/6 table reload sequences +- md/raid1: properly indicate failure when ending a failed write request +- NFSv4: Don't discard segments marked for return in _pnfs_return_layout() +- NFS: Don't discard pNFS layout segments that are marked for return +- ACPI: GTDT: Don't corrupt interrupt mappings on watchdow probe failure +- arm64/vdso: Discard .note.gnu.property sections in vDSO +- perf/arm_pmu_platform: Fix error handling +- genirq/matrix: Prevent allocation counter corruption +- crypto: api - check for ERR pointers in crypto_destroy_tfm() +- cifs: Return correct error code from smb2_get_enc_key +- ftrace: Handle commands when closing set_ftrace_filter file +- ACPI/IORT: Fix 'Number of IDs' handling in iort_id_map() +- ext4: do not use extent after put_bh +- modpost: explain why we can't use strsep +- modpost: remove use of non-standard strsep() in HOSTCC code +- modpost: use read_text_file() and get_line() for reading text files +- modpost: add read_text_file() and get_line() helpers +- arm64: capabilities: Merge duplicate entries for Qualcomm erratum 1003 +- arm64: capabilities: Merge duplicate Cavium erratum 
entries +- arm64: capabilities: Merge entries for ARM64_WORKAROUND_CLEAN_CACHE +- net: phy: ensure phylib state machine is stopped after calling phy_stop +- net: linkwatch: add check for netdevice being present to linkwatch_do_dev +- net: phy: call state machine synchronously in phy_stop +- of: fix kmemleak crash caused by imbalance in early memory reservation +- random: fix soft lockup when trying to read from an uninitialized blocking pool +- random: only read from /dev/random after its pool has received 128 bits +- block: check queue's limits.discard_granularity in __blkdev_issue_discard() +- block: loop: set discard granularity and alignment for block device backed loop +- posix-cpu-timers: Stop disabling timers on mt-exec +- kprobes: Fix compiler warning for !CONFIG_KPROBES_ON_FTRACE +- perf top: Fix stdio interface input handling with glibc 2.28+ +- iommu/vt-d: Fix mm reference leak +- iommu/dma: Fix for dereferencing before null checking +- srcu: Apply *_ONCE() to ->srcu_last_gp_end +- arm64: Kconfig: select HAVE_FUTEX_CMPXCHG +- kill kernfs_pin_sb() +- mm, thp: fix defrag setting if newline is not used +- nfsd: Clone should commit src file metadata too +- nfsd: Ensure CLONE persists data and metadata changes to the target file +- x86/sysfb: Fix check for bad VRAM size +- x86/timer: Force PIT initialization when !X86_FEATURE_ARAT +- x86/timer: Don't skip PIT setup when APIC is disabled or in legacy mode +- x86/timer: Skip PIT initialization on modern chipsets +- x86/apic: Rename 'lapic_timer_frequency' to 'lapic_timer_period' +- iommu/vt-d: Handle PCI bridge RMRR device scopes in intel_iommu_get_resv_regions +- iommu/vt-d: Handle RMRR with PCI bridge device scopes +- iommu/vt-d: Introduce is_downstream_to_pci_bridge helper +- crypto: x86 - remove SHA multibuffer routines and mcryptd +- iommu/vt-d: Duplicate iommu_resv_region objects per device list +- memcg: fix kabi broken when memory cgroup enhance +- mm: memcontrol: fix NULL-ptr deref in percpu stats flush +- mm: memcg: get number of pages on the LRU list in memcgroup base on lru_zone_size +- mm: memcontrol: fix percpu vmstats and vmevents flush +- mm, memcg: partially revert "mm/memcontrol.c: keep local VM counters in sync with the hierarchical ones" +- mm/memcontrol.c: keep local VM counters in sync with the hierarchical ones +- mm: memcontrol: flush percpu vmevents before releasing memcg +- mm: memcontrol: flush percpu vmstats before releasing memcg +- mm/memcontrol: fix wrong statistics in memory.stat +- mm: memcontrol: don't batch updates of local VM stats and events +- mm: memcontrol: fix NUMA round-robin reclaim at intermediate level +- mm: memcontrol: fix recursive statistics correctness & scalabilty +- mm: memcontrol: move stat/event counting functions out-of-line +- mm: memcontrol: make cgroup stats and events query API explicitly local +- mm: memcontrol: quarantine the mem_cgroup_[node_]nr_lru_pages() API +- mm, memcg: rename ambiguously named memory.stat counters and functions +- mm/memcontrol.c: fix memory.stat item ordering +- mm: memcontrol: expose THP events on a per-memcg basis +- mm: memcontrol: track LRU counts in the vmstats array +- mm: memcontrol: push down mem_cgroup_nr_lru_pages() +- mm: memcontrol: push down mem_cgroup_node_nr_lru_pages() +- mm: workingset: don't drop refault information prematurely +- mm: memcontrol: replace zone summing with lruvec_page_state() +- mm: memcontrol: replace node summing with memcg_page_state() +- mm, oom: add oom victim's memcg to the oom context information +- 
mm/oom_kill.c: fix uninitialized oc->constraint +- mm, oom: reorganize the oom report in dump_header +- memcg: update the child's qos_level synchronously in memcg_qos_write() +- memcg: Add static key for memcg priority +- memcg: fix kabi broken when enable CONFIG_MEMCG_QOS +- memcg: enable CONFIG_MEMCG_QOS by default +- memcg: support priority for oom +- scsi: core: Fix failure handling of scsi_add_host_with_dma() +- fuse: fix the ->direct_IO() treatment of iov_iter +- bdev: Do not return EBUSY if bdev discard races with write +- block: mark flush request as IDLE when it is really finished +- blk-mq: mark flush request as IDLE in flush_end_io() +- vhost_net: avoid tx queue stuck when sendmsg fails +- iommu/vt-d: Add support for ACPI device use physical, node as pci device to establish identity mapping +- io_uring: NULL files dereference by SQPOLL +- vgacon: remove software scrollback support +- block: dio: ensure the memory order between bi_private and bi_css +- ext4: fix memory leak in ext4_fill_super +- RDMA/ucma: Rework ucma_migrate_id() to avoid races with destroy +- RDMA/ucma: Add missing locking around rdma_leave_multicast() +- RDMA/ucma: Fix locking for ctx->events_reported +- RDMA/ucma: Put a lock around every call to the rdma_cm layer +- mm/memory-failure: make sure wait for page writeback in memory_failure +- can: bcm: fix infoleak in struct bcm_msg_head +- blk-wbt: make sure throttle is enabled properly +- blk-wbt: introduce a new disable state to prevent false positive by rwb_enabled() +- ext4: stop return ENOSPC from ext4_issue_zeroout +- dm btree remove: assign new_root only when removal succeeds +- block: call bdput() to avoid memleak +- scsi: remove unused kobj map for sd devie to avoid memleak +- tools build: Check if gettid() is available before providing helper +- tools build feature: Check if eventfd() is available +- tools build feature: Check if get_current_dir_name() is available +- perf tools: Use %define api.pure full instead of %pure-parser +- bpf: move new add member to the end of the struct bpf_prog_aux + +* Thu Jul 08 2021 Senlin Xia - 4.19.90-2106.3.0.0096 +- add buildrequire: perl-devel for with_perf + +* Thu Jun 17 2021 Cheng Jian - 4.19.90-2106.3.0.0095 +- cpuidle: fix container_of err in cpuidle_device and cpuidle_driver + +* Wed Jun 16 2021 Cheng Jian - 4.19.90-2106.2.0.0094 +- sched/membarrier: fix NULL poiner in membarrier_global_expedited +- writeback: don't warn on an unregistered BDI in __mark_inode_dirty + +* Tue Jun 15 2021 Cheng Jian - 4.19.90-2106.1.0.0093 +- fs/buffer.c: add checking buffer head stat before clear +- Bluetooth: SMP: Fail if remote and local public keys are identical +- Bluetooth: use correct lock to prevent UAF of hdev object +- Bluetooth: fix the erroneous flush_work() order +- iomap: Make sure iomap_end is called after iomap_begin +- x86/kvm: Add "nopvspin" parameter to disable PV spinlocks +- scsi: libsas: add lun number check in .slave_alloc callback +- nfc: fix NULL ptr dereference in llcp_sock_getname() after failed connect +- USB:ehci:fix Kunpeng920 ehci hardware problem +- nvme: don't warn on block content change effects +- block: recalculate segment count for multi-segment discards correctly +- nbd: Fix NULL pointer in flush_workqueue +- Bluetooth: Fix slab-out-of-bounds read in hci_extended_inquiry_result_evt() +- HID: make arrays usage and value to be the same +- ath10k: Validate first subframe of A-MSDU before processing the list +- mac80211: extend protection against mixed key and fragment cache attacks +- mac80211: 
do not accept/forward invalid EAPOL frames +- mac80211: prevent attacks on TKIP/WEP as well +- mac80211: check defrag PN against current frame +- mac80211: add fragment cache to sta_info +- mac80211: drop A-MSDUs on old ciphers +- cfg80211: mitigate A-MSDU aggregation attacks +- mac80211: properly handle A-MSDUs that start with an RFC 1042 header +- mac80211: prevent mixed key and fragment cache attacks +- mac80211: assure all fragments are encrypted +- mac80211: mark station unauthorized before key removal +- block: avoid creating invalid symlink file for patitions +- block: take bd_mutex around delete_partitions in del_gendisk +- NFSv4: Fix second deadlock in nfs4_evict_inode() +- NFSv4: Fix deadlock between nfs4_evict_inode() and nfs4_opendata_get_inode() +- NFSv4.1: fix handling of backchannel binding in BIND_CONN_TO_SESSION +- NFS: Don't gratuitously clear the inode cache when lookup failed +- NFS: Don't revalidate the directory permissions on a lookup failure +- NFS: nfs_delegation_find_inode_server must first reference the superblock +- nfs4: strengthen error check to avoid unexpected result +- NFS: Fix interrupted slots by sending a solo SEQUENCE operation +- NFS: Ensure we time out if a delegreturn does not complete +- NFSv4.0: nfs4_do_fsinfo() should not do implicit lease renewals +- NFS: Use kmemdup_nul() in nfs_readdir_make_qstr() +- NFSv3: FIx bug when using chacl and chmod to change acl +- NFSv4.x: Handle bad/dead sessions correctly in nfs41_sequence_process() +- NFSv4.1: Only reap expired delegations +- NFSv4.1: Fix open stateid recovery +- NFSv4.1: Don't process the sequence op more than once. +- NFS: Ensure NFS writeback allocations don't recurse back into NFS. +- nfs_remount(): don't leak, don't ignore LSM options quietly +- UACCE backport from mainline +- crypto: hisilicon-Cap block size at 2^31 +- crypto: hisilicon-hpre add req check when callback +- crypto: hisilicon- count send_ref when sending bd +- crypto: hisilicon-enhancement of qm DFX +- crypto: hisilicon-memory management optimization +- net: hns3: update hns3 version to 1.9.38.12 +- net: hns3: add match_id to check mailbox response from PF to VF +- net: hns3: fix possible mismatches resp of mailbox +- net: hns3: fix the logic for clearing resp_msg +- net: hns3: fix queue id check error when configure flow director rule by ethtool +- net: hns3: add check for HNS3_NIC_STATE_INITED before net open +- net: hns3: add waiting time before cmdq memory is released +- net: hns3: disable firmware compatible features when uninstall PF +- net: hns3: fix change RSS 'hfunc' ineffective issue +- net: hns3: fix inconsistent vf id print +- net: hns3: remove redundant variable initialization +- net: hns3: replace the tab before the left brace with one space +- net: hns3: fix hns3_cae_pfc_storm.h missing header guard problem +- net: hns3: modify an error type configuration +- net: hns3: put off calling register_netdev() until client initialize complete +- net: hns3: replace disable_irq by IRQ_NOAUTOEN flag +- net: hns3: update rss indirection table after setup tc +- net: hns3: don't change tc mqprio configuration when client is unregistered +- net: hns3: remove redundant client_setup_tc handle +- arm64/mpam: Fix use-after-free in mkdir_resctrl_prepare() + +* Sat Jun 05 2021 Cheng Jian - 4.19.90-2105.9.0.0092 +- selftests/bpf: add test_xfs_file.c and test_set_xfs_file.c +- bpf: add bpf_probe_read_str into bpf_helpers.h +- xfs: add writable tracepoint for xfs file buffer read +- readahead: introduce FMODE_WILLNEED to read first 2MB 
of file + +* Fri Jun 04 2021 Cheng Jian - 4.19.90-2105.8.0.0091 +- tools: libbpf: fix compiler error +- bpf: fix kabi for struct bpf_prog_aux and struct bpf_raw_event_map +- tools: bpftool: add raw_tracepoint_writable prog type to header +- tools: sync bpf.h +- bpf: add writable context for raw tracepoints +- x86/tsc: Respect tsc command line paraemeter for clocksource_tsc_early + +* Tue Jun 01 2021 Cheng Jian - 4.19.90-2105.6.0.0090 +- cpuidle: fix kabi broken in cpuidle_device and cpuidle_driver +- config: set default value of haltpoll +- ARM: cpuidle: Add support for cpuidle-haltpoll driver for ARM +- arm64: Add some definitions of kvm_para* +- cpuidle-haltpoll: Use arch_cpu_idle() to replace default_idle() +- arm64: Optimize ttwu IPI +- config: enable CONFIG_CPU_IDLE_GOV_HALTPOLL and CONFIG_HALTPOLL_CPUIDLE default +- KVM: polling: add architecture backend to disable polling +- cpuidle-haltpoll: Fix small typo +- cpuidle: haltpoll: allow force loading on hosts without the REALTIME hint +- cpuidle-haltpoll: Enable kvm guest polling when dedicated physical CPUs are available +- cpuidle-haltpoll: do not set an owner to allow modunload +- cpuidle-haltpoll: return -ENODEV on modinit failure +- cpuidle-haltpoll: vcpu hotplug support +- cpuidle-haltpoll: set haltpoll as preferred governor +- cpuidle: allow governor switch on cpuidle_register_driver() +- cpuidle: governor: Add new governors to cpuidle_governors again +- cpuidle: Add cpuidle.governor= command line parameter +- cpuidle-haltpoll: disable host side polling when kvm virtualized +- kvm: x86: add host poll control msrs +- cpuidle: add haltpoll governor +- governors: unify last_state_idx +- cpuidle: use first valid target residency as poll time +- cpuidle: header file stubs must be "static inline" +- cpuidle: add poll_limit_ns to cpuidle_device structure +- add cpuidle-haltpoll driver +- cpuidle: poll_state: Fix default time limit +- cpuidle: poll_state: Disregard disable idle states +- cpuidle: poll_state: Revise loop termination condition +- cpuidle: menu: Do not update last_state_idx in menu_select() +- bpf: No need to simulate speculative domain for immediates +- bpf: Fix mask direction swap upon off reg sign change +- bpf: Wrap aux data inside bpf_sanitize_info container + +* Tue Jun 01 2021 Cheng Jian - 4.19.90-2105.5.0.0089 +- ata: ahci: Disable SXS for Hisilicon Kunpeng920 +- fuse: don't ignore errors from fuse_writepages_fill() +- NFS: finish_automount() requires us to hold 2 refs to the mount record +- NFS: If nfs_mountpoint_expiry_timeout < 0, do not expire submounts +- NFS: remove unused check for negative dentry +- NFSv3: use nfs_add_or_obtain() to create and reference inodes +- NFS: Refactor nfs_instantiate() for dentry referencing callers +- sysfs: Remove address alignment constraint in sysfs_emit{_at} +- Revert "mm, sl[aou]b: guarantee natural alignment for kmalloc(power-of-two)" +- Revert "mm, sl[ou]b: improve memory accounting" +- Revert "mm: memcontrol: fix slub memory accounting" +- io_uring: truncate lengths larger than MAX_RW_COUNT on provide buffers +- arm/ras: Report ARM processor information to userspace +- fuse: update attr_version counter on fuse_notify_inval_inode() +- alinux: random: speed up the initialization of module +- net: mac802154: Fix general protection fault +- cipso,calipso: resolve a number of problems with the DOI refcounts +- Bluetooth: verify AMP hci_chan before amp_destroy +- net/nfc: fix use-after-free llcp_sock_bind/connect +- x86: Select HARDIRQS_SW_RESEND on x86 +- x86/apic/vector: 
Force interupt handler invocation to irq context + +* Wed May 26 2021 Cheng Jian - 4.19.90-2105.4.0.0088 +- tools arch x86: Sync asm/cpufeatures.h with the with the kernel +- cpufreq: intel_pstate: Also use CPPC nominal_perf for base_frequency +- ACPI / CPPC: Fix guaranteed performance handling +- perf vendor events: Add JSON metrics for Cascadelake server +- perf vendor events: Add stepping in CPUID string for x86 +- cpufreq: intel_pstate: Fix compilation for !CONFIG_ACPI +- cpufreq: intel_pstate: Add base_frequency attribute +- ACPI / CPPC: Add support for guaranteed performance +- EDAC, skx: Fix randconfig builds in a better way +- EDAC, skx: Fix randconfig builds +- EDAC, skx_edac: Add address translation for non-volatile DIMMs +- ACPI/ADXL: Add address translation interface using an ACPI DSM +- x86/mce: Add macros for the corrected error count bit field +- x86/mce: Use BIT_ULL(x) for bit mask definitions +- x86/cpufeatures: Enumerate the new AVX512 BFLOAT16 instructions +- tools/testing/selftests/exec: fix link error +- NFSv4.1: Don't rebind to the same source port when reconnecting to the server +- genirq: Sanitize state handling in check_irq_resend() +- genirq: Add return value to check_irq_resend() +- irqchip/gic-v2, v3: Prevent SW resends entirely +- irqchip/git-v3-its: Implement irq_retrigger callback for device-triggered LPIs +- irqchip/gic-v2, v3: Implement irq_chip->irq_retrigger() +- genirq: Walk the irq_data hierarchy when resending an interrupt +- genirq: Add protection against unsafe usage of generic_handle_irq() + +* Mon May 24 2021 Cheng Jian - 4.19.90-2105.3.0.0087 +- jbd2: don't abort the journal when freeing buffers +- jbd2: ensure abort the journal if detect IO error when writing original buffer back +- jbd2: remove the out label in __jbd2_journal_remove_checkpoint() +- x86/unwind/orc: Remove boot-time ORC unwind tables sorting +- scripts/sorttable: Implement build-time ORC unwind table sorting +- scripts/sorttable: Rename 'sortextable' to 'sorttable' +- scripts/sortextable: Refactor the do_func() function +- scripts/sortextable: Remove dead code +- scripts/sortextable: Clean up the code to meet the kernel coding style better +- scripts/sortextable: Rewrite error/success handling +- treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 378 +- ext4: Fix bug on in ext4_es_cache_extent as ext4_split_extent_at failed +- Revert "ext4: Fix bug on in ext4_es_cache_extent as ext4_split_extent_at failed" +- nfs4.0: Refetch lease_time after clientid update +- nfs4: Rename nfs41_setup_state_renewal +- nfs4: Make nfs4_proc_get_lease_time available for nfs4.0 +- nfs: Fix copy-and-paste error in debug message +- ext4: cleanup in-core orphan list if ext4_truncate() failed to get a transaction handle +- bluetooth: eliminate the potential race condition when removing the HCI controller +- mm: enhance variables check and sync for pin mem +- perf jit: Fix inaccurate DWARF line table +- perf jvmti: Remove redundant jitdump line table entries +- perf jvmti: Fix demangling Java symbols +- perf tests: Add test for the java demangler +- perf jvmti: Do not report error when missing debug information +- perf jvmti: Fix jitdump for methods without debug info +- bpf: Fix leakage of uninitialized bpf stack under speculation +- bpf: Fix masking negation logic upon negative dst register +- bcache: add readahead cache policy options via sysfs interface +- mm/page_alloc: fix managed_pages of zone is incorrect and out of bounds +- freezer: Add unsafe version of 
freezable_schedule_timeout_interruptible() for NFS +- NFS: Allow signal interruption of NFS4ERR_DELAYed operations +- SUNRPC: Make "no retrans timeout" soft tasks behave like softconn for timeouts +- SUNRPC: Don't let RPC_SOFTCONN tasks time out if the transport is connected +- ext4: fix check to prevent false positive report of incorrect used inodes +- livepatch/x86_64: Fix the deadlock when insmoding livepatch kernel module +- tools/testing/selftests: add self-test for verifying load alignment +- fs/binfmt_elf: use PT_LOAD p_align values for suitable start address +- ext4: introduce ext4_sb_bread_unmovable() to replace sb_bread_unmovable() +- ext4: use ext4_sb_bread() instead of sb_bread() +- ext4: introduce ext4_sb_breadahead_unmovable() to replace sb_breadahead_unmovable() +- ext4: use ext4_buffer_uptodate() in __ext4_get_inode_loc() +- ext4: use common helpers in all places reading metadata buffers +- ext4: introduce new metadata buffer read helpers +- ext4: treat buffers contining write errors as valid in ext4_sb_bread() +- bpf: Fix truncation handling for mod32 dst reg wrt zero +- bpf: Fix 32 bit src register truncation on div/mod +- arm64: bpf: implement jitting of JMP32 +- x86_64: bpf: implement jitting of JMP32 +- bpf: JIT blinds support JMP32 +- bpf: interpreter support for JMP32 +- tools: bpftool: teach cfg code about JMP32 +- bpf: disassembler support JMP32 +- bpf: verifier support JMP32 +- bpf: refactor verifier min/max code for condition jump +- bpf: allocate 0x06 to new eBPF instruction class JMP32 + +* Wed May 12 2021 Cheng Jian - 4.19.90-2105.2.0.0086 +- ovl: allow upperdir inside lowerdir +- ACPI: x86: Call acpi_boot_table_init() after acpi_table_upgrade() +- ACPI: tables: x86: Reserve memory occupied by ACPI tables +- x86/crash: Fix crash_setup_memmap_entries() out-of-bounds access +- locking/qrwlock: Fix ordering in queued_write_lock_slowpath() +- net: ip6_tunnel: Unregister catch-all devices +- netfilter: nft_limit: avoid possible divide error in nft_limit_init +- netfilter: conntrack: do not print icmpv6 as unknown via /proc +- scsi: libsas: Reset num_scatter if libata marks qc as NODATA +- arm64: alternatives: Move length validation in alternative_{insn, endif} +- arm64: fix inline asm in load_unaligned_zeropad() +- readdir: make sure to verify directory entry for legacy interfaces too +- neighbour: Disregard DEAD dst in neigh_update +- driver core: Fix locking bug in deferred_probe_timeout_work_func() +- netfilter: x_tables: fix compat match/target pad out-of-bound write +- workqueue: Move the position of debug_work_activate() in __queue_work() +- xfrm: interface: fix ipv4 pmtu check to honor ip header df +- net-ipv6: bugfix - raw & sctp - switch to ipv6_can_nonlocal_bind() +- net: ensure mac header is set in virtio_net_hdr_to_skb() +- fs: direct-io: fix missing sdio->boundary +- net: ipv6: check for validity before dereferencing cfg->fc_nlinfo.nlh +- cifs: Silently ignore unknown oplock break handle +- cifs: revalidate mapping when we open files for SMB1 POSIX +- scsi: target: pscsi: Clean up after failure in pscsi_map_sg() +- mm: fix race by making init_zero_pfn() early_initcall +- tracing: Fix stack trace event size +- PM: runtime: Fix ordering in pm_runtime_get_suppliers() +- PM: runtime: Fix race getting/putting suppliers at probe +- ext4: do not iput inode under running transaction in ext4_rename() +- locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling +- thermal/core: Add NULL pointer check before using cooling device stats +- scsi: st: Fix a use after 
free in st_open() +- vhost: Fix vhost_vq_reset() +- rpc: fix NULL dereference on kmalloc failure +- ext4: fix bh ref count on error paths +- ipv6: weaken the v4mapped source check +- tcp: relookup sock for RST+ACK packets handled by obsolete req sock +- nfs: we don't support removing system.nfs4_acl +- NFSv4.2: fix return value of _nfs4_get_security_label() +- nfs: fix PNFS_FLEXFILE_LAYOUT Kconfig default +- pNFS/NFSv4: Try to return invalid layout in pnfs_layout_process() +- pNFS/NFSv4: Fix a layout segment leak in pnfs_layout_process() +- NFSv4.2: condition READDIR's mask for security label based on LSM state +- NFSv4.2: support EXCHGID4_FLAG_SUPP_FENCE_OPS 4.2 EXCHANGE_ID flag +- NFS: fix nfs_path in case of a rename retry +- NFSv4.1 handle ERR_DELAY error reclaiming locking state on delegation recall +- NFS: Don't return layout segments that are in use +- NFS: Don't move layouts to plh_return_segs list while in use +- SUNRPC reverting d03727b248d0 ("NFSv4 fix CLOSE not waiting for direct IO compeletion") +- NFSv4 fix CLOSE not waiting for direct IO compeletion +- NFSv4.1 fix rpc_call_done assignment for BIND_CONN_TO_SESSION +- nfs: Fix potential posix_acl refcnt leak in nfs3_set_acl +- NFSv4/pnfs: Return valid stateids in nfs_layout_find_inode_by_stateid() +- NFSv4.1 make cachethis=no for writes +- NFS/pnfs: Fix pnfs_generic_prepare_to_resend_writes() +- NFS/pnfs: Bulk destroy of layouts needs to be safe w.r.t. umount +- cgroup/files: support boot parameter to control if disable files cgroup +- efi: Fix a race and a buffer overflow while reading efivars via sysfs +- RDMA/hns: Allocate one more recv SGE for HIP08 +- mm: memcontrol: fix slub memory accounting +- mm, sl[ou]b: improve memory accounting +- mm: fix numa stats for thp migration +- mm/vmscan: count layzfree pages and fix nr_isolated_* mismatch +- SUNRPC: Close a race with transport setup and module put +- sunrpc: Change the place of endtime in struct krb5_ctx +- bpf: Tighten speculative pointer arithmetic mask +- bpf: Move sanitize_val_alu out of op switch +- bpf: Refactor and streamline bounds check into helper +- bpf: Improve verifier error messages for users +- bpf: Rework ptr_limit into alu_limit and add common error path +- bpf: Ensure off_reg has no mixed signed bounds for all types +- bpf: Move off_reg into sanitize_ptr_alu +- bpf: Add sanity check for upper ptr_limit +- bpf: Simplify alu_limit masking for pointer arithmetic + +* Tue May 11 2021 Cheng Jian - 4.19.90-2104.26.0.0085 +- add kabi list for aarch64 and x86_64 + +* Sat May 08 2021 Cheng Jian - 4.19.90-2104.26.0.0084 +- pid: fix pid recover method kabi change +- config: enable kernel hotupgrade features by default +- kexec: Add quick kexec support for kernel +- arm64: smp: Add support for cpu park +- pid: add pid reserve method for checkpoint and restore +- mm: add pin memory method for checkpoint add restore +- Revert "sched: Introduce qos scheduler for co-location" +- Revert "sched: Throttle qos cfs_rq when current cpu is running online task" +- Revert "sched: Enable qos scheduler config" +- Revert "memcg: support priority for oom" +- Revert "memcg: enable CONFIG_MEMCG_QOS by default" +- Revert "memcg: fix kabi broken when enable CONFIG_MEMCG_QOS" +- f2fs: fix to avoid out-of-bounds memory access +- ext4: Reduce ext4 timestamp warnings +- livepatch: Restoring code segment permissions after stop_machine completed +- livepatch: Delete redundant variable 'flag' +- memcg: fix kabi broken when enable CONFIG_MEMCG_QOS +- memcg: enable CONFIG_MEMCG_QOS by default +- 
memcg: support priority for oom +- sched: Enable qos scheduler config +- sched: Throttle qos cfs_rq when current cpu is running online task +- sched: Introduce qos scheduler for co-location +- ipv6: route: convert comma to semicolon +- ipv6/route: Add a missing check on proc_dointvec +- netfilter: xtables: avoid BUG_ON +- SUNRPC: Test whether the task is queued before grabbing the queue spinlocks +- SUNRPC: If there is no reply expected, bail early from call_decode +- SUNRPC: Fix backchannel latency metrics +- sunrpc: convert to time64_t for expiry +- sunrpc: Fix potential leaks in sunrpc_cache_unhash() +- SUNRPC: Skip zero-refcount transports +- SUNRPC: Fix buffer handling of GSS MIC without slack +- SUNRPC: Don't allow compiler optimisation of svc_xprt_release_slot() +- SUNRPC/nfs: Fix return value for nfs4_callback_compound() +- net/sunrpc: return 0 on attempt to write to "transports" +- net/sunrpc: Fix return value for sysctl sunrpc.transports +- sunrpc: raise kernel RPC channel buffer size +- sunrpc: add missing newline when printing parameter 'pool_mode' by sysfs +- xprtrdma: Fix trace point use-after-free race +- SUNRPC: Fix backchannel RPC soft lockups +- SUNRPC/cache: Fix unsafe traverse caused double-free in cache_purge +- nfsd: export upcalls must not return ESTALE when mountd is down +- sunrpc/cache: handle missing listeners better. +- xprtrdma: Fix handling of RDMA_ERROR replies +- xprtrdma: Expose transport header errors +- sunrpc: destroy rpc_inode_cachep after unregister_filesystem +- xprtrdma: fix incorrect header size calculations +- nvme: fix ns removal hang when failing to revalidate due to a transient error +- kernel/cputime: do not update cputime when cpu offline +- perf/x86: Always store regs->ip in perf_callchain_kernel() +- perf/x86: Make perf callchains work without CONFIG_FRAME_POINTER +- irqchip/gic-v3: Do not enable irqs when handling spurious interrups +- config: enable CONFIG_HW_RANDOM_HISI_V2 by default +- hwrng: add data_mode to support rand data with post process +- hwrng: add HiSilicon TRNG driver + +* Sun Apr 25 2021 Cheng Jian - 4.19.90-2104.21.0.0083 +- bcache: Rewrite patch to delay to invalidate cache data +- nfc: Avoid endless loops caused by repeated llcp_sock_connect() +- nfc: fix memory leak in llcp_sock_connect() +- nfc: fix refcount leak in llcp_sock_connect() +- nfc: fix refcount leak in llcp_sock_bind() + +* Thu Apr 22 2021 Cheng Jian - 4.19.90-2104.19.0.0082 +- KVM: arm64: Support the vCPU preemption check +- KVM: arm64: Add interface to support vCPU preempted check +- KVM: arm64: Support pvsched preempted via shared structure +- KVM: arm64: Implement PV_SCHED_FEATURES call +- KVM: arm64: Document PV-sched interface +- KVM: Check preempted_in_kernel for involuntary preemption +- KVM: Boost vCPUs that are delivering interrupts +- arm64/spinlock: fix a -Wunused-function warning +- locking/osq: Use optimized spinning loop for arm64 +- arm/arm64: Make use of the SMCCC 1.1 wrapper +- arm/arm64: Provide a wrapper for SMCCC 1.1 calls +- KVM: Implement kvm_put_guest() +- KVM: arm/arm64: Factor out hypercall handling from PSCI code + +* Thu Apr 22 2021 Cheng Jian - 4.19.90-2104.18.0.0081 +- bcache: Add a sample of userspace prefetch client +- bcache: Delay to invalidate cache data in writearound write +- bcache: inflight prefetch requests block overlapped normal requests +- bcache: provide a switch to bypass all IO requests +- bcache: add a framework to perform prefetch + +* Thu Apr 22 2021 Cheng Jian - 4.19.90-2104.17.0.0080 +- net/hinic: Fix null 
pointer dereference in hinic_physical_port_id +- kvm: debugfs: Export x86 kvm exits to vcpu_stat +- ext4: fix time overflow +- ext4: drop legacy pre-1970 encoding workaround +- fuse: fix live lock in fuse_iget() +- fuse: fix bad inode +- net/sctp: fix race condition in sctp_destroy_sock +- config: set config hip08 prefetch default value +- ext4: do not set SB_ACTIVE in ext4_orphan_cleanup() +- RDMA/hns: add eq and cq time cfg compatibility support. +- nvme: fix incorrect behavior when BLKROSET is called by the user +- nvme-fc: fix error loop in create_hw_io_queues +- nvme-fc: Fix wrong return value in __nvme_fc_init_request() +- nvme-multipath: fix deadlock between ana_work and scan_work +- nvme: fix deadlock caused by ANA update wrong locking +- nvme-multipath: Fix memory leak with ana_log_buf +- nvme-fc: fix module unloads while lports still pending +- ipmi: remve duplicate code in __ipmi_bmc_register() +- ipmi_si_intf: Fix race in timer shutdown handling +- ipmi_ssif: fix unexpected driver unregister warning +- ipmi_si: fix unexpected driver unregister warning +- ipmi:ssif: Only unregister the platform driver if it was registered +- ipmi: Make ipmi_interfaces_srcu variable static +- ipmi: Fix return value when a message is truncated +- ipmi: Free the address list on module cleanup +- net: hns3: clear VF down state bit before request link status +- config: disable config ARM64_BOOTPARAM_HOTPLUG_CPU0 by default +- config: disable CONFIG_SATA_ZHAOXIN by default + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.16.0.0079 +- config/arm64: fix kabi by disable CONFIG_NVME_MULTIPATH + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.15.0.0078 +- config/x86: enable SHRINK_PAGECACHE +- arm64: Add config switch and kernel parameter for CPU0 hotplug + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.14.0.0077 +- arm64: errata: enable HISILICON_ERRATUM_HIP08_RU_PREFETCH +- arm64: errata: fix kabi changed for cpu_errata +- arm64: errata: add option to disable cache readunique prefetch on 1620 + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.13.0.0076 +- firewire: nosy: Fix a use-after-free bug in nosy_ioctl() + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.12.0.0075 +- iommu/arm-smmu-v3: Reduce contention during command-queue insertion +- iommu/arm-smmu-v3: Operate directly on low-level queue where possible +- iommu/arm-smmu-v3: Move low-level queue fields out of arm_smmu_queue +- iommu/arm-smmu-v3: Drop unused 'q' argument from Q_OVF macro +- iommu/arm-smmu-v3: Separate s/w and h/w views of prod and cons indexes +- iommu/io-pgtable: Rename iommu_gather_ops to iommu_flush_ops +- iommu/io-pgtable-arm: Remove redundant call to io_pgtable_tlb_sync() +- iommu/arm-smmu-v3: Increase maximum size of queues +- iommu/io-pgtable: Replace IO_PGTABLE_QUIRK_NO_DMA with specific flag +- iommu: Allow io-pgtable to be used outside of drivers/iommu/ +- iommu: Fix flush_tlb_all typo +- iommu: Change tlb_range_add to iotlb_range_add and tlb_sync to iotlb_sync + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.11.0.0074 +- io_uring: order refnode recycling +- io_uring: get an active ref_node from files_data +- io_uring: fix racy req->flags modification +- io_uring: defer file table grabbing request cleanup for locked requests +- io_uring: batch put_task_struct() +- tasks: add put_task_struct_many() +- io_uring: fix missing io_queue_linked_timeout() +- io_uring: deduplicate io_grab_files() calls +- io_uring: don't do opcode prep twice +- io_uring: don't open-code recv kbuf managment +- io_uring: extract io_put_kbuf() helper +- io_uring: 
simplify file ref tracking in submission state +- io_uring: move BUFFER_SELECT check into *recv[msg] +- io_uring: free selected-bufs if error'ed +- io_uring: don't forget cflags in io_recv() +- io_uring: remove extra checks in send/recv +- io_uring: indent left {send,recv}[msg]() +- io-wq: update hash bits +- io_uring: get rid of atomic FAA for cq_timeouts +- io_uring: consolidate *_check_overflow accounting +- io_uring: de-unionise io_kiocb +- io_uring: follow **iovec idiom in io_import_iovec +- io_uring: mark ->work uninitialised after cleanup +- io_uring/io-wq: move RLIMIT_FSIZE to io-wq +- io_uring: alloc ->io in io_req_defer_prep() +- io_uring: inline io_req_work_grab_env() +- io_uring: fix racy IOPOLL completions +- io_uring: always let io_iopoll_complete() complete polled io +- io_uring: don't recurse on tsk->sighand->siglock with signalfd +- io_uring: don't use poll handler if file can't be nonblocking read/written +- io_uring: fix linked deferred ->files cancellation +- io_uring: fix cancel of deferred reqs with ->files +- io_uring: flush timeouts that should already have expired +- io_uring: find and cancel head link async work on files exit +- io_uring: always plug for any number of IOs +- io_uring: fix recursive completion locking on oveflow flush +- io_uring: enable lookup of links holding inflight files +- io_uring: place cflags into completion data +- io_uring: remove sequence from io_kiocb +- io_uring: use non-intrusive list for defer +- io_uring: remove init for unused list +- io_uring: add req->timeout.list +- io_uring: use completion list for CQ overflow +- io_uring: use inflight_entry list for iopoll'ing +- io_uring: rename ctx->poll into ctx->iopoll +- io_uring: share completion list w/ per-op space +- io_uring: get rid of __req_need_defer() +- io_uring: only call kfree() for a non-zero pointer +- io_uring: fix a use after free in io_async_task_func() +- io_uring: remove nr_events arg from iopoll_check() +- io_uring: don't delay iopoll'ed req completion +- io_uring: fix lost cqe->flags +- io_uring: keep queue_sqe()'s fail path separately +- io_uring: fix mis-refcounting linked timeouts +- io_uring: use new io_req_task_work_add() helper throughout +- io_uring: abstract out task work running +- io_uring: do grab_env() just before punting +- io_uring: factor out grab_env() from defer_prep() +- io_uring: do init work in grab_env() +- io_uring: don't pass def into io_req_work_grab_env +- io_uring: fix function args for !CONFIG_NET +- io_uring: set @poll->file after @poll init +- io_uring: remove REQ_F_MUST_PUNT +- io_uring: remove setting REQ_F_MUST_PUNT in rw +- io_uring: optimise io_req_find_next() fast check +- io_uring: kill REQ_F_TIMEOUT_NOSEQ +- io_uring: kill REQ_F_TIMEOUT +- io_uring: replace find_next() out param with ret +- io_uring: fix missing io_grab_files() +- io_uring: don't mark link's head for_async +- io_uring: fix feeding io-wq with uninit reqs +- io_uring: fix punting req w/o grabbed env +- io_uring: fix req->work corruption +- io_uring: simplify io_async_task_func() +- io_uring: fix NULL mm in io_poll_task_func() +- io_uring: use task_work for links if possible +- io_uring: do task_work_run() during iopoll +- io_uring: clean up req->result setting by rw +- io_uring: cosmetic changes for batch free +- io_uring: batch-free linked requests as well +- io_uring: dismantle req early and remove need_iter +- io_uring: remove inflight batching in free_many() +- io_uring: fix refs underflow in io_iopoll_queue() +- io_uring: enable READ/WRITE to use deferred 
completions +- io_uring: pass in completion state to appropriate issue side handlers +- io_uring: pass down completion state on the issue side +- io_uring: add 'io_comp_state' to struct io_submit_state +- io_uring: provide generic io_req_complete() helper +- io_uring: add missing REQ_F_COMP_LOCKED for nested requests +- io_uring: clean up io_kill_linked_timeout() locking +- io_uring: deduplicate freeing linked timeouts +- io_uring: kill REQ_F_LINK_NEXT +- io_uring: fix stalled deferred requests +- io_uring: add IORING_OP_OPENAT2 for compatablity +- arm64: fix kabi with io_uring interface +- x86: fix kabi with io_uring interface +- io_uring: fix provide_buffers sign extension +- io_uring: ignore double poll add on the same waitqueue head +- io_uring: fix SQPOLL IORING_OP_CLOSE cancelation state +- io_uring: make ctx cancel on exit targeted to actual ctx +- io_uring: fix error path cleanup in io_sqe_files_register() +- io_uring: ensure open/openat2 name is cleaned on cancelation +- io_uring: sanitize double poll handling +- io_uring: fail poll arm on queue proc failure +- io_uring: allow non-fixed files with SQPOLL +- io_uring: ensure consistent view of original task ->mm from SQPOLL +- io_uring: stash ctx task reference for SQPOLL +- io_uring: don't miscount pinned memory +- io_uring: don't burn CPU for iopoll on exit +- io_uring: fix imbalanced sqo_mm accounting +- io_uring: return locked and pinned page accounting +- io_uring: fix missing ->mm on exit +- io_uring: fix NULL-mm for linked reqs +- io_uring: account locked memory before potential error case +- io_uring: don't touch 'ctx' after installing file descriptor +- io_uring: remove dead 'ctx' argument and move forward declaration +- io_uring: fix recvmsg setup with compat buf-select +- io_uring: fix shift-out-of-bounds when round up cq size +- io_uring: round-up cq size before comparing with rounded sq size +- io_uring: use type appropriate io_kiocb handler for double poll +- io_uring: fix double poll mask init +- io_uring: Fix sizeof() mismatch +- io_uring: keep a pointer ref_node in file_data +- io_uring: refactor *files_register()'s error paths +- io_uring: clean file_data access in files_register +- io-wq: fix use-after-free in io_wq_worker_running +- io_uring: fix potential ABBA deadlock in ->show_fdinfo() +- io_uring: always delete double poll wait entry on match +- io-wq: fix hang after cancelling pending hashed work +- io_uring: fix racy overflow count reporting +- io_uring: partially inline io_iopoll_getevents() +- io_uring: briefly loose locks while reaping events +- io_uring: fix stopping iopoll'ing too early +- io_uring: fix potential use after free on fallback request free +- io_uring: set table->files[i] to NULL when io_sqe_file_register failed +- io_uring: fix removing the wrong file in __io_sqe_files_update() +- io_uring: fix IOPOLL -EAGAIN retries +- io_uring: clear req->result on IOPOLL re-issue +- io_uring: hold 'ctx' reference around task_work queue + execute +- io_uring: use TWA_SIGNAL for task_work uncondtionally +- io_uring: Fix NULL pointer dereference in loop_rw_iter() +- io_uring: clear IORING_SQ_NEED_WAKEUP after executing task works +- io_uring: add a helper for async rw iovec prep +- io_uring: simplify io_req_map_rw() +- io_uring: extract io_sendmsg_copy_hdr() +- io_uring: use more specific type in rcv/snd msg cp +- io_uring: rename sr->msg into umsg +- io_uring: fix sq array offset calculation +- io_uring: fix lockup in io_fail_links() +- io_uring: fix ->work corruption with poll_add +- io_uring: missed 
req_init_async() for IOSQE_ASYNC +- io_uring: always allow drain/link/hardlink/async sqe flags +- io_uring: ensure double poll additions work with both request types +- io_uring: fix recvmsg memory leak with buffer selection +- io_uring: fix not initialised work->flags +- io_uring: fix missing msg_name assignment +- io_uring: account user memory freed when exit has been queued +- io_uring: fix memleak in io_sqe_files_register() +- io_uring: fix memleak in __io_sqe_files_update() +- io_uring: export cq overflow status to userspace +- io_uring: fix regression with always ignoring signals in io_cqring_wait() +- io_uring: use signal based task_work running +- task_work: teach task_work_add() to do signal_wake_up() +- io_uring: fix current->mm NULL dereference on exit +- io_uring: fix hanging iopoll in case of -EAGAIN +- io_uring: fix io_sq_thread no schedule when busy +- io-wq: return next work from ->do_work() directly +- io-wq: compact io-wq flags numbers +- io_uring: separate reporting of ring pages from registered pages +- io_uring: report pinned memory usage +- io_uring: rename ctx->account_mem field +- io_uring: add wrappers for memory accounting +- io_uring: use EPOLLEXCLUSIVE flag to aoid thundering herd type behavior +- io_uring: change the poll type to be 32-bits +- io_uring: fix possible race condition against REQ_F_NEED_CLEANUP +- io_uring: reap poll completions while waiting for refs to drop on exit +- io_uring: acquire 'mm' for task_work for SQPOLL +- io_uring: add memory barrier to synchronize io_kiocb's result and iopoll_completed +- io_uring: don't fail links for EAGAIN error in IOPOLL mode +- io_uring: cancel by ->task not pid +- io_uring: lazy get task +- io_uring: batch cancel in io_uring_cancel_files() +- io_uring: cancel all task's requests on exit +- io-wq: add an option to cancel all matched reqs +- io-wq: reorder cancellation pending -> running +- io_uring: fix lazy work init +- io_uring: fix io_kiocb.flags modification race in IOPOLL mode +- io_uring: check file O_NONBLOCK state for accept +- io_uring: avoid unnecessary io_wq_work copy for fast poll feature +- io_uring: avoid whole io_wq_work copy for requests completed inline +- io_uring: allow O_NONBLOCK async retry +- io_wq: add per-wq work handler instead of per work +- io_uring: don't arm a timeout through work.func +- io_uring: remove custom ->func handlers +- io_uring: don't derive close state from ->func +- io_uring: use kvfree() in io_sqe_buffer_register() +- io_uring: validate the full range of provided buffers for access +- io_uring: re-set iov base/len for buffer select retry +- io_uring: move send/recv IOPOLL check into prep +- io_uring: fix {SQ,IO}POLL with unsupported opcodes +- io_uring: disallow close of ring itself +- io_uring: fix overflowed reqs cancellation +- io_uring: off timeouts based only on completions +- io_uring: move timeouts flushing to a helper +- statx: hide interfaces no longer used by io_uring +- io_uring: call statx directly +- statx: allow system call to be invoked from io_uring +- io_uring: add io_statx structure +- io_uring: get rid of manual punting in io_close +- io_uring: separate DRAIN flushing into a cold path +- io_uring: don't re-read sqe->off in timeout_prep() +- io_uring: simplify io_timeout locking +- io_uring: fix flush req->refs underflow +- io_uring: don't submit sqes when ctx->refs is dying +- io_uring: async task poll trigger cleanup +- io_uring: add tee(2) support +- splice: export do_tee() +- io_uring: don't repeat valid flag list +- io_uring: rename 
io_file_put() +- io_uring: remove req->needs_fixed_files +- io_uring: cleanup io_poll_remove_one() logic +- io_uring: file registration list and lock optimization +- io_uring: add IORING_CQ_EVENTFD_DISABLED to the CQ ring flags +- io_uring: add 'cq_flags' field for the CQ ring +- io_uring: allow POLL_ADD with double poll_wait() users +- io_uring: batch reap of dead file registrations +- io_uring: name sq thread and ref completions +- io_uring: remove duplicate semicolon at the end of line +- io_uring: remove obsolete 'state' parameter +- io_uring: remove 'fd is io_uring' from close path +- io_uring: reset -EBUSY error when io sq thread is waken up +- io_uring: don't add non-IO requests to iopoll pending list +- io_uring: don't use kiocb.private to store buf_index +- io_uring: cancel work if task_work_add() fails +- io_uring: remove dead check in io_splice() +- io_uring: fix FORCE_ASYNC req preparation +- io_uring: don't prepare DRAIN reqs twice +- io_uring: initialize ctx->sqo_wait earlier +- io_uring: polled fixed file must go through free iteration +- io_uring: fix zero len do_splice() +- io_uring: don't use 'fd' for openat/openat2/statx +- splice: move f_mode checks to do_{splice,tee}() +- io_uring: handle -EFAULT properly in io_uring_setup() +- io_uring: fix mismatched finish_wait() calls in io_uring_cancel_files() +- io_uring: punt splice async because of inode mutex +- io_uring: check non-sync defer_list carefully +- io_uring: fix extra put in sync_file_range() +- io_uring: use cond_resched() in io_ring_ctx_wait_and_kill() +- io_uring: use proper references for fallback_req locking +- io_uring: only force async punt if poll based retry can't handle it +- io_uring: enable poll retry for any file with ->read_iter / ->write_iter +- io_uring: statx must grab the file table for valid fd +- io_uring: only restore req->work for req that needs do completion +- io_uring: don't count rqs failed after current one +- io_uring: kill already cached timeout.seq_offset +- io_uring: fix cached_sq_head in io_timeout() +- io_uring: only post events in io_poll_remove_all() if we completed some +- io_uring: io_async_task_func() should check and honor cancelation +- io_uring: check for need to re-wait in polled async handling +- io_uring: correct O_NONBLOCK check for splice punt +- io_uring: restore req->work when canceling poll request +- io_uring: move all request init code in one place +- io_uring: keep all sqe->flags in req->flags +- io_uring: early submission req fail code +- io_uring: track mm through current->mm +- io_uring: remove obsolete @mm_fault +- io_uring: punt final io_ring_ctx wait-and-free to workqueue +- io_uring: fix fs cleanup on cqe overflow +- io_uring: don't read user-shared sqe flags twice +- io_uring: remove req init from io_get_req() +- io_uring: alloc req only after getting sqe +- io_uring: simplify io_get_sqring +- io_uring: do not always copy iovec in io_req_map_rw() +- io_uring: ensure openat sets O_LARGEFILE if needed +- io_uring: initialize fixed_file_data lock +- io_uring: remove redundant variable pointer nxt and io_wq_assign_next call +- io_uring: fix ctx refcounting in io_submit_sqes() +- io_uring: process requests completed with -EAGAIN on poll list +- io_uring: remove bogus RLIMIT_NOFILE check in file registration +- io_uring: use io-wq manager as backup task if task is exiting +- io_uring: grab task reference for poll requests +- io_uring: retry poll if we got woken with non-matching mask +- io_uring: add missing finish_wait() in io_sq_thread() +- io_uring: refactor 
file register/unregister/update handling +- io_uring: cleanup io_alloc_async_ctx() +- io_uring: fix missing 'return' in comment +- io-wq: handle hashed writes in chains +- io-uring: drop 'free_pfile' in struct io_file_put +- io-uring: drop completion when removing file +- io_uring: Fix ->data corruption on re-enqueue +- io-wq: close cancel gap for hashed linked work +- io_uring: make spdxcheck.py happy +- io_uring: honor original task RLIMIT_FSIZE +- io-wq: hash dependent work +- io-wq: split hashing and enqueueing +- io-wq: don't resched if there is no work +- io-wq: remove duplicated cancel code +- io_uring: fix truncated async read/readv and write/writev retry +- io_uring: dual license io_uring.h uapi header +- io_uring: io_uring_enter(2) don't poll while SETUP_IOPOLL|SETUP_SQPOLL enabled +- io_uring: Fix unused function warnings +- io_uring: add end-of-bits marker and build time verify it +- io_uring: provide means of removing buffers +- io_uring: add IOSQE_BUFFER_SELECT support for IORING_OP_RECVMSG +- net: abstract out normal and compat msghdr import +- io_uring: add IOSQE_BUFFER_SELECT support for IORING_OP_READV +- io_uring: support buffer selection for OP_READ and OP_RECV +- io_uring: add IORING_OP_PROVIDE_BUFFERS +- io_uring: buffer registration infrastructure +- io_uring/io-wq: forward submission ref to async +- io-wq: optimise out *next_work() double lock +- io-wq: optimise locking in io_worker_handle_work() +- io-wq: shuffle io_worker_handle_work() code +- io_uring: get next work with submission ref drop +- io_uring: remove @nxt from handlers +- io_uring: make submission ref putting consistent +- io_uring: clean up io_close +- io_uring: Ensure mask is initialized in io_arm_poll_handler +- io_uring: remove io_prep_next_work() +- io_uring: remove extra nxt check after punt +- io_uring: use poll driven retry for files that support it +- io_uring: mark requests that we can do poll async in io_op_defs +- io_uring: add per-task callback handler +- io_uring: store io_kiocb in wait->private +- task_work_run: don't take ->pi_lock unconditionally +- io-wq: use BIT for ulong hash +- io_uring: remove IO_WQ_WORK_CB +- io-wq: remove unused IO_WQ_WORK_HAS_MM +- io_uring: extract kmsg copy helper +- io_uring: clean io_poll_complete +- io_uring: add splice(2) support +- io_uring: add interface for getting files +- splice: make do_splice public +- io_uring: remove req->in_async +- io_uring: don't do full *prep_worker() from io-wq +- io_uring: don't call work.func from sync ctx +- io_uring: io_accept() should hold on to submit reference on retry +- io_uring: consider any io_read/write -EAGAIN as final +- io_uring: make sure accept honor rlimit nofile +- io_uring: make sure openat/openat2 honor rlimit nofile +- io_uring: NULL-deref for IOSQE_{ASYNC,DRAIN} +- io_uring: ensure RCU callback ordering with rcu_barrier() +- io_uring: fix lockup with timeouts +- io_uring: free fixed_file_data after RCU grace period +- io-wq: remove io_wq_flush and IO_WQ_WORK_INTERNAL +- io-wq: fix IO_WQ_WORK_NO_CANCEL cancellation +- io_uring: fix 32-bit compatability with sendmsg/recvmsg +- io_uring: define and set show_fdinfo only if procfs is enabled +- io_uring: drop file set ref put/get on switch +- io_uring: import_single_range() returns 0/-ERROR +- io_uring: pick up link work on submit reference drop +- io-wq: ensure work->task_pid is cleared on init +- io-wq: remove spin-for-work optimization +- io_uring: fix poll_list race for SETUP_IOPOLL|SETUP_SQPOLL +- io_uring: fix personality idr leak +- io_uring: handle 
multiple personalities in link chains +- io_uring: fix __io_iopoll_check deadlock in io_sq_thread +- io_uring: prevent sq_thread from spinning when it should stop +- io_uring: fix use-after-free by io_cleanup_req() +- io_uring: remove unnecessary NULL checks +- io_uring: add missing io_req_cancelled() +- io_uring: prune request from overflow list on flush +- io-wq: don't call kXalloc_node() with non-online node +- io_uring: retain sockaddr_storage across send/recvmsg async punt +- io_uring: cancel pending async work if task exits +- io-wq: add io_wq_cancel_pid() to cancel based on a specific pid +- io-wq: make io_wqe_cancel_work() take a match handler +- io_uring: fix openat/statx's filename leak +- io_uring: fix double prep iovec leak +- io_uring: fix async close() with f_op->flush() +- io_uring: allow AT_FDCWD for non-file openat/openat2/statx +- io_uring: grab ->fs as part of async preparation +- io-wq: add support for inheriting ->fs +- io_uring: retry raw bdev writes if we hit -EOPNOTSUPP +- io_uring: add cleanup for openat()/statx() +- io_uring: fix iovec leaks +- io_uring: remove unused struct io_async_open +- io_uring: flush overflowed CQ events in the io_uring_poll() +- io_uring: statx/openat/openat2 don't support fixed files +- io_uring: fix deferred req iovec leak +- io_uring: fix 1-bit bitfields to be unsigned +- io_uring: get rid of delayed mm check +- io_uring: cleanup fixed file data table references +- io_uring: spin for sq thread to idle on shutdown +- io_uring: put the flag changing code in the same spot +- io_uring: iterate req cache backwards +- io_uring: punt even fadvise() WILLNEED to async context +- io_uring: fix sporadic double CQE entry for close +- io_uring: remove extra ->file check +- io_uring: don't map read/write iovec potentially twice +- io_uring: use the proper helpers for io_send/recv +- io_uring: prevent potential eventfd recursion on poll +- io_uring: add BUILD_BUG_ON() to assert the layout of struct io_uring_sqe +- io_uring: add ->show_fdinfo() for the io_uring file descriptor +- io_uring: add support for epoll_ctl(2) +- eventpoll: support non-blocking do_epoll_ctl() calls +- eventpoll: abstract out epoll_ctl() handler +- io_uring: fix linked command file table usage +- io_uring: support using a registered personality for commands +- io_uring: allow registering credentials +- io_uring: add io-wq workqueue sharing +- io-wq: allow grabbing existing io-wq +- io_uring/io-wq: don't use static creds/mm assignments +- io-wq: make the io_wq ref counted +- io_uring: fix refcounting with batched allocations at OOM +- io_uring: add comment for drain_next +- io_uring: don't attempt to copy iovec for READ/WRITE +- io_uring: honor IOSQE_ASYNC for linked reqs +- io_uring: prep req when do IOSQE_ASYNC +- io_uring: use labeled array init in io_op_defs +- io_uring: optimise sqe-to-req flags translation +- io_uring: remove REQ_F_IO_DRAINED +- io_uring: file switch work needs to get flushed on exit +- io_uring: hide uring_fd in ctx +- io_uring: remove extra check in __io_commit_cqring +- io_uring: optimise use of ctx->drain_next +- io_uring: add support for probing opcodes +- io_uring: account fixed file references correctly in batch +- io_uring: add opcode to issue trace event +- io_uring: remove 'fname' from io_open structure +- io_uring: enable option to only trigger eventfd for async completions +- io_uring: change io_ring_ctx bool fields into bit fields +- io_uring: file set registration should use interruptible waits +- io_uring: Remove unnecessary null check +- 
io_uring: add support for send(2) and recv(2) +- io_uring: remove extra io_wq_current_is_worker() +- io_uring: optimise commit_sqring() for common case +- io_uring: optimise head checks in io_get_sqring() +- io_uring: clamp to_submit in io_submit_sqes() +- io_uring: add support for IORING_SETUP_CLAMP +- io_uring: extend batch freeing to cover more cases +- io_uring: wrap multi-req freeing in struct req_batch +- io_uring: batch getting pcpu references +- pcpu_ref: add percpu_ref_tryget_many() +- io_uring: add IORING_OP_MADVISE +- mm: make do_madvise() available internally +- io_uring: add IORING_OP_FADVISE +- io_uring: allow use of offset == -1 to mean file position +- io_uring: add non-vectored read/write commands +- io_uring: improve poll completion performance +- io_uring: split overflow state into SQ and CQ side +- io_uring: add lookup table for various opcode needs +- io_uring: remove two unnecessary function declarations +- io_uring: move *queue_link_head() from common path +- io_uring: rename prev to head +- io_uring: add IOSQE_ASYNC +- io-wq: support concurrent non-blocking work +- io_uring: add support for IORING_OP_STATX +- fs: make two stat prep helpers available +- io_uring: avoid ring quiesce for fixed file set unregister and update +- io_uring: add support for IORING_OP_CLOSE +- io-wq: add support for uncancellable work +- percpu-refcount: Introduce percpu_ref_resurrect() +- percpu_ref: introduce PERCPU_REF_ALLOW_REINIT flag +- fs: make filename_lookup available externally +- fs: introduce __close_fd_get_file to support IORING_OP_CLOSE for io_uring +- io_uring: add support for IORING_OP_OPENAT +- fs: make build_open_flags() available internally +- io_uring: add support for fallocate() +- io_uring: don't cancel all work on process exit +- Revert "io_uring: only allow submit from owning task" +- io_uring: fix compat for IORING_REGISTER_FILES_UPDATE +- io_uring: only allow submit from owning task +- io_uring: ensure workqueue offload grabs ring mutex for poll list +- io_uring: clear req->result always before issuing a read/write request +- io_uring: be consistent in assigning next work from handler +- io-wq: cancel work if we fail getting a mm reference +- io_uring: don't setup async context for read/write fixed +- io_uring: remove punt of short reads to async context +- io-wq: add cond_resched() to worker thread +- io-wq: remove unused busy list from io_sqe +- io_uring: pass in 'sqe' to the prep handlers +- io_uring: standardize the prep methods +- io_uring: read 'count' for IORING_OP_TIMEOUT in prep handler +- io_uring: move all prep state for IORING_OP_{SEND,RECV}_MGS to prep handler +- io_uring: move all prep state for IORING_OP_CONNECT to prep handler +- io_uring: add and use struct io_rw for read/writes +- io_uring: use u64_to_user_ptr() consistently +- io_uring: io_wq_submit_work() should not touch req->rw +- io_uring: don't wait when under-submitting +- io_uring: warn about unhandled opcode +- io_uring: read opcode and user_data from SQE exactly once +- io_uring: make IORING_OP_TIMEOUT_REMOVE deferrable +- io_uring: make IORING_OP_CANCEL_ASYNC deferrable +- io_uring: make IORING_POLL_ADD and IORING_POLL_REMOVE deferrable +- io_uring: make HARDLINK imply LINK +- io_uring: any deferred command must have stable sqe data +- io_uring: remove 'sqe' parameter to the OP helpers that take it +- io_uring: fix pre-prepped issue with force_nonblock == true +- io-wq: re-add io_wq_current_is_worker() +- io_uring: fix sporadic -EFAULT from IORING_OP_RECVMSG +- io_uring: fix stale 
comment and a few typos +- io_uring: ensure we return -EINVAL on unknown opcode +- io_uring: add sockets to list of files that support non-blocking issue +- io_uring: only hash regular files for async work execution +- io_uring: run next sqe inline if possible +- io_uring: don't dynamically allocate poll data +- io_uring: deferred send/recvmsg should assign iov +- io_uring: sqthread should grab ctx->uring_lock for submissions +- io-wq: briefly spin for new work after finishing work +- io-wq: remove worker->wait waitqueue +- io_uring: allow unbreakable links +- io_uring: fix a typo in a comment +- io_uring: hook all linked requests via link_list +- io_uring: fix error handling in io_queue_link_head +- io_uring: use hash table for poll command lookups +- io-wq: clear node->next on list deletion +- io_uring: ensure deferred timeouts copy necessary data +- io_uring: allow IO_SQE_* flags on IORING_OP_TIMEOUT +- io_uring: handle connect -EINPROGRESS like -EAGAIN +- io_uring: remove io_wq_current_is_worker +- io_uring: remove parameter ctx of io_submit_state_start +- io_uring: mark us with IORING_FEAT_SUBMIT_STABLE +- io_uring: ensure async punted connect requests copy data +- io_uring: ensure async punted sendmsg/recvmsg requests copy data +- net: disallow ancillary data for __sys_{send,recv}msg_file() +- net: separate out the msghdr copy from ___sys_{send,recv}msg() +- io_uring: ensure async punted read/write requests copy iovec +- io_uring: add general async offload context +- io_uring: transform send/recvmsg() -ERESTARTSYS to -EINTR +- io_uring: use current task creds instead of allocating a new one +- io_uring: fix missing kmap() declaration on powerpc +- io_uring: add mapping support for NOMMU archs +- io_uring: make poll->wait dynamically allocated +- io-wq: shrink io_wq_work a bit +- io-wq: fix handling of NUMA node IDs +- io_uring: use kzalloc instead of kcalloc for single-element allocations +- io_uring: cleanup io_import_fixed() +- io_uring: inline struct sqe_submit +- io_uring: store timeout's sqe->off in proper place +- io_uring: remove superfluous check for sqe->off in io_accept() +- io_uring: async workers should inherit the user creds +- io-wq: have io_wq_create() take a 'data' argument +- io_uring: fix dead-hung for non-iter fixed rw +- io_uring: add support for IORING_OP_CONNECT +- net: add __sys_connect_file() helper +- io_uring: only return -EBUSY for submit on non-flushed backlog +- io_uring: only !null ptr to io_issue_sqe() +- io_uring: simplify io_req_link_next() +- io_uring: pass only !null to io_req_find_next() +- io_uring: remove io_free_req_find_next() +- io_uring: add likely/unlikely in io_get_sqring() +- io_uring: rename __io_submit_sqe() +- io_uring: improve trace_io_uring_defer() trace point +- io_uring: drain next sqe instead of shadowing +- io_uring: close lookup gap for dependent next work +- io_uring: allow finding next link independent of req reference count +- io_uring: io_allocate_scq_urings() should return a sane state +- io_uring: Always REQ_F_FREE_SQE for allocated sqe +- io_uring: io_fail_links() should only consider first linked timeout +- io_uring: Fix leaking linked timeouts +- io_uring: remove redundant check +- io_uring: break links for failed defer +- io-wq: remove extra space characters +- io-wq: wait for io_wq_create() to setup necessary workers +- io_uring: request cancellations should break links +- io_uring: correct poll cancel and linked timeout expiration completion +- io_uring: remove dead REQ_F_SEQ_PREV flag +- io_uring: fix sequencing 
issues with linked timeouts +- io_uring: make req->timeout be dynamically allocated +- io_uring: make io_double_put_req() use normal completion path +- io_uring: cleanup return values from the queueing functions +- io_uring: io_async_cancel() should pass in 'nxt' request pointer +- io_uring: make POLL_ADD/POLL_REMOVE scale better +- io-wq: remove now redundant struct io_wq_nulls_list +- io_uring: Fix getting file for non-fd opcodes +- io_uring: introduce req_need_defer() +- io_uring: clean up io_uring_cancel_files() +- io-wq: ensure free/busy list browsing see all items +- io_uring: ensure registered buffer import returns the IO length +- io-wq: ensure we have a stable view of ->cur_work for cancellations +- io_wq: add get/put_work handlers to io_wq_create() +- io_uring: Fix getting file for timeout +- io_uring: check for validity of ->rings in teardown +- io_uring: fix potential deadlock in io_poll_wake() +- io_uring: use correct "is IO worker" helper +- io_uring: make timeout sequence == 0 mean no sequence +- io_uring: fix -ENOENT issue with linked timer with short timeout +- io_uring: don't do flush cancel under inflight_lock +- io_uring: flag SQPOLL busy condition to userspace +- io_uring: make ASYNC_CANCEL work with poll and timeout +- io_uring: provide fallback request for OOM situations +- io_uring: convert accept4() -ERESTARTSYS into -EINTR +- io_uring: fix error clear of ->file_table in io_sqe_files_register() +- io_uring: separate the io_free_req and io_free_req_find_next interface +- io_uring: keep io_put_req only responsible for release and put req +- io_uring: remove passed in 'ctx' function parameter ctx if possible +- io_uring: reduce/pack size of io_ring_ctx +- io_uring: properly mark async work as bounded vs unbounded +- io-wq: add support for bounded vs unbunded work +- io-wq: io_wqe_run_queue() doesn't need to use list_empty_careful() +- io_uring: add support for backlogged CQ ring +- io_uring: pass in io_kiocb to fill/add CQ handlers +- io_uring: make io_cqring_events() take 'ctx' as argument +- io_uring: add support for linked SQE timeouts +- io_uring: abstract out io_async_cancel_one() helper +- io_uring: use inlined struct sqe_submit +- io_uring: Use submit info inlined into req +- io_uring: allocate io_kiocb upfront +- io_uring: io_queue_link*() right after submit +- io_uring: Merge io_submit_sqes and io_ring_submit +- io_uring: kill dead REQ_F_LINK_DONE flag +- io_uring: fixup a few spots where link failure isn't flagged +- io_uring: enable optimized link handling for IORING_OP_POLL_ADD +- io-wq: use proper nesting IRQ disabling spinlocks for cancel +- io_uring: add completion trace event +- io-wq: use kfree_rcu() to simplify the code +- io_uring: set -EINTR directly when a signal wakes up in io_cqring_wait +- io_uring: support for generic async request cancel +- io_uring: ensure we clear io_kiocb->result before each issue +- io_uring: io_wq_create() returns an error pointer, not NULL +- io_uring: fix race with canceling timeouts +- io_uring: support for larger fixed file sets +- io_uring: protect fixed file indexing with array_index_nospec() +- io_uring: add support for IORING_OP_ACCEPT +- net: add __sys_accept4_file() helper +- io_uring: io_uring: add support for async work inheriting files +- io_uring: replace workqueue usage with io-wq +- io-wq: small threadpool implementation for io_uring +- sched/core, workqueues: Distangle worker accounting from rq lock +- sched: Remove stale PF_MUTEX_TESTER bit +- io_uring: Fix mm_fault with READ/WRITE_FIXED +- io_uring: 
remove index from sqe_submit +- io_uring: add set of tracing events +- io_uring: add support for canceling timeout requests +- io_uring: add support for absolute timeouts +- io_uring: replace s->needs_lock with s->in_async +- io_uring: allow application controlled CQ ring size +- io_uring: add support for IORING_REGISTER_FILES_UPDATE +- io_uring: allow sparse fixed file sets +- io_uring: run dependent links inline if possible +- io_uring: don't touch ctx in setup after ring fd install +- io_uring: Fix leaked shadow_req +- io_uring: fix bad inflight accounting for SETUP_IOPOLL|SETUP_SQTHREAD +- io_uring: used cached copies of sq->dropped and cq->overflow +- io_uring: Fix race for sqes with userspace +- io_uring: Fix broken links with offloading +- io_uring: Fix corrupted user_data +- io_uring: correct timeout req sequence when inserting a new entry +- io_uring : correct timeout req sequence when waiting timeout +- io_uring: revert "io_uring: optimize submit_and_wait API" +- io_uring: fix logic error in io_timeout +- io_uring: fix up O_NONBLOCK handling for sockets +- io_uring: consider the overflow of sequence for timeout req +- io_uring: fix sequence logic for timeout requests +- io_uring: only flush workqueues on fileset removal +- io_uring: remove wait loop spurious wakeups +- io_uring: fix reversed nonblock flag for link submission +- io_uring: use __kernel_timespec in timeout ABI +- io_uring: make CQ ring wakeups be more efficient +- io_uring: compare cached_cq_tail with cq.head in_io_uring_poll +- io_uring: correctly handle non ->{read,write}_iter() file_operations +- io_uring: IORING_OP_TIMEOUT support +- io_uring: use cond_resched() in sqthread +- io_uring: fix potential crash issue due to io_get_req failure +- io_uring: ensure poll commands clear ->sqe +- io_uring: fix use-after-free of shadow_req +- io_uring: use kmemdup instead of kmalloc and memcpy +- io_uring: increase IORING_MAX_ENTRIES to 32K +- io_uring: make sqpoll wakeup possible with getevents +- io_uring: extend async work merging +- io_uring: limit parallelism of buffered writes +- io_uring: add io_queue_async_work() helper +- io_uring: optimize submit_and_wait API +- io_uring: add support for link with drain +- io_uring: fix wrong sequence setting logic +- io_uring: expose single mmap capability +- io_uring: allocate the two rings together +- io_uring: add need_resched() check in inner poll loop +- io_uring: don't enter poll loop if we have CQEs pending +- io_uring: fix potential hang with polled IO +- io_uring: fix an issue when IOSQE_IO_LINK is inserted into defer list +- io_uring: fix manual setup of iov_iter for fixed buffers +- io_uring: fix KASAN use after free in io_sq_wq_submit_work +- io_uring: ensure ->list is initialized for poll commands +- io_uring: track io length in async_list based on bytes +- io_uring: don't use iov_iter_advance() for fixed buffers +- io_uring: add a memory barrier before atomic_read +- io_uring: fix counter inc/dec mismatch in async_list +- io_uring: fix the sequence comparison in io_sequence_defer +- io_uring: fix io_sq_thread_stop running in front of io_sq_thread +- io_uring: add support for recvmsg() +- io_uring: add support for sendmsg() +- io_uring: add support for sqe links +- io_uring: punt short reads to async context +- uio: make import_iovec()/compat_import_iovec() return bytes on success +- io_uring: ensure req->file is cleared on allocation +- io_uring: fix memory leak of UNIX domain socket inode +- io_uring: Fix __io_uring_register() false success +- tools/io_uring: sync 
with liburing +- tools/io_uring: fix Makefile for pthread library link +- select: shift restore_saved_sigmask_unless() into poll_select_copy_remaining() +- select: change do_poll() to return -ERESTARTNOHAND rather than -EINTR +- signal: simplify set_user_sigmask/restore_user_sigmask +- signal: remove the wrong signal_pending() check in restore_user_sigmask() +- io_uring: use wait_event_interruptible for cq_wait conditional wait +- io_uring: adjust smp_rmb inside io_cqring_events +- io_uring: fix infinite wait in khread_park() on io_finish_async() +- io_uring: remove 'ev_flags' argument +- io_uring: fix failure to verify SQ_AFF cpu +- io_uring: fix race condition reading SQE data +- io_uring: use cpu_online() to check p->sq_thread_cpu instead of cpu_possible() +- io_uring: fix shadowed variable ret return code being not checked +- req->error only used for iopoll +- io_uring: add support for eventfd notifications +- io_uring: add support for IORING_OP_SYNC_FILE_RANGE +- io_uring: add support for marking commands as draining +- fs: add sync_file_range() helper +- io_uring: avoid page allocation warnings +- io_uring: drop req submit reference always in async punt +- io_uring: free allocated io_memory once +- io_uring: fix SQPOLL cpu validation +- io_uring: have submission side sqe errors post a cqe +- io_uring: remove unnecessary barrier after unsetting IORING_SQ_NEED_WAKEUP +- io_uring: remove unnecessary barrier after incrementing dropped counter +- io_uring: remove unnecessary barrier before reading SQ tail +- io_uring: remove unnecessary barrier after updating SQ head +- io_uring: remove unnecessary barrier before reading cq head +- io_uring: remove unnecessary barrier before wq_has_sleeper +- io_uring: fix notes on barriers +- io_uring: fix handling SQEs requesting NOWAIT +- io_uring: remove 'state' argument from io_{read,write} path +- io_uring: fix poll full SQ detection +- io_uring: fix race condition when sq threads goes sleeping +- io_uring: fix race condition reading SQ entries +- io_uring: fail io_uring_register(2) on a dying io_uring instance +- io_uring: fix CQ overflow condition +- io_uring: fix possible deadlock between io_uring_{enter,register} +- io_uring: drop io_file_put() 'file' argument +- io_uring: only test SQPOLL cpu after we've verified it +- io_uring: park SQPOLL thread if it's percpu +- io_uring: restrict IORING_SETUP_SQPOLL to root +- io_uring: fix double free in case of fileset regitration failure +- io_uring: offload write to async worker in case of -EAGAIN +- io_uring: fix big-endian compat signal mask handling +- io_uring: retry bulk slab allocs as single allocs +- io_uring: fix poll races +- io_uring: fix fget/fput handling +- io_uring: add prepped flag +- io_uring: make io_read/write return an integer +- io_uring: use regular request ref counts +- tools/io_uring: remove IOCQE_FLAG_CACHEHIT +- io_uring: add a few test tools +- io_uring: allow workqueue item to handle multiple buffered requests +- io_uring: add support for IORING_OP_POLL +- io_uring: add io_kiocb ref count +- io_uring: add submission polling +- io_uring: add file set registration +- net: split out functions related to registering inflight socket files +- io_uring: add support for pre-mapped user IO buffers +- io_uring: batch io_kiocb allocation +- io_uring: use fget/fput_many() for file references +- fs: add fget_many() and fput_many() +- io_uring: support for IO polling +- io_uring: add fsync support +- Add io_uring IO interface +- io_pgetevents: use __kernel_timespec +- pselect6: use 
__kernel_timespec +- ppoll: use __kernel_timespec +- signal: Add restore_user_sigmask() +- signal: Add set_user_sigmask() +- block: Initialize BIO I/O priority early +- block: prevent merging of requests with different priorities +- aio: Fix fallback I/O priority value +- block: Introduce get_current_ioprio() +- aio: Comment use of IOCB_FLAG_IOPRIO aio flag +- fs: fix kabi change since add iopoll +- fs: add an iopoll method to struct file_operations +- signal: Allow cifs and drbd to receive their terminating signals +- cifs: fix rmmod regression in cifs.ko caused by force_sig changes +- signal/cifs: Fix cifs_put_tcp_session to call send_sig instead of force_sig + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.10.0.0073 +- bpf, x86: Validate computation of branch displacements for x86-32 +- bpf, x86: Validate computation of branch displacements for x86-64 + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.9.0.0072 +- mm/vmalloc.c: fix percpu free VM area search criteria +- mm/vmalloc.c: avoid bogus -Wmaybe-uninitialized warning +- mm/vmap: add DEBUG_AUGMENT_LOWEST_MATCH_CHECK macro +- mm/vmap: add DEBUG_AUGMENT_PROPAGATE_CHECK macro +- mm/vmalloc.c: keep track of free blocks for vmap allocation + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.8.0.0071 +- config: Enable CONFIG_USERSWAP +- userswap: support userswap via userfaultfd +- userswap: add a new flag 'MAP_REPLACE' for mmap() +- mm, mempolicy: fix up gup usage in lookup_node +- mm/mempolicy: Allow lookup_node() to handle fatal signal +- mm/gup: Let __get_user_pages_locked() return -EINTR for fatal signal +- mm/gup: fix fixup_user_fault() on multiple retries +- mm/gup: allow VM_FAULT_RETRY for multiple times +- mm: allow VM_FAULT_RETRY for multiple times + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.7.0.0070 +- sched/fair: fix kabi broken due to adding fields in rq and sched_domain_shared +- sched/fair: fix try_steal compile error +- config: enable CONFIG_SCHED_STEAL by default +- sched/fair: introduce SCHED_STEAL +- disable stealing by default +- sched/fair: Provide idle search schedstats +- sched/fair: disable stealing if too many NUMA nodes +- sched/fair: Steal work from an overloaded CPU when CPU goes idle +- sched/fair: Provide can_migrate_task_llc +- sched/fair: Generalize the detach_task interface +- sched/fair: Hoist idle_stamp up from idle_balance +- sched/fair: Dynamically update cfs_overload_cpus +- sched/topology: Provide cfs_overload_cpus bitmap +- sched/topology: Provide hooks to allocate data shared per LLC +- sched: Provide sparsemask, a reduced contention bitmap + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.6.0.0069 +- sched/fair: fix kabi broken due to adding idle_h_nr_running in cfs_rq +- sched/fair: Make sched-idle CPU selection consistent throughout +- sched/fair: Optimize select_idle_cpu +- sched/fair: Fall back to sched-idle CPU if idle CPU isn't found +- sched/fair: Start tracking SCHED_IDLE tasks count in cfs_rq +- sched/core: Create task_has_idle_policy() helper + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.5.0.0068 +- ext4: add reclaim checks to xattr code +- locking/mutex: Fix non debug version of mutex_lock_io_nested() +- dm verity: add root hash pkcs#7 signature verification +- Revert "netfilter: x_tables: Update remaining dereference to RCU" +- netfilter: x_tables: Use correct memory barriers. 
+- Revert "netfilter: x_tables: Switch synchronization to RCU" +- arm64: kdump: update ppos when reading elfcorehdr +- netfilter: ctnetlink: fix dump of the expect mask attribute +- dm ioctl: fix out of bounds array access when no devices +- block: Suppress uevent for hidden device when removed +- NFS: Correct size calculation for create reply length +- cifs: Fix preauth hash corruption +- ext4: do not try to set xattr into ea_inode if value is empty +- kernel, fs: Introduce and use set_restart_fn() and arch_set_restart_data() +- nvme-rdma: fix possible hang when failing to set io queues +- sunrpc: fix refcount leak for rpc auth modules +- include/linux/sched/mm.h: use rcu_dereference in in_vfork() +- hrtimer: Update softirq_expires_next correctly after __hrtimer_get_next_event() +- scsi: target: core: Prevent underflow for service actions +- scsi: target: core: Add cmd length set before cmd complete +- PCI: Fix pci_register_io_range() memory leak +- Revert "mm, slub: consider rest of partial list if acquire_slab() fails" +- cifs: return proper error code in statfs(2) +- tcp: add sanity tests to TCP_QUEUE_SEQ +- tcp: annotate tp->write_seq lockless reads +- tcp: annotate tp->copied_seq lockless reads +- netfilter: x_tables: gpf inside xt_find_revision() +- net: Fix gro aggregation for udp encaps with zero csum +- dm table: fix zoned iterate_devices based device capability checks +- dm table: fix DAX iterate_devices based device capability checks +- dm table: fix iterate_devices based device capability checks +- dm bufio: subtract the number of initial sectors in dm_bufio_get_device_size +- swap: fix swapfile read/write offset +- mm/hugetlb.c: fix unnecessary address expansion of pmd sharing +- net: fix up truesize of cloned skb in skb_prepare_for_shift() +- xfs: Fix assert failure in xfs_setattr_size() +- arm64 module: set plt* section addresses to 0x0 +- hugetlb: fix update_and_free_page contig page struct assumption +- net: icmp: pass zeroed opts from icmp{,v6}_ndo_send before sending +- ipv6: silence compilation warning for non-IPV6 builds +- ipv6: icmp6: avoid indirect call for icmpv6_send() +- xfrm: interface: use icmp_ndo_send helper +- sunvnet: use icmp_ndo_send helper +- gtp: use icmp_ndo_send helper +- icmp: allow icmpv6_ndo_send to work with CONFIG_IPV6=n +- icmp: introduce helper for nat'd source address in network device context +- dm: fix deadlock when swapping to encrypted device +- printk: fix deadlock when kernel panic +- module: Ignore _GLOBAL_OFFSET_TABLE_ when warning for undefined symbols +- hugetlb: fix copy_huge_page_from_user contig page struct assumption +- x86: fix seq_file iteration for pat/memtype.c +- ACPI: property: Fix fwnode string properties matching +- blk-settings: align max_sectors on "logical_block_size" boundary +- mm/rmap: fix potential pte_unmap on an not mapped pte +- arm64: Add missing ISB after invalidating TLB in __primary_switch +- mm/hugetlb: fix potential double free in hugetlb_register_node() error path +- mm/memory.c: fix potential pte_unmap_unlock pte error +- ocfs2: fix a use after free on error +- tracepoint: Do not fail unregistering a probe due to memory failure +- isofs: release buffer head before return +- tcp: fix SO_RCVLOWAT related hangs under mem pressure +- random: fix the RNDRESEEDCRNG ioctl +- bfq: Avoid false bfq queue merging +- locking/static_key: Fix false positive warnings on concurrent dec/inc +- jump_label/lockdep: Assert we hold the hotplug lock for _cpuslocked() operations +- KVM: fix memory leak in 
kvm_io_bus_unregister_dev() +- net: qrtr: fix a kernel-infoleak in qrtr_recvmsg() +- xen-blkback: don't leak persistent grants from xen_blkbk_map() +- KVM: SVM: Periodically schedule when unregistering regions on destroy +- gianfar: fix jumbo packets+napi+rx overrun crash +- usbip: fix stub_dev usbip_sockfd_store() races leading to gpf +- media: v4l: ioctl: Fix memory leak in video_usercopy +- block: only update parent bi_status when bio fail +- RDMA/hns: fix timer, gid_type, scc cfg +- block: respect queue limit of max discard segment +- block: Use non _rcu version of list functions for tag_set_list + +* Thu Apr 15 2021 Cheng Jian - 4.19.90-2104.4.0.0067 +- ext4: fix potential error in ext4_do_update_inode +- mm, sl[aou]b: guarantee natural alignment for kmalloc(power-of-two) +- mm,hwpoison: return -EBUSY when migration fails +- config: Enable files cgroup on x86 +- ext4: Fix unreport netlink message to userspace when fs abort +- ext4: don't leak old mountpoint samples +- scsi: libiscsi: convert change of struct iscsi_conn to fix KABI +- scsi: libiscsi: Reset max/exp cmdsn during recovery +- scsi: iscsi_tcp: Fix shost can_queue initialization +- scsi: libiscsi: Add helper to calculate max SCSI cmds per session +- scsi: libiscsi: Fix iSCSI host workq destruction +- scsi: libiscsi: Fix iscsi_task use after free() +- scsi: libiscsi: Drop taskqueuelock +- scsi: libiscsi: Fix iscsi_prep_scsi_cmd_pdu() error handling +- scsi: libiscsi: Fix error count for active session +- ext4: fix timer use-after-free on failed mount +- loop: fix I/O error on fsync() in detached loop devices +- md/bitmap: fix memory leak of temporary bitmap +- md: get sysfs entry after redundancy attr group create +- md: fix deadlock causing by sysfs_notify +- md: fix the checking of wrong work queue +- md: flush md_rdev_misc_wq for HOT_ADD_DISK case +- md: don't flush workqueue unconditionally in md_open +- md: add new workqueue for delete rdev + +* Tue Apr 13 2021 Cheng Jian - 4.19.90-2104.3.0.0066 +- nvme-fabrics: fix kabi broken due to adding fields in struct nvme_ctrl + +* Thu Apr 01 2021 Jiachen Fan - 4.19.90-2104.2.0.0065 +- Add the option of "with_perf" +- Output jvmti plug-in as part of perf building + +* Wed Apr 07 2021 Cheng Jian - 4.19.90-2104.2.0.0064 +- x86/Kconfig: Drop vendor dependency for X86_UMIP +- x86/Kconfig: Rename UMIP config parameter +- iommu/vt-d:Add support for detecting ACPI device in RMRR +- USB:Fix kernel NULL pointer when unbind UHCI form vfio-pci +- x86/apic: Mask IOAPIC entries when disabling the local APIC +- xhci: fix issue with resume from system Sx state +- xhci: Adjust the UHCI Controllers bit value +- ALSA: hda: Add support of Zhaoxin NB HDAC codec +- ALSA: hda: Add support of Zhaoxin NB HDAC +- ALSA: hda: Add support of Zhaoxin SB HDAC +- xhci: Show Zhaoxin XHCI root hub speed correctly +- xhci: fix issue of cross page boundary in TRB prefetch +- PCI: Add ACS quirk for Zhaoxin Root/Downstream Ports +- PCI: Add ACS quirk for Zhaoxin multi-function devices +- xhci: Add Zhaoxin xHCI LPM U1/U2 feature support +- ata: sata_zhaoxin: Add support for Zhaoxin Serial ATA +- PCI: Add Zhaoxin Vendor ID +- x86/perf: Add hardware performance events support for Zhaoxin CPU. 
+- crypto: x86/crc32c-intel - Don't match some Zhaoxin CPUs +- x86/speculation/swapgs: Exclude Zhaoxin CPUs from SWAPGS vulnerability +- x86/speculation/spectre_v2: Exclude Zhaoxin CPUs from SPECTRE_V2 +- x86/mce: Add Zhaoxin LMCE support +- x86/mce: Add Zhaoxin CMCI support +- x86/mce: Add Zhaoxin MCE support +- x86/acpi/cstate: Add Zhaoxin processors support for cache flush policy in C3 +- x86/power: Optimize C3 entry on Centaur CPUs +- ACPI, x86: Add Zhaoxin processors support for NONSTOP TSC +- x86/cpu: Add detect extended topology for Zhaoxin CPUs +- x86/cpufeatures: Add Zhaoxin feature bits +- x86/cpu/centaur: Add Centaur family >=7 CPUs initialization support +- x86/cpu/centaur: Replace two-condition switch-case with an if statement +- x86/cpu: Remove redundant cpu_detect_cache_sizes() call +- x86/cpu: Create Zhaoxin processors architecture support file +- xhci: apply XHCI_PME_STUCK_QUIRK to Intel Comet Lake platforms +- xhci: Fix memory leak when caching protocol extended capability PSI tables - take 2 +- xhci: fix runtime pm enabling for quirky Intel hosts +- xhci: Force Maximum Packet size for Full-speed bulk devices to valid range. + +* Thu Apr 01 2021 Cheng Jian - 4.19.90-2104.1.0.0063 +- configs: add config BMA to config files +- Huawei BMA: Adding Huawei BMA driver: host_kbox_drv +- Huawei BMA: Adding Huawei BMA driver: cdev_veth_drv +- Huawei BMA: Adding Huawei BMA driver: host_veth_drv +- Huawei BMA: Adding Huawei BMA driver: host_cdev_drv +- Huawei BMA: Adding Huawei BMA driver: host_edma_drv +- scsi: ses: Fix crash caused by kfree an invalid pointer +- net: hns3: PF add support for pushing link status to VFs +- net: hns: update hns version to 21.2.1 +- net: hns: Remove unused macro AE_NAME_PORT_ID_IDX +- net: hns: use IRQ_NOAUTOEN to avoid irq is enabled due to request_irq +- net: hns: Replace zero-length array with flexible-array member +- hisilicon/hns: convert comma to semicolon +- net: hns: make arrays static, makes object smaller +- net: hns: Move static keyword to the front of declaration +- net: hns: use eth_broadcast_addr() to assign broadcast address +- net: hns: use true,false for bool variables +- net: hns: fix wrong display of "Advertised link modes" +- net: hns: fix ping failed when setting "autoneg off speed 100 duplex half" +- net: hns: fix variable used when DEBUG is defined +- net: hns: fix non-promiscuous mode does not take effect problem +- net: hns: remove redundant variable initialization +- treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 152 +- net/hinic: update hinic version to 2.3.2.18 +- net/hinic: Add support for hinic PMD on VF +- net/hinic: Add XDP support for pass and drop actions +- net/hinic: permit configuration of rx-vlan-filter with ethtool +- locks: fix a memory leak bug in __break_lease() + +* Mon Mar 29 2021 Cheng Jian - 4.19.90-2103.4.0.0062 +- mm/vmscan: fix uncleaned mem_cgroup_uncharge +- staging: rtl8188eu: prevent ->ssid overflow in rtw_wx_set_scan() +- PCI: rpadlpar: Fix potential drc_name corruption in store functions +- perf/x86/intel: Fix a crash caused by zero PEBS status +- btrfs: fix race when cloning extent buffer during rewind of an old root +- bpf: Fix off-by-one for area size in creating mask to left +- bpf: Prohibit alu ops for pointer types not defining ptr_limit +- net/x25: prevent a couple of overflows +- drm/ttm/nouveau: don't call tt destroy callback on alloc failure. 
+- cgroup: Fix kabi broken by files_cgroup introduced +- arm64/mpam: fix a possible deadlock in mpam_enable +- config: arm64: build TCM driver to modules by default +- staging: TCM: add GMJS(Nationz Tech) TCM driver. +- config: enable config TXGBE by default +- x86/config: Set CONFIG_TXGBE=m by default +- net: txgbe: Add support for Netswift 10G NIC + +* Mon Mar 22 2021 Cheng Jian - 4.19.90-2103.3.0.0061 +- arm64/mpam: fix a memleak in add_schema +- scsi: check the whole result for reading write protect flag +- ext4: Fix bug on in ext4_es_cache_extent as ext4_split_extent_at failed +- md: add checkings before flush md_misc_wq +- dm: use noio when sending kobject event +- ext4: fix potential htree index checksum corruption +- quota: Fix memory leak when handling corrupted quota file +- quota: Sanity-check quota file headers on load +- block, bfq: invoke flush_idle_tree after reparent_active_queues in pd_offline +- block, bfq: make reparent_leaf_entity actually work only on leaf entities +- block, bfq: turn put_queue into release_process_ref in __bfq_bic_change_cgroup +- block, bfq: move forward the getting of an extra ref in bfq_bfqq_move +- block, bfq: get extra ref to prevent a queue from being freed during a group move +- perf/ftrace: Fix use-after-free in __ftrace_ops_list_func() +- fs/xfs: fix time overflow +- ext4: remove set but not used variable 'es' in ext4_jbd2.c +- ext4: remove set but not used variable 'es' +- ext4: don't try to processed freed blocks until mballoc is initialized +- ext4: drop ext4_handle_dirty_super() +- ext4: use sbi instead of EXT4_SB(sb) in ext4_update_super() +- ext4: save error info to sb through journal if available +- ext4: protect superblock modifications with a buffer lock +- ext4: drop sync argument of ext4_commit_super() +- ext4: combine ext4_handle_error() and save_error_info() +- ext4: defer saving error info from atomic context +- ext4: simplify ext4 error translation +- ext4: move functions in super.c +- ext4: make ext4_abort() use __ext4_error() +- ext4: standardize error message in ext4_protect_reserved_inode() +- ext4: save all error info in save_error_info() and drop ext4_set_errno() +- ext4: save the error code which triggered an ext4_error() in the superblock +- ext4: remove redundant sb checksum recomputation +- Revert "ext4: Protect superblock modifications with a buffer lock" + +* Mon Mar 15 2021 Cheng Jian - 4.19.90-2103.2.0.0060 +- xen-netback: respect gnttab_map_refs()'s return value +- Xen/gnttab: handle p2m update errors on a per-slot basis +- sysfs: fix kabi broken when add sysfs_emit and sysfs_emit_at +- scsi: iscsi: Verify lengths on passthrough PDUs +- scsi: iscsi: Ensure sysfs attributes are limited to PAGE_SIZE +- sysfs: Add sysfs_emit and sysfs_emit_at to format sysfs output +- scsi: iscsi: Restrict sessions and handles to admin capabilities +- ovl: do not fail because of O_NOATIME +- ovl: check permission to open real file +- ovl: call secutiry hook in ovl_real_ioctl() +- ovl: verify permissions in ovl_path_open() +- ovl: switch to mounter creds in readdir +- ovl: pass correct flags for opening real directory +- mm/swapfile.c: fix potential memory leak in sys_swapon +- hibernate: Allow uswsusp to write to swap +- mm/swapfile.c: move inode_lock out of claim_swapfile +- mm/swapfile.c: fix a comment in sys_swapon() +- vfs: don't allow writes to swap files +- mm: set S_SWAPFILE on blockdev swap devices +- block_dump: remove block_dump feature when dirting inode +- virtio-blk: modernize sysfs attribute creation +- nvme: register 
ns_id attributes as default sysfs groups +- ext4: Fix not report exception message when mount with errors=continue +- xen-blkback: fix error handling in xen_blkbk_map() +- xen-scsiback: don't "handle" error by BUG() +- xen-netback: don't "handle" error by BUG() +- xen-blkback: don't "handle" error by BUG() +- xen/arm: don't ignore return errors from set_phys_to_machine +- Xen/gntdev: correct error checking in gntdev_map_grant_pages() +- Xen/gntdev: correct dev_bus_addr handling in gntdev_map_grant_pages() +- Xen/x86: also check kernel mapping in set_foreign_p2m_mapping() +- Xen/x86: don't bail early from clear_foreign_p2m_mapping() + +* Thu Mar 11 2021 Cheng Jian - 4.19.90-2103.1.0.0059 +- brcmfmac: Loading the correct firmware for brcm43456 +- config: Enable the config option of the etmem feature +- etmem: add etmem-swap feature +- etmem: add etmem-scan feature +- moduleparam: Save information about built-in modules in separate file +- ovl: expand warning in ovl_d_real() +- net: watchdog: hold device global xmit lock during tx disable +- bfq-iosched: Revert "bfq: Fix computation of shallow depth" +- ovl: skip getxattr of security labels +- cap: fix conversions on getxattr +- ovl: perform vfs_getxattr() with mounter creds +- tracing: Check length before giving out the filter buffer +- tracing: Do not count ftrace events in top level enable output +- blk-mq: don't hold q->sysfs_lock in blk_mq_map_swqueue +- block: don't hold q->sysfs_lock in elevator_init_mq +- SUNRPC: Handle 0 length opaque XDR object data properly +- SUNRPC: Move simple_get_bytes and simple_get_netobj into private header +- fgraph: Initialize tracing_graph_pause at task creation +- tracing/kprobe: Fix to support kretprobe events on unloaded modules +- md: Set prev_flush_start and flush_bio in an atomic way +- mm: thp: fix MADV_REMOVE deadlock on shmem THP +- mm: hugetlb: remove VM_BUG_ON_PAGE from page_huge_active +- mm: hugetlb: fix a race between isolating and freeing page +- mm: hugetlb: fix a race between freeing and dissolving the page +- mm: hugetlbfs: fix cannot migrate the fallocated HugeTLB page +- smb3: Fix out-of-bounds bug in SMB2_negotiate() +- cifs: report error instead of invalid when revalidating a dentry fails +- genirq/msi: Activate Multi-MSI early when MSI_FLAG_ACTIVATE_EARLY is set +- kretprobe: Avoid re-registration of the same kretprobe earlier +- ovl: fix dentry leak in ovl_get_redirect +- memblock: do not start bottom-up allocations with kernel_end +- workqueue: Restrict affinity change to rescuer +- kthread: Extract KTHREAD_IS_PER_CPU +- sysctl: handle overflow in proc_get_long +- fs: fix lazytime expiration handling in __writeback_single_inode() +- writeback: Drop I_DIRTY_TIME_EXPIRE +- dm integrity: conditionally disable "recalculate" feature +- tracing: Fix race in trace_open and buffer resize call +- Revert "mm/slub: fix a memory leak in sysfs_slab_add()" +- net/rds: restrict iovecs length for RDS_CMSG_RDMA_ARGS +- net: fix iteration for sctp transport seq_files +- netfilter: conntrack: skip identical origin tuple in same zone only +- netfilter: flowtable: fix tcp and udp header checksum update +- netfilter: xt_recent: Fix attempt to update deleted entry +- af_key: relax availability checks for skb size calculation +- net: ip_tunnel: fix mtu calculation +- net_sched: gen_estimator: support large ewma log +- tcp: fix TLP timer not set when CA_STATE changes from DISORDER to OPEN +- net/mlx5: Fix memory leak on flow table creation error flow +- xfrm: fix disable_xfrm sysctl when used on xfrm 
interfaces +- xfrm: Fix oops in xfrm_replay_advance_bmp +- netfilter: nft_dynset: add timeout extension to template +- net: sit: unregister_netdevice on newlink's error path +- esp: avoid unneeded kmap_atomic call +- udp: Prevent reuseport_select_sock from reading uninitialized socks +- vrf: Fix fast path output packet handling with async Netfilter rules +- livepatch/core: Fix jump_label_apply_nops called multi times +- gpu: hibmc: Fix stuck when switch GUI to text. +- gpu: hibmc: Use drm get pci dev api. +- gpu: hibmc: Fix erratic display during startup stage. +- net: hns3: update hns3 version to 1.9.38.11 +- net: hns3: fix 'ret' may be used uninitialized problem +- net: hns3: update hns3 version to 1.9.38.10 +- net: hns3: adds support for setting pf max tx rate via sysfs +- ext4: find old entry again if failed to rename whiteout +- config: disable config TMPFS_INODE64 by default +- tmpfs: restore functionality of nr_inodes=0 +- tmpfs: support 64-bit inums per-sb +- tmpfs: per-superblock i_ino support +- Revert "scsi: sg: fix memory leak in sg_build_indirect" +- scsi: fix kabi for scsi_device +- scsi: core: Only re-run queue in scsi_end_request() if device queue is busy +- scsi: core: Run queue in case of I/O resource contention failure +- Revert "scsi: sd: block: Fix read-only flag residuals when partition table change" +- scsi: sd: block: Fix kabi change by 'scsi: sd: block: Fix regressions in read-only block device handling' +- scsi: sd: block: Fix read-only flag residuals when partition table change +- scsi: sd: block: Fix regressions in read-only block device handling +- proc/mounts: Fix kabi broken +- proc/mounts: add cursor +- list: introduce list_for_each_continue() + +* Wed Feb 24 2021 Cheng Jian - 4.19.90-2102.3.0.0058 +- arm64/mpam: Fix compile warning +- arm64/mpam: Sort domains when cpu online +- arm64/mpam: resctrl: Refresh cpu mask for handling cpuhp +- arm64/mpam: resctrl: Allow setting register MPAMCFG_MBW_MIN to 0 +- arm64/mpam: resctrl: Use resctrl_group_init_alloc() for default group +- arm64/mpam: resctrl: Add proper error handling to resctrl_mount() +- arm64/mpam: Supplement additional useful ctrl features for mount options +- ACPI/MPAM: Use acpi_map_pxm_to_node() to get node id for memory node +- arm64/mpam: Set per-cpu's closid to none zero for cdp +- arm64/mpam: Simplify mpamid cdp mapping process +- arm64/mpam: Filter schema control type with ctrl features +- arm64/mpam: resctrl: Add rmid file in resctrl sysfs +- arm64/mpam: Split header files into suitable location +- arm64/mpam: resctrl: Export resource's properties to info directory +- arm64/mpam: Add resctrl_ctrl_feature structure to manage ctrl features +- arm64/mpam: Add wait queue for monitor alloc and free +- arm64/mpam: Remap reqpartid,pmg to rmid and intpartid to closid +- arm64/mpam: Separate internal and downstream priority event +- arm64/mpam: Enabling registering and logging error interrupts +- arm64/mpam: Fix MPAM_ESR intPARTID_range error +- arm64/mpam: Integrate monitor data for Memory Bandwidth if cdp enabled +- arm64/mpam: Add hook-events id for ctrl features +- arm64/mpam: Re-plan intpartid narrowing process +- arm64/mpam: Restore extend ctrls' max width for checking schemata input +- arm64/mpam: Squash default priority from mpam device to class +- arm64/mpam: Store intpri and dspri for mpam device reset +- arm64/mpam: resctrl: Support priority and hardlimit(Memory bandwidth) configuration +- arm64/mpam: resctrl: Support cpus' monitoring for mon group +- arm64/mpam: resctrl: collect child mon 
group's monitor data +- arm64/mpam: Using software-defined id for rdtgroup instead of 32-bit integer +- arm64/mpam: Implement intpartid narrowing process +- arm64/mpam: resctrl: Remove unnecessary CONFIG_ARM64 +- arm64/mpam: resctrl: Remove ctrlmon sysfile +- arm64/mpam: Clean up header files and rearrange declarations +- arm64/mpam: resctrl: Support cdp on monitoring data +- arm64/mpam: Support cdp on allocating monitors +- arm64/mpam: resctrl: Move ctrlmon sysfile write/read function to mpam_ctrlmon.c +- arm64/mpam: resctrl: Update closid alloc and free process with bitmap +- arm64/mpam: resctrl: Update resources reset process +- arm64/mpam: Support cdp in mpam_sched_in() +- arm64/mpam: resctrl: Write and read schemata by schema_list +- arm64/mpam: resctrl: Use resctrl_group_init_alloc() to init schema list +- arm64/mpam: resctrl: Add helpers for init and destroy schemata list +- arm64/mpam: resctrl: Supplement cdpl2,cdpl3 for mount options +- arm64/mpam: resctrl: Append schemata CDP definitions +- arm64/mpam: resctrl: Rebuild configuration and monitoring pipeline +- arm64/mpam: Probe partid,pmg and feature capabilities' ranges from classes +- arm64/mpam: Add helper for getting MSCs' configuration +- arm64/mpam: Migrate old MSCs' discovery process to new branch +- drivers: base: cacheinfo: Add helper to search cacheinfo by of_node +- arm64/mpam: Implement helpers for handling configuration and monitoring +- arm64/mpam: resctrl: Handle cpuhp and resctrl_dom allocation +- arm64/mpam: resctrl: Re-synchronise resctrl's view of online CPUs +- arm64/mpam: Init resctrl resources' info from resctrl_res selected +- arm64/mpam: Pick MPAM resources and events for resctrl_res exported +- arm64/mpam: Allocate mpam component configuration arrays +- arm64/mpam: Summarize feature support during mpam_enable() +- arm64/mpam: Reset controls when CPUs come online +- arm64/mpam: Add helper for getting mpam sysprops +- arm64/mpam: Probe the features resctrl supports +- arm64/mpam: Supplement MPAM MSC register layout definitions +- arm64/mpam: Probe supported partid/pmg ranges from devices +- cacheinfo: Provide a helper to find a cacheinfo leaf +- arm64/mpam: Add mpam driver discovery phase and kbuild boiler plate +- arm64/mpam: Preparing for MPAM refactoring + + +* Mon Feb 22 2021 Cheng Jian - 4.19.90-2102.2.0.0057 +- powerpc: fix a compiling error for 'access_ok' +- mmap: fix a compiling error for 'MAP_CHECKNODE' +- futex: sched: fix UAF when free futex_exit_mutex in free_task() +- futex: sched: fix kabi broken in task_struct +- futex: Prevent exit livelock +- futex: Provide distinct return value when owner is exiting +- futex: Add mutex around futex exit +- futex: Provide state handling for exec() as well +- futex: Sanitize exit state handling +- futex: Mark the begin of futex exit explicitly +- futex: Set task::futex_state to DEAD right after handling futex exit +- futex: Split futex_mm_release() for exit/exec +- exit/exec: Seperate mm_release() +- futex: Replace PF_EXITPIDONE with a state +- futex: Move futex exit handling into futex code +- net: Disable NETIF_F_HW_TLS_RX when RXCSUM is disabled +- ipv6: set multicast flag on the multicast route +- net_sched: reject silly cell_log in qdisc_get_rtab() +- net_sched: avoid shift-out-of-bounds in tcindex_set_parms() +- ipv6: create multicast route with RTPROT_KERNEL +- udp: mask TOS bits in udp_v4_early_demux() +- kasan: fix incorrect arguments passing in kasan_add_zero_shadow +- kasan: fix unaligned address is unhandled in kasan_remove_zero_shadow +- skbuff: 
back tiny skbs with kmalloc() in __netdev_alloc_skb() too +- netfilter: rpfilter: mask ecn bits before fib lookup +- driver core: Extend device_is_dependent() +- dm integrity: fix a crash if "recalculate" used without "internal_hash" +- dm: avoid filesystem lookup in dm_get_dev_t() +- ACPI: scan: Make acpi_bus_get_device() clear return pointer on error +- net: ipv6: Validate GSO SKB before finish IPv6 processing +- net: skbuff: disambiguate argument and member for skb_list_walk_safe helper +- net: introduce skb_list_walk_safe for skb segment walking +- tipc: fix NULL deref in tipc_link_xmit() +- net: avoid 32 x truesize under-estimation for tiny skbs +- dm integrity: fix flush with external metadata device +- netfilter: nf_nat: Fix memleak in nf_nat_init +- netfilter: conntrack: fix reading nf_conntrack_buckets +- net: sunrpc: interpret the return value of kstrtou32 correctly +- mm, slub: consider rest of partial list if acquire_slab() fails +- ext4: fix superblock checksum failure when setting password salt +- NFS: nfs_igrab_and_active must first reference the superblock +- NFS/pNFS: Fix a leak of the layout 'plh_outstanding' counter +- pNFS: Mark layout for return if return-on-close was not sent +- NFS4: Fix use-after-free in trace_event_raw_event_nfs4_set_lock +- dump_common_audit_data(): fix racy accesses to ->d_name +- bfq: Fix computation of shallow depth +- dm integrity: fix the maximum number of arguments +- dm snapshot: flush merged data before committing metadata +- mm/hugetlb: fix potential missing huge page size info +- ACPI: scan: Harden acpi_device_add() against device ID overflows +- block: fix use-after-free in disk_part_iter_next +- vhost_net: fix ubuf refcount incorrectly when sendmsg fails +- virtio_net: Fix recursive call to cpus_read_lock() +- proc: fix lookup in /proc/net subdirectories after setns(2) +- proc: change ->nlink under proc_subdir_lock +- lib/genalloc: fix the overflow when size is too big +- scsi: scsi_transport_spi: Set RQF_PM for domain validation commands +- workqueue: Kick a worker based on the actual activation of delayed works +- dm verity: skip verity work if I/O error when system is shutting down +- module: delay kobject uevent until after module init call +- NFSv4: Fix a pNFS layout related use-after-free race when freeing the inode +- quota: Don't overflow quota file offsets +- module: set MODULE_STATE_GOING state when a module fails to load +- fcntl: Fix potential deadlock in send_sig{io, urg}() +- null_blk: Fix zone size initialization +- ext4: don't remount read-only with errors=continue on reboot +- vfio/pci: Move dummy_resources_list init in vfio_pci_probe() +- arm64: module/ftrace: intialize PLT at load time +- arm64: module: rework special section handling +- net: drop bogus skb with CHECKSUM_PARTIAL and offset beyond end of trimmed packet +- net/mlx5e: Fix two double free cases +- net/mlx5e: Fix memleak in mlx5e_create_l2_table_groups +- net: ipv6: fib: flush exceptions when purging route +- net: fix pmtu check in nopmtudisc mode +- net: ip: always refragment ip defragmented packets +- net: vlan: avoid leaks on register_vlan_dev() failures +- netfilter: xt_RATEEST: reject non-null terminated string from userspace +- netfilter: ipset: fix shift-out-of-bounds in htable_bits() +- netfilter: x_tables: Update remaining dereference to RCU +- net-sysfs: take the rtnl lock when accessing xps_rxqs_map and num_tc +- net-sysfs: take the rtnl lock when storing xps_rxqs +- net: sched: prevent invalid Scell_log shift count +- erspan: fix version 1 
check in gre_parse_header() +- net: hns: fix return value check in __lb_other_process() +- ipv4: Ignore ECN bits for fib lookups in fib_compute_spec_dst() +- net-sysfs: take the rtnl lock when accessing xps_cpus_map and num_tc +- net-sysfs: take the rtnl lock when storing xps_cpus +- i40e: Fix Error I40E_AQ_RC_EINVAL when removing VFs +- lwt: Disable BH too in run_lwt_bpf() +- net/mlx5: Properly convey driver version to firmware +- vxlan: Copy needed_tailroom from lowerdev +- vxlan: Add needed_headroom for lower device +- ixgbe: avoid premature Rx buffer reuse +- xsk: Fix xsk_poll()'s return type +- net/mlx4_en: Handle TX error CQE +- net/mlx4_en: Avoid scheduling restart task if it is already running +- net/mlx5: Fix wrong address reclaim when command interface is down +- i40e: Fix removing driver while bare-metal VFs pass traffic +- net/tls: Protect from calling tls_dev_del for TLS RX twice +- net/tls: missing received data after fast remote close +- clocksource/drivers/arch_timer: Fix vdso_fix compile error for arm32 +- scsi/hifc:Fix the bug that the system may be oops during unintall hifc module. +- KVM: Enable PUD huge mappings only on 1620 +- fs: fix files.usage bug when move tasks +- scsi: do quiesce for enclosure driver +- ext4: fix bug for rename with RENAME_WHITEOUT +- mm: fix kabi broken +- mm: memcontrol: add struct mem_cgroup_extension +- mm: thp: don't need care deferred split queue in memcg charge move path +- mm: vmscan: protect shrinker idr replace with CONFIG_MEMCG +- mm: thp: make deferred split shrinker memcg aware +- mm: shrinker: make shrinker not depend on memcg kmem +- mm: move mem_cgroup_uncharge out of __page_cache_release() +- mm: thp: extract split_queue_* into a struct +- bonding: add documentation for peer_notif_delay +- bonding: fix value exported by Netlink for peer_notif_delay +- bonding: add an option to specify a delay between peer notifications +- arm64/ascend: mm: Fix hugetlb check node error +- fix virtio_gpu use-after-free while creating dumb +- ext4: add ext3 report error to userspace by netlink +- arm64/ascend: mm: Fix arm32 compile warnings +- Kconfig: disable KTASK by default +- netpoll: accept NULL np argument in netpoll_send_skb() +- netpoll: netpoll_send_skb() returns transmit status +- netpoll: move netpoll_send_skb() out of line +- netpoll: remove dev argument from netpoll_send_skb_on_dev() +- efi/arm: Revert "Defer persistent reservations until after paging_init()" +- arm64, mm, efi: Account for GICv3 LPI tables in static memblock reserve table +- block: better deal with the delayed not supported case in blk_cloned_rq_check_limits +- block: Return blk_status_t instead of errno codes +- ASoC: msm8916-wcd-digital: Select REGMAP_MMIO to fix build error +- irqchip/gic-v3: Fix compiling error on ARM32 with GICv3 +- PCI: Fix pci_slot_release() NULL pointer dereference +- md/cluster: fix deadlock when node is doing resync job +- md/cluster: block reshape with remote resync job +- ext4: fix deadlock with fs freezing and EA inodes +- ext4: fix a memory leak of ext4_free_data +- ACPI: PNP: compare the string length in the matching_id() +- Revert "ACPI / resources: Use AE_CTRL_TERMINATE to terminate resources walks" +- nfs_common: need lock during iterate through the list +- clocksource/drivers/arm_arch_timer: Correct fault programming of CNTKCTL_EL1.EVNTI +- NFS: switch nfsiod to be an UNBOUND workqueue. 
+- lockd: don't use interval-based rebinding over TCP +- SUNRPC: xprt_load_transport() needs to support the netid "rdma6" +- PCI: iproc: Fix out-of-bound array accesses +- PCI: Fix overflow in command-line resource alignment requests +- PCI: Bounds-check command-line resource alignment requests +- genirq/irqdomain: Don't try to free an interrupt that has no mapping +- spi: fix resource leak for drivers without .remove callback +- scsi: core: Fix VPD LUN ID designator priorities +- selinux: fix inode_doinit_with_dentry() LABEL_INVALID error handling +- sched: Reenable interrupts in do_sched_yield() +- sched/deadline: Fix sched_dl_global_validate() +- selinux: fix error initialization in inode_doinit_with_dentry() +- serial_core: Check for port state when tty is in error state +- arm64: syscall: exit userspace before unmasking exceptions +- netfilter: x_tables: Switch synchronization to RCU +- block: factor out requeue handling from dispatch code +- arm64: Change .weak to SYM_FUNC_START_WEAK_PI for arch/arm64/lib/mem*.S +- arm64: lse: Fix LSE atomics with LLVM +- arm64: lse: fix LSE atomics with LLVM's integrated assembler +- net: bridge: vlan: fix error return code in __vlan_add() +- tcp: fix cwnd-limited bug for TSO deferral where we send nothing +- tcp: select sane initial rcvq_space.space for big MSS +- netfilter: nf_tables: avoid false-postive lockdep splat +- tracing: Fix userstacktrace option for instances +- mm/swapfile: do not sleep with a spin lock held +- mm: list_lru: set shrinker map bit when child nr_items is not zero +- cifs: fix potential use-after-free in cifs_echo_request() +- ftrace: Fix updating FTRACE_FL_TRAMP +- net: ip6_gre: set dev->hard_header_len when using header_ops +- ipv4: Fix tos mask in inet_rtm_getroute() +- netfilter: bridge: reset skb->pkt_type after NF_INET_POST_ROUTING traversal +- bonding: wait for sysfs kobject destruction before freeing struct slave +- tcp: Set INET_ECN_xmit configuration in tcp_reinit_congestion_control +- sock: set sk_err to ee_errno on dequeue from errq +- ipv6: addrlabel: fix possible memory leak in ip6addrlbl_net_init +- efivarfs: revert "fix memory leak in efivarfs_create()" +- scsi: libiscsi: Fix NOP race condition +- nvme: free sq/cq dbbuf pointers when dbbuf set fails +- proc: don't allow async path resolution of /proc/self components +- arm64: pgtable: Ensure dirty bit is preserved across pte_wrprotect() +- arm64: pgtable: Fix pte_accessible() +- scsi: libiscsi: fix task hung when iscsid deamon exited +- mmap: fix a compiling error for 'MAP_PA32BIT' +- hifc: remove unnecessary __init specifier +- armv7 fix compile error +- cputime: fix undefined reference to get_idle_time when CONFIG_PROC_FS disabled +- memcg/ascend: enable kmem cgroup by default for ascend +- memcg/ascend: Check sysctl oom config for memcg oom +- bdi: fix compiler error in bdi_get_dev_name() +- arm64: fix compile error when CONFIG_HOTPLUG_CPU is disabled +- scsi: target: iscsi: Fix cmd abort fabric stop race +- scsi: target: fix hang when multiple threads try to destroy the same iscsi session +- scsi: target: remove boilerplate code +- ext4: Protect superblock modifications with a buffer lock +- arm64: arch_timer: only do cntvct workaround on VDSO path on D05 +- libata: transport: Use scnprintf() for avoiding potential buffer overflow +- Document: In the binding document, add enable-init-all-GICR field description. +- irqchip/irq-gic-v3: Add workaround bindings in device tree to init ts core GICR. 
+- asm-generic/io.h: Fix !CONFIG_GENERIC_IOMAP pci_iounmap() implementation +- hugetlbfs: Add dependency with ascend memory features +- net/mlx5: Disable QoS when min_rates on all VFs are zero +- sctp: change to hold/put transport for proto_unreach_timer +- net: Have netpoll bring-up DSA management interface +- mlxsw: core: Use variable timeout for EMAD retries +- ah6: fix error return code in ah6_input() +- tipc: fix memory leak in tipc_topsrv_start() +- sctp: Fix COMM_LOST/CANT_STR_ASSOC err reporting on big-endian platforms +- libceph: clear con->out_msg on Policy::stateful_server faults +- mlxsw: core: Fix use-after-free in mlxsw_emad_trans_finish() +- tipc: fix memory leak caused by tipc_buf_append() +- mlxsw: core: Fix memory leak on module removal +- irqchip/gic-v3-its: Unconditionally save/restore the ITS state on suspend. +- sbsa_gwdt: Add WDIOF_PRETIMEOUT flag to watchdog_info at defination +- NMI: Enable arm-pmu interrupt as NMI in Acensed. +- arm64/ascend: mm: Add MAP_CHECKNODE flag to check node hugetlb +- config: enable CONFIG_NVME_MULTIPATH by default +- mm/userfaultfd: do not access vma->vm_mm after calling handle_userfault() +- ext4: fix bogus warning in ext4_update_dx_flag() +- efivarfs: fix memory leak in efivarfs_create() +- libfs: fix error cast of negative value in simple_attr_write() +- xfs: revert "xfs: fix rmap key and record comparison functions" +- fail_function: Remove a redundant mutex unlock +- xfs: strengthen rmap record flags checking +- xfs: fix the minrecs logic when dealing with inode root child blocks +- ip_tunnels: Set tunnel option flag when tunnel metadata is present +- perf lock: Don't free "lock_seq_stat" if read_count isn't zero +- vfs: remove lockdep bogosity in __sb_start_write +- arm64: psci: Avoid printing in cpu_psci_cpu_die() +- tcp: only postpone PROBE_RTT if RTT is < current min_rtt estimate +- page_frag: Recover from memory pressure +- net: bridge: add missing counters to ndo_get_stats64 callback +- inet_diag: Fix error path to cancel the meseage in inet_req_diag_fill() +- devlink: Add missing genlmsg_cancel() in devlink_nl_sb_port_pool_fill() +- Convert trailing spaces and periods in path components +- net: sch_generic: fix the missing new qdisc assignment bug +- reboot: fix overflow parsing reboot cpu number +- Revert "kernel/reboot.c: convert simple_strtoul to kstrtoint" +- perf scripting python: Avoid declaring function pointers with a visibility attribute +- random32: make prandom_u32() output unpredictable +- net: Update window_clamp if SOCK_RCVBUF is set +- IPv6: Set SIT tunnel hard_header_len to zero +- don't dump the threads that had been already exiting when zapped. 
+- selinux: Fix error return code in sel_ib_pkey_sid_slow() +- ocfs2: initialize ip_next_orphan +- futex: Don't enable IRQs unconditionally in put_pi_state() +- uio: Fix use-after-free in uio_unregister_device() +- ext4: unlock xattr_sem properly in ext4_inline_data_truncate() +- ext4: correctly report "not supported" for {usr, grp}jquota when !CONFIG_QUOTA +- perf: Fix get_recursion_context() +- xfs: fix a missing unlock on error in xfs_fs_map_blocks +- xfs: fix brainos in the refcount scrubber's rmap fragment processor +- xfs: fix rmap key and record comparison functions +- xfs: set the unwritten bit in rmap lookup flags in xchk_bmap_get_rmapextents +- xfs: fix flags argument to rmap lookup when converting shared file rmaps +- nbd: fix a block_device refcount leak in nbd_release +- tick/common: Touch watchdog in tick_unfreeze() on all CPUs +- netfilter: use actual socket sk rather than skb sk when routing harder +- tpm: efi: Don't create binary_bios_measurements file for an empty log +- xfs: fix scrub flagging rtinherit even if there is no rt device +- xfs: flush new eof page on truncate to avoid post-eof corruption +- perf tools: Add missing swap for ino_generation +- netfilter: ipset: Update byte and packet counters regardless of whether they match +- xfs: set xefi_discard when creating a deferred agfl free log intent item +- net: xfrm: fix a race condition during allocing spi +- time: Prevent undefined behaviour in timespec64_to_ns() +- fork: fix copy_process(CLONE_PARENT) race with the exiting ->real_parent +- scsi: core: Don't start concurrent async scan on same host +- blk-cgroup: Pre-allocate tree node on blkg_conf_prep +- blk-cgroup: Fix memleak on error path +- futex: Handle transient "ownerless" rtmutex state correctly +- tracing: Fix out of bounds write in get_trace_buf +- ftrace: Handle tracing when switching between context +- ftrace: Fix recursion check for NMI test +- ring-buffer: Fix recursion protection transitions between interrupt context +- kthread_worker: prevent queuing delayed work from timer_fn when it is being canceled +- mm: mempolicy: fix potential pte_unmap_unlock pte error +- Fonts: Replace discarded const qualifier +- ptrace: fix task_join_group_stop() for the case when current is traced +- device property: Don't clear secondary pointer for shared primary firmware node +- device property: Keep secondary firmware node secondary by type +- ext4: fix invalid inode checksum +- ext4: fix error handling code in add_new_gdb +- ext4: fix leaking sysfs kobject after failed mount +- ring-buffer: Return 0 on success from ring_buffer_resize() +- perf python scripting: Fix printable strings in python3 scripts +- sgl_alloc_order: fix memory leak +- nbd: make the config put is called before the notifying the waiter +- cifs: handle -EINTR in cifs_setattr +- ext4: Detect already used quota file early +- ACPI: Add out of bounds and numa_off protections to pxm_to_node() +- xfs: don't free rt blocks when we're doing a REMAP bunmapi call +- arm64/mm: return cpu_all_mask when node is NUMA_NO_NODE +- uio: free uio id after uio file node is freed +- arm64: topology: Stop using MPIDR for topology information +- xfs: fix realtime bitmap/summary file truncation when growing rt volume +- mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race +- futex: Fix incorrect should_fail_futex() handling +- serial: pl011: Fix lockdep splat when handling magic-sysrq interrupt +- fuse: fix page dereference after free +- tcp: Prevent low rmem stalls with SO_RCVLOWAT. 
+- netem: fix zero division in tabledist +- efivarfs: Replace invalid slashes with exclamation marks in dentries. +- arm64: Run ARCH_WORKAROUND_1 enabling code on all CPUs +- config: set default value of CONFIG_TEST_FREE_PAGES +- mm/page_alloc.c: fix freeing non-compound pages +- mm, hwpoison: double-check page count in __get_any_page() +- mm: fix a race during THP splitting +- mm: fix check_move_unevictable_pages() on THP +- mlock: fix unevictable_pgs event counts on THP +- mm: swap: memcg: fix memcg stats for huge pages +- mm: swap: fix vmstats for huge pages +- mm: move nr_deactivate accounting to shrink_active_list() +- blk-throttle: don't check whether or not lower limit is valid if CONFIG_BLK_DEV_THROTTLING_LOW is off +- blk-cgroup: prevent rcu_sched detected stalls warnings in blkg_destroy_all() + + +* Tue Feb 09 2021 Cheng Jian - 4.19.90-2102.1.0.0056 +- nbd: freeze the queue while we're adding connections +- nbd: Fix memory leak in nbd_add_socket +- futex: Handle faults correctly for PI futexes +- futex: Simplify fixup_pi_state_owner() +- futex: Use pi_state_update_owner() in put_pi_state() +- rtmutex: Remove unused argument from rt_mutex_proxy_unlock() +- futex: Provide and use pi_state_update_owner() +- futex: Replace pointless printk in fixup_owner() +- futex: Ensure the correct return value from futex_lock_pi() +- inet: do not call sublist_rcv on empty list +- netfilter: add and use nf_hook_slow_list() +- netfilter: clear skb->next in NF_HOOK_LIST() +- scsi: target: Fix XCOPY NAA identifier lookup +- nfsd4: readdirplus shouldn't return parent of export +- HID: core: Correctly handle ReportSize being zero + +* Mon Jan 11 2021 Yang Yingliang - 4.19.90-2101.1.0.0055 +- net: hns3: update hns3 version to 1.9.38.9 +- net: hns3: optimize the process of queue reset +- net: hns3: fix loopback test of serdes and phy is failed if duplex is half +- net: hns3: format the output of the MAC address +- net: hns3: rename hns-customer to hns3_extension +- net: hns3: fix RoCE calling the wrong function problem +- net: hns3: Clear the CMDQ registers before unmapping BAR region +- net: hns3: fix for loopback failure when vlan filter is enable +- net: hns3: replace snprintf with scnprintf in hns3_dbg_cmd_read +- net: hns3: delete unused codes +- net: hns3: fix missing help info in debugfs +- net: hns3: add trace event support for PF/VF mailbox +- net: hns3: fix loopback failed when phy has no .set_loopback interface +- net: hns3: clear hardware resource when loading driver +- net: hns3: fix incorrect print value of vf_id and vport_id +- net: hns3: fix bug when initialize the RSS tuples for SCTP6 +- net: hns3: solve the problem of array uninitialized +- net: hns3: clean up for some coding style. +- net: hns3: adds a kernel message when restart autoneg. 
+- net: hns3: modify a print message +- net: hns3: provide .get_cmdq_stat interface for the client +- net: hns3: add a hardware error detect type +- net: hns3: implement .process_hw_error for hns3 client +- net: hns3: modify location of one print information +- net/hinic: update hinic version to 2.3.2.17 +- net/hinic: Modify the printing level of some logs +- net/hinic: Fix oops when memory is insufficient +- net/hinic: Set default features when probe netdev +- RDMA/hns: fix eth extended SGE err +- scsi: hisi_sas: Delete down() when handle Block-IO +- nvme-fabrics: reject I/O to offline device +- PCI: Add pci reset quirk for Huawei Intelligent NIC virtual function +- nvme: fix nvme_stop_queues cost long time error +- scsi: hisi_sas: fix logic bug when alloc device with MAX device num == 1 +- scsi: hisi_sas: mask corresponding RAS interrupts for hilink DFX exception +- scsi: hisi_sas: Directly trigger SCSI error handling for completion errors +- scsi: hisi_sas: use wait_for_completion_timeout() when clearing ITCT +- scsi: hisi_sas: Fix the conflict between device gone and host reset +- scsi: hisi_sas: Update all the registers after suspend and resume +- scsi: hisi_sas: Make slot buf minimum allocation of PAGE_SIZE +- scsi: hisi_sas: Reduce HISI_SAS_SGE_PAGE_CNT in size +- scsi: flip the default on use_clustering +- RDMA/hns: Disable UD on HIP08 +- powerpc/rtas: Restrict RTAS requests from userspace +- mwifiex: Fix possible buffer overflows in mwifiex_cmd_802_11_ad_hoc_start +- xenbus/xenbus_backend: Disallow pending watch messages +- xen/xenbus: Count pending messages for each watch +- xen/xenbus/xen_bus_type: Support will_handle watch callback +- xen/xenbus: Add 'will_handle' callback support in xenbus_watch_path() +- xen/xenbus: Allow watches discard events before queueing +- xen-blkback: set ring->xenblkd to NULL after kthread_stop() +- HID: core: Sanitize event code and type when mapping input +- cfg80211: add missing policy for NL80211_ATTR_STATUS_CODE +- speakup: Reject setting the speakup line discipline outside of speakup +- tty: Fix ->session locking +- tty: Fix ->pgrp locking in tiocspgrp() +- ALSA: rawmidi: Fix racy buffer resize under concurrent accesses +- jfs: Fix array index bounds check in dbAdjTree + +* Tue Dec 22 2020 Yang Yingliang - 4.19.90-2012.5.0.0054 +- Revert "mm/memory_hotplug: refrain from adding memory into an impossible node" + +* Mon Dec 21 2020 Yang Yingliang - 4.19.90-2012.4.0.0053 +- defconfig: update the defconfigs to support NVDIMM + +* Thu Dec 17 2020 Yang Yingliang - 4.19.90-2012.3.0.0052 +- scsi/hifc: fix the issue that the system is suspended during the pres +- mm: thp: make the THP mapcount atomic against __split_huge_pmd_locked() +- romfs: fix uninitialized memory leak in romfs_dev_read() + +* Tue Dec 15 2020 Yang Yingliang - 4.19.90-2012.2.0.0051 +- scsi: libiscsi: Fix cmds hung when sd_shutdown + +* Thu Dec 10 2020 Yang Yingliang - 4.19.90-2012.1.0.0050 +- fanotify: fix merging marks masks with FAN_ONDIR +- scsi/hifc: fix the issue of npiv cannot be deleted + +* Sat Nov 28 2020 Yang Yingliang - 4.19.90-2011.6.0.0049 +- refcount_t: Add ACQUIRE ordering on success for dec(sub)_and_test() variants +- x86/asm: 'Simplify' GEN_*_RMWcc() macros +- Revert "refcount_t: Add ACQUIRE ordering on success for dec(sub)_and_test() variants" +- refcount_t: Add ACQUIRE ordering on success for dec(sub)_and_test() variants +- powerpc/64s: flush L1D after user accesses +- powerpc/uaccess: Evaluate macro arguments once, before user access is allowed +- powerpc: Fix 
__clear_user() with KUAP enabled +- powerpc: Implement user_access_begin and friends +- powerpc: Add a framework for user access tracking +- powerpc/64s: flush L1D on kernel entry +- powerpc/64s: move some exception handlers out of line + +* Mon Nov 23 2020 Yang Yingliang - 4.19.90-2011.4.0.0048 +- Bluetooth: fix kernel oops in store_pending_adv_report +- vt: Disable KD_FONT_OP_COPY +- fbcon: Fix global-out-of-bounds read in fbcon_get_font() +- Fonts: Support FONT_EXTRA_WORDS macros for built-in fonts +- fbdev, newport_con: Move FONT_EXTRA_WORDS macros into linux/font.h +- speakup: Do not let the line discipline be used several times +- mm/page_idle.c: skip offline pages +- mm/memory_hotplug: refrain from adding memory into an impossible node +- khugepaged: drain LRU add pagevec after swapin +- khugepaged: drain all LRU caches before scanning pages +- khugepaged: do not stop collapse if less than half PTEs are referenced +- powercap: restrict energy meter to root access +- Input: sunkbd - avoid use-after-free in teardown paths +- nbd: don't update block size after device is started + +* Wed Nov 18 2020 Yang Yingliang - 4.19.90-2011.3.0.0047 +- tools: perf: Fix build error in v4.19.y +- nvme-fabrics: modify default value to reconnect forever +- nvme-rdma: add module param to turn off inline data dynamically +- nvme-rdma: fix crash casue by destroy id while resolving addr +- nvme-rdma: avoid race between time out and tear down +- nvme-core: introduce sync io queues +- nvme-rdma: avoid repeated request completion +- nvme-rdma: fix crash due to incorrect cqe +- nvme-multipath: fix crash in nvme_mpath_clear_ctrl_paths +- nvme: fix controller removal race with scan work +- nvme-multipath: fix bogus request queue reference put +- nvme-multipath: fix deadlock due to head->lock +- nvme: don't protect ns mutation with ns->head->lock +- nvme: clear any SGL flags in passthru commands +- nvme: disable streams when get stream params failed +- nvme: revalidate after verifying identifiers +- nvme: release namespace head reference on error +- nvme: unlink head after removing last namespace +- nvme: Make nvme_uninit_ctrl symmetric to nvme_init_ctrl +- nvme: Fix ctrl use-after-free during sysfs deletion +- nvme-rdma: fix crash when connect rejected +- nvme-rdma: fix timeout handler +- nvme: Fix parsing of ANA log page +- nvme: release ida resources +- nvme: Add compat_ioctl handler for NVME_IOCTL_SUBMIT_IO +- nvme: introduce "Command Aborted By host" status code +- nvme: enable aen regardless of the presence of I/O queues +- nvme: make nvme_identify_ns propagate errors back +- nvme: pass status to nvme_error_status +- nvme: don't abort completed request in nvme_cancel_request +- nvme: put ns_head ref if namespace fails allocation +- nvme: implement Enhanced Command Retry +- nvme: wait until all completed request's complete fn is called +- blk-mq: introduce blk_mq_tagset_wait_completed_request() +- blk-mq: introduce blk_mq_request_completed() +- nvme-rdma: fix a segmentation fault during module unload +- mlx5: remove support for ib_get_vector_affinity +- nvme-rdma: fix possible use-after-free in connect timeout +- nvme-rdma: fix possible use-after-free in connect error flow +- nvme-rdma: use dynamic dma mapping per command +- nvme-rdma: remove redundant reference between ib_device and tagset +- scsi/hifc: add hifc driver compile config module +- scsi/hifc: add hifc driver FC service module +- scsi/hifc: add hifc driver scsi module +- scsi/hifc: add hifc driver io module +- scsi/hifc: add hifc driver port 
resource module +- scsi/hifc: add hifc driver port manager module +- scsi/hifc: add hifc driver chip resource module +- perf/core: Fix a memory leak in perf_event_parse_addr_filter() +- mm/rmap: fixup copying of soft dirty and uffd ptes +- mm: madvise: fix vma user-after-free +- svcrdma: fix bounce buffers for unaligned offsets and multiple pages +- net/mlx5: Don't call timecounter cyc2time directly from 1PPS flow +- net/tls: sendfile fails with ktls offload +- tipc: fix the skb_unshare() in tipc_buf_append() +- mlx4: handle non-napi callers to napi_poll +- net/mlx5e: Fix VLAN create flow +- net/mlx5e: Fix VLAN cleanup flow +- openvswitch: handle DNAT tuple collision +- xfrmi: drop ignore_df check before updating pmtu +- net: openvswitch: use div_u64() for 64-by-32 divisions +- e1000: Do not perform reset in reset_task if we are already down +- tipc: fix memory leak in service subscripting +- net: openvswitch: use u64 for meter bucket +- svcrdma: Fix leak of transport addresses +- net: sch_generic: aviod concurrent reset and enqueue op for lockless qdisc +- cpufreq: CPPC: put ACPI table after using it +- cpufreq : CPPC: Break out if HiSilicon CPPC workaround is matched +- tty/amba-pl011: Call acpi_put_table() to fix memory leak +- irqchip/gicv3: Call acpi_put_table() to fix memory leak +- partitions/efi: Fix partition name parsing in GUID partition entry +- tty: make FONTX ioctl use the tty pointer they were actually passed +- vt: keyboard, extend func_buf_lock to readers +- vt: keyboard, simplify vt_kdgkbsent +- binder: fix UAF when releasing todo list +- bpf: Fix clobbering of r2 in bpf_gen_ld_abs +- bpf: Remove recursion prevention from rcu free callback +- ipvs: Fix uninit-value in do_ip_vs_set_ctl() +- xfs: make sure the rt allocator doesn't run off the end +- ip_gre: set dev->hard_header_len and dev->needed_headroom properly +- crypto: ccp - fix error handling +- netfilter: nf_fwd_netdev: clear timestamp in forwarding path +- netfilter: conntrack: connection timeout after re-register +- vfio iommu type1: Fix memory leak in vfio_iommu_type1_pin_pages +- vfio/pci: Clear token on bypass registration failure +- ext4: limit entries returned when counting fsmap records +- watchdog: Use put_device on error +- watchdog: Fix memleak in watchdog_cdev_register +- watchdog: initialize device before misc_register +- ramfs: fix nommu mmap with gaps in the page cache +- lib/crc32.c: fix trivial typo in preprocessor condition +- xfs: fix high key handling in the rt allocator's query_range function +- xfs: limit entries returned when counting fsmap records +- mm, oom_adj: don't loop through tasks in __set_oom_adj when not necessary +- mm/memcg: fix device private memcg accounting +- netfilter: nf_log: missing vlan offload tag and proto +- ipvs: clear skb->tstamp in forwarding path +- cifs: Return the error from crypt_message when enc/dec key not found. 
+- cifs: remove bogus debug code +- icmp: randomize the global rate limiter +- tcp: fix to update snd_wl1 in bulk receiver fast path +- net/sched: act_tunnel_key: fix OOB write in case of IPv6 ERSPAN tunnels +- net/ipv4: always honour route mtu during forwarding +- net: fix pos incrementment in ipv6_route_seq_next +- ipv4: Restore flowi4_oif update before call to xfrm_lookup_route +- mm: khugepaged: recalculate min_free_kbytes after memory hotplug as expected by khugepaged +- perf: Fix task_function_call() error handling +- bonding: set dev->needed_headroom in bond_setup_by_slave() +- xfrm: Use correct address family in xfrm_state_find +- xfrm: clone whole liftime_cur structure in xfrm_do_migrate +- xfrm: clone XFRMA_SEC_CTX in xfrm_do_migrate +- xfrm: clone XFRMA_REPLAY_ESN_VAL in xfrm_do_migrate +- xfrm: clone XFRMA_SET_MARK in xfrm_do_migrate +- sctp: fix sctp_auth_init_hmacs() error path +- cifs: Fix incomplete memory allocation on setxattr path +- mm/khugepaged: fix filemap page_to_pgoff(page) != offset +- nvme-core: put ctrl ref when module ref get fail +- usermodehelper: reset umask to default before executing user process +- netfilter: ctnetlink: add a range check for l3/l4 protonum +- ep_create_wakeup_source(): dentry name can change under you... +- epoll: EPOLL_CTL_ADD: close the race in decision to take fast path +- epoll: replace ->visited/visited_list with generation count +- epoll: do not insert into poll queues until all sanity checks are done +- mm: don't rely on system state to detect hot-plug operations +- mm: replace memmap_context by meminit_context +- random32: Restore __latent_entropy attribute on net_rand_state +- nfs: Fix security label length not being reset +- nvme-core: get/put ctrl and transport module in nvme_dev_open/release() +- ftrace: Move RCU is watching check after recursion check +- mm, THP, swap: fix allocating cluster for swapfile by mistake +- kprobes: Fix to check probe enabled before disarm_kprobe_ftrace() +- tracing: fix double free +- bpf: Fix a rcu warning for bpffs map pretty-print +- lockdep: fix order in trace_hardirqs_off_caller() +- nvme: explicitly update mpath disk capacity on revalidation +- perf parse-events: Use strcmp() to compare the PMU name +- vfio/pci: fix racy on error and request eventfd ctx +- nvme: fix possible deadlock when I/O is blocked +- cifs: Fix double add page to memcg when cifs_readpages +- vfio/pci: Clear error and request eventfd ctx after releasing +- perf kcore_copy: Fix module map when there are no modules loaded +- perf metricgroup: Free metric_events on error +- perf util: Fix memory leak of prefix_if_not_in +- perf stat: Fix duration_time value for higher intervals +- perf evsel: Fix 2 memory leaks +- vfio/pci: fix memory leaks of eventfd ctx +- printk: handle blank console arguments passed in. 
+- arm64/cpufeature: Drop TraceFilt feature exposure from ID_DFR0 register +- fuse: don't check refcount after stealing page +- perf mem2node: Avoid double free related to realloc +- bdev: Reduce time holding bd_mutex in sync in blkdev_close() +- mm/mmap.c: initialize align_offset explicitly for vm_unmapped_area +- mm/vmscan.c: fix data races using kswapd_classzone_idx +- mm/filemap.c: clear page error before actual read +- mm/kmemleak.c: use address-of operator on section symbols +- NFS: Fix races nfs_page_group_destroy() vs nfs_destroy_unlinked_subrequests() +- PCI: pciehp: Fix MSI interrupt race +- SUNRPC: Fix a potential buffer overflow in 'svc_print_xprts()' +- nvme-multipath: do not reset on unknown status +- perf cpumap: Fix snprintf overflow check +- serial: 8250: 8250_omap: Terminate DMA before pushing data on RX timeout +- serial: 8250_omap: Fix sleeping function called from invalid context during probe +- serial: 8250_port: Don't service RX FIFO if throttled +- perf parse-events: Fix 3 use after frees found with clang ASAN +- xfs: mark dir corrupt when lookup-by-hash fails +- xfs: don't ever return a stale pointer from __xfs_dir3_free_read +- mm: avoid data corruption on CoW fault into PFN-mapped VMA +- perf jevents: Fix leak of mapfile memory +- random: fix data races at timer_rand_state +- selinux: sel_avc_get_stat_idx should increase position index +- audit: CONFIG_CHANGE don't log internal bookkeeping as an event +- skbuff: fix a data race in skb_queue_len() +- mm/swapfile.c: swap_next should increase position index +- tracing: Set kernel_stack's caller size properly +- ACPI: EC: Reference count query handlers under lock +- sctp: move trace_sctp_probe_path into sctp_outq_sack +- ipv6_route_seq_next should increase position index +- rt_cpu_seq_next should increase position index +- neigh_stat_seq_next() should increase position index +- xfs: fix log reservation overflows when allocating large rt extents +- kernel/sys.c: avoid copying possible padding bytes in copy_to_user +- xfs: fix attr leaf header freemap.size underflow +- fix dget_parent() fastpath race +- net: silence data-races on sk_backlog.tail +- mm: fix double page fault on arm64 if PTE_AF is cleared +- sdei_watchdog: avoid possible false hardlockup +- xen/pciback: use lateeoi irq binding +- xen/pvcallsback: use lateeoi irq binding +- xen/scsiback: use lateeoi irq binding +- xen/netback: use lateeoi irq binding +- xen/blkback: use lateeoi irq binding +- xen/events: fix race in evtchn_fifo_unmask() +- xen/events: add a proper barrier to 2-level uevent unmasking +- arm64: fix abi change caused by ILP32 + +* Fri Oct 30 2020 Yang Yingliang - 4.19.90-2010.2.0.0046 +- rtc: cmos: Revert "rtc: Fix the AltCentury value on AMD/Hygon platform" +- NTB: Fix static check warning in perf_clear_test +- NTB: ntb_perf: Fix address err in perf_copy_chunk +- NTB: Fix an error in get link status +- rtc: Fix the AltCentury value on AMD/Hygon platform +- tools/power turbostat: Add support for Hygon Fam 18h (Dhyana) RAPL +- tools/power turbostat: Fix caller parameter of get_tdp_amd() +- tools/power turbostat: Also read package power on AMD F17h (Zen) +- tools/power turbostat: Add support for AMD Fam 17h (Zen) RAPL +- NTB: Add Hygon Device ID +- x86/amd_nb: Make hygon_nb_misc_ids static +- i2c-piix4: Add Hygon Dhyana SMBus support +- x86/CPU/hygon: Fix phys_proc_id calculation logic for multi-die processors +- hwmon: (k10temp) Add Hygon Dhyana support +- tools/cpupower: Add Hygon Dhyana support +- EDAC, amd64: Add Hygon Dhyana support +- 
cpufreq: Add Hygon Dhyana support +- ACPI: Add Hygon Dhyana support +- x86/xen: Add Hygon Dhyana support to Xen +- x86/kvm: Add Hygon Dhyana support to KVM +- x86/mce: Add Hygon Dhyana support to the MCA infrastructure +- x86/bugs: Add Hygon Dhyana to the respective mitigation machinery +- x86/apic: Add Hygon Dhyana support +- x86/pci, x86/amd_nb: Add Hygon Dhyana support to PCI and northbridge +- x86/amd_nb: Check vendor in AMD-only functions +- x86/alternative: Init ideal_nops for Hygon Dhyana +- x86/events: Add Hygon Dhyana support to PMU infrastructure +- x86/smpboot: Do not use BSP INIT delay and MWAIT to idle on Dhyana +- x86/cpu/mtrr: Support TOP_MEM2 and get MTRR number +- x86/cpu: Get cache info and setup cache cpumap for Hygon Dhyana +- x86/cpu: Create Hygon Dhyana architecture support file +- kvm: debugfs: aarch64 export cpu time related items to debugfs +- kvm: debugfs: export remaining aarch64 kvm exit reasons to debugfs +- kvm: debugfs: Export vcpu stat via debugfs +- kvm: fix compile error when including linux/kvm.h +- kvm: arm64: add KVM_CAP_ARM_CPU_FEATURE extension +- kvm: arm64: make ID registers configurable +- kvm: arm64: emulate the ID registers +- arm64: add a helper function to traverse arm64_ftr_regs +- xen/events: defer eoi in case of excessive number of events +- xen/events: use a common cpu hotplug hook for event channels +- xen/events: switch user event channels to lateeoi model +- xen/events: add a new "late EOI" evtchn framework +- xen/events: avoid removing an event channel while handling it +- net/hinic: update hinic version to 2.3.2.16 +- net/hinic: Allowed to send commands when only hot activation of ucode +- net/hinic: Fix ethtool loopback test failure +- net/hinic: VF is not allowed to configure global resources +- net/hinic: Allow to remove administratively set MAC on VFs +- net/hinic: Fix the driver does not report an error when setting MAC fails +- Bluetooth: MGMT: Fix not checking if BT_HS is enabled +- Bluetooth: Disable High Speed by default +- Bluetooth: L2CAP: Fix calling sk_filter on non-socket based channel +- Bluetooth: A2MP: Fix not initializing all members +- perf/core: Fix race in the perf_mmap_close() function +- geneve: add transport ports in route lookup for geneve +- ext4: only set last error block when check system zone failed +- xfs: Fix tail rounding in xfs_alloc_file_space() +- KEYS: reaching the keys quotas correctly +- serial: 8250: Avoid error message on reprobe +- mm: memcg: fix memcg reclaim soft lockup +- mm/thp: fix __split_huge_pmd_locked() for migration PMD +- kprobes: fix kill kprobe which has been marked as gone +- percpu: fix first chunk size calculation for populated bitmap +- spi: Fix memory leak on splited transfers +- nvme-rdma: cancel async events before freeing event struct +- nvme-fc: cancel async events before freeing event struct +- NFS: Zero-stateid SETATTR should first return delegation +- scsi: target: iscsi: Fix hang in iscsit_access_np() when getting tpg->np_login_sem +- scsi: target: iscsi: Fix data digest calculation +- xfs: initialize the shortform attr header padding entry +- block: ensure bdi->io_pages is always initialized +- dm writecache: handle DAX to partitions on persistent memory correctly +- libata: implement ATA_HORKAGE_MAX_TRIM_128M and apply to Sandisks +- uaccess: Add non-pagefault user-space write function +- uaccess: Add non-pagefault user-space read functions +- xfs: don't update mtime on COW faults +- include/linux/log2.h: add missing () around n in roundup_pow_of_two() +- perf 
jevents: Fix suspicious code in fixregex() +- xfs: fix xfs_bmap_validate_extent_raw when checking attr fork of rt files +- fix regression in "epoll: Keep a reference on files added to the check list" +- perf tools: Correct SNOOPX field offset +- cpuidle: Fixup IRQ state +- tpm: Unify the mismatching TPM space buffer sizes +- device property: Fix the secondary firmware node handling in set_primary_fwnode() +- PM: sleep: core: Fix the handling of pending runtime resume requests +- writeback: Fix sync livelock due to b_dirty_time processing +- writeback: Avoid skipping inode writeback +- writeback: Protect inode->i_io_list with inode->i_lock +- serial: 8250: change lock order in serial8250_do_startup() +- serial: 8250_exar: Fix number of ports for Commtech PCIe cards +- serial: pl011: Don't leak amba_ports entry on driver register error +- serial: pl011: Fix oops on -EPROBE_DEFER +- vt_ioctl: change VT_RESIZEX ioctl to check for error return from vc_resize() +- vt: defer kfree() of vc_screenbuf in vc_do_resize() +- blk-mq: order adding requests to hctx->dispatch and checking SCHED_RESTART +- fs: prevent BUG_ON in submit_bh_wbc() +- ext4: handle option set by mount flags correctly +- ext4: handle read only external journal device +- ext4: don't BUG on inconsistent journal feature +- jbd2: make sure jh have b_transaction set in refile/unfile_buffer +- scsi: fcoe: Memory leak fix in fcoe_sysfs_fcf_del() +- scsi: iscsi: Do not put host in iscsi_set_flashnode_param() +- locking/lockdep: Fix overflow in presentation of average lock-time +- PCI: Fix pci_create_slot() reference count leak +- xfs: Don't allow logging of XFS_ISTALE inodes +- iommu/iova: Don't BUG on invalid PFNs +- mm/hugetlb: fix calculation of adjust_range_if_pmd_sharing_possible +- do_epoll_ctl(): clean the failure exits up a bit +- epoll: Keep a reference on files added to the check list +- efi: add missed destroy_workqueue when efisubsys_init fails +- RDMA/bnxt_re: Do not add user qps to flushlist +- vfio/type1: Add proper error unwind for vfio_iommu_replay() +- fs/signalfd.c: fix inconsistent return codes for signalfd4 +- xfs: Fix UBSAN null-ptr-deref in xfs_sysfs_init +- virtio_ring: Avoid loop when vq is broken in virtqueue_poll +- xfs: fix inode quota reservation checks +- scsi: target: tcmu: Fix crash in tcmu_flush_dcache_range on ARM +- spi: Prevent adding devices below an unregistering controller +- jbd2: add the missing unlock_buffer() in the error path of jbd2_write_superblock() +- ext4: fix checking of directory entry validity for inline directories +- mm, page_alloc: fix core hung in free_pcppages_bulk() +- mm: include CMA pages in lowmem_reserve at boot +- kernel/relay.c: fix memleak on destroy relay channel +- khugepaged: adjust VM_BUG_ON_MM() in __khugepaged_enter() +- khugepaged: khugepaged_test_exit() check mmget_still_valid() +- perf probe: Fix memory leakage when the probe point is not found +- xfs: fix duplicate verification from xfs_qm_dqflush() +- xfs: reset buffer write failure state on successful completion +- xfs: fix partially uninitialized structure in xfs_reflink_remap_extent +- xfs: clear PF_MEMALLOC before exiting xfsaild thread +- xfs: acquire superblock freeze protection on eofblocks scans +- xfs: Fix deadlock between AGI and AGF with RENAME_WHITEOUT +- macvlan: validate setting of multiple remote source MAC addresses +- blk-mq: insert flush request to the front of dispatch queue +- blk-mq: Rerun dispatching in the case of budget contention +- blk-mq: Add blk_mq_delay_run_hw_queues() API call +- 
blk-mq: In blk_mq_dispatch_rq_list() "no budget" is a reason to kick +- blk-mq: Put driver tag in blk_mq_dispatch_rq_list() when no budget +- blk-mq: insert passthrough request into hctx->dispatch directly +- arm64/ascend: Fix register_persistent_clock definition +- net: add __must_check to skb_put_padto() +- netfilter: nf_tables: incorrect enum nft_list_attributes definition +- tcp_bbr: adapt cwnd based on ack aggregation estimation +- tcp_bbr: refactor bbr_target_cwnd() for general inflight provisioning +- ipv4: Update exception handling for multipath routes via same device +- tipc: use skb_unshare() instead in tipc_buf_append() +- tipc: fix shutdown() of connection oriented socket +- tipc: Fix memory leak in tipc_group_create_member() +- ipv6: avoid lockdep issue in fib6_del() +- ip: fix tos reflection in ack and reset packets +- af_key: pfkey_dump needs parameter validation +- SUNRPC: stop printk reading past end of string +- net: handle the return value of pskb_carve_frag_list() correctly +- net/mlx5e: Don't support phys switch id if not in switchdev mode +- net: disable netpoll on fresh napis +- tipc: fix shutdown() of connectionless socket +- sctp: not disable bh in the whole sctp_get_port_local() +- net: ethernet: mlx4: Fix memory allocation in mlx4_buddy_init() +- netfilter: nfnetlink: nfnetlink_unicast() reports EAGAIN instead of ENOBUFS +- netfilter: nf_tables: fix destination register zeroing +- netfilter: nf_tables: add NFTA_SET_USERDATA if not null +- scsi: fcoe: Fix I/O path allocation +- ipvlan: fix device features +- tipc: fix uninit skb->data in tipc_nl_compat_dumpit() +- net: Fix potential wrong skb->protocol in skb_vlan_untag() +- gre6: Fix reception with IP6_TNL_F_RCV_DSCP_COPY +- bonding: fix active-backup failover for current ARP slave +- bonding: fix a potential double-unregister +- bonding: show saner speed for broadcast mode +- i40e: Fix crash during removing i40e driver +- i40e: Set RX_ONLY mode for unicast promiscuous on VLAN +- svcrdma: Fix another Receive buffer leak +- net/compat: Add missing sock updates for SCM_RIGHTS +- net: initialize fastreuse on inet_inherit_port +- net: refactor bind_bucket fastreuse into helper +- net/tls: Fix kmap usage +- net: Set fput_needed iff FDPUT_FPUT is set +- af_packet: TPACKET_V3: fix fill status rwlock imbalance +- ipvs: allow connection reuse for unconfirmed conntrack +- xfrm: Fix crash when the hold queue is used. +- net sched: fix reporting the first-time use timestamp +- IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads +- fib: add missing attribute validation for tun_id +- net/mlx5: Fix mlx5_ifc_query_lag_out_bits +- mpls: fix warning with multi-label encap +- hdlc_ppp: add range checks in ppp_cp_parse_cr() +- spi/ascend: Add spi-cpld to device tree compatibility list +- net: hns3: update hns3 version to 1.9.38.8 +- net: hns3: modify the sensitive words +- block: allow for_each_bvec to support zero len bvec +- HID: hid-input: clear unmapped usages +- net/nfc/rawsock.c: add CAP_NET_RAW check. 
+- arm64/ascend: Implement the read_persistend_clock64 for aarch64 +- ext4: clear buffer verified flag if read metadata from disk +- ext4: Fix bdev write error check failed when mount fs with ro +- loop: Report EOPNOTSUPP properly + +* Wed Sep 23 2020 Yang Yingliang - 4.19.90-2009.3.0.0045 +- acpi/arm64: check the returned logical CPU number of 'acpi_map_cpuid()' +- staging: most: net: fix buffer overflow +- block: Do not discard buffers under a mounted filesystem +- block: refactor bd_start_claiming +- fs: Don't invalidate page buffers in block_write_full_page() +- ilp32: fix compile problem when ARM64_ILP32 and UBSAN are both enabled +- locking/percpu-rwsem: use this_cpu_{inc|dec}() for read_count +- scsi: libsas: Set data_dir as DMA_NONE if libata marks qc as NODATA +- Btrfs: fix selftests failure due to uninitialized i_mode in test inodes +- btrfs: inode: Verify inode mode to avoid NULL pointer dereference +- drm/ttm: fix incrementing the page pointer for huge pages +- drm/ttm: fix start page for huge page check in ttm_put_pages() +- media: uvcvideo: Avoid cyclic entity chains due to malformed USB descriptors +- fbcon: remove now unusued 'softback_lines' cursor() argument +- fbcon: remove soft scrollback code +- mm/hugetlb: fix a race between hugetlb sysctl handlers +- nfs: Fix getxattr kernel panic and memory overflow +- net/packet: fix overflow in tpacket_rcv +- net/packet: make tp_drops atomic +- ext4: fix potential negative array index in do_split() +- rbd: require global CAP_SYS_ADMIN for mapping and unmapping +- xfs: fix boundary test in xfs_attr_shortform_verify +- xfs: use the latest extent at writeback delalloc conversion time +- xfs: validate writeback mapping using data fork seq counter +- xfs: create delalloc bmapi wrapper for full extent allocation +- xfs: refactor AGI unlinked bucket updates +- xfs: add xfs_verify_agino_or_null helper +- xfs: clean up iunlink functions +- arm64/ascend: enable ascend features for Ascend910 platform +- arm64/ascend: Add auto tuning hugepage module +- arm64/ascend: Enable CONFIG_ASCEND_AUTO_TUNING_HUGEPAGE for hulk_defconfig +- arm64/ascend: Notifier will return a freed val to indecate print logs +- arm64/ascend: Add hugepage flags change interface +- arm64/ascend: Add set hugepage number helper function +- arm64/ascend: Add mmap hook when alloc hugepage +- arm64/ascend: Add new CONFIG for auto-tuning hugepage +- dm thin metadata: Fix use-after-free in dm_bm_set_read_only +- dm thin metadata: Avoid returning cmd->bm wild pointer on error +- dm cache metadata: Avoid returning cmd->bm wild pointer on error +- watchdog: Enable CONFIG_ASCEND_WATCHDOG_SYSFS_CONFIGURE in hulk_defconfig +- watchdog: Add interface to config timeout and pretimeout in sysfs +- mm/swapfile: fix and annotate various data races +- serial: 8250: fix null-ptr-deref in serial8250_start_tx() +- timekeeping: Prevent 32bit truncation in scale64_check_overflow() +- lib : kobject: fix refcount imblance on kobject_rename +- genirq/debugfs: Add missing sanity checks to interrupt injection +- ovl: fix WARN_ON nlink drop to zero +- ovl: fix some xino configurations +- ovl: fix corner case of non-constant st_dev; st_ino +- ovl: fix corner case of conflicting lower layer uuid +- ovl: generalize the lower_fs[] array +- ovl: simplify ovl_same_sb() helper +- ovl: generalize the lower_layers[] array +- ovl: fix lookup failure on multi lower squashfs +- fat: don't allow to mount if the FAT length == 0 +- serial: amba-pl011: Make sure we initialize the port.lock spinlock +- perf top: Fix 
wrong hottest instruction highlighted +- xfs: prohibit fs freezing when using empty transactions +- xfs: Use scnprintf() for avoiding potential buffer overflow +- xfs: use bitops interface for buf log item AIL flag check +- xfs: fix some memory leaks in log recovery +- xfs: convert EIO to EFSCORRUPTED when log contents are invalid +- xfs: fix inode fork extent count overflow +- nvme: fix memory leak caused by incorrect subsystem free +- nvme: fix possible deadlock when nvme_update_formats fails +- dm verity: don't prefetch hash blocks for already-verified data +- arm64: kprobes: Recover pstate.D in single-step exception handler +- nbd: fix possible page fault for nbd disk +- nbd: rename the runtime flags as NBD_RT_ prefixed +- jbd2: flush_descriptor(): Do not decrease buffer head's ref count +- Revert "dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues" +- ACPICA: Win OSL: Replace get_tick_count with get_tick_count64 +- ext4: avoid fetching btime in ext4_getattr() unless requested +- mm: pagewalk: fix termination condition in walk_pte_range() +- mm/huge_memory.c: use head to check huge zero page +- mm/page-writeback.c: improve arithmetic divisions +- mm/page-writeback.c: use div64_ul() for u64-by-unsigned-long divide +- PCI: PM/ACPI: Refresh all stale power state data in pci_pm_complete() +- ACPI: PM: Fix regression in acpi_device_set_power() +- ACPI: PM: Allow transitions to D0 to occur in special cases +- ACPI: PM: Avoid evaluating _PS3 on transitions from D3hot to D3cold +- iommu/arm-smmu: Mark expected switch fall-through +- efi/memreserve: Register reservations as 'reserved' in /proc/iomem +- compat_ioctl: handle SIOCOUTQNSD +- mm: slub: fix conversion of freelist_corrupted() +- khugepaged: retract_page_tables() remember to test exit +- kprobes: Fix NULL pointer dereference at kprobe_ftrace_handler +- ftrace: Setup correct FTRACE_FL_REGS flags for module +- mm/page_counter.c: fix protection usage propagation +- driver core: Avoid binding drivers to dead devices +- genirq/affinity: Make affinity setting if activated opt-in +- mm/mmap.c: Add cond_resched() for exit_mmap() CPU stalls +- sched: correct SD_flags returned by tl->sd_flags() +- sched/fair: Fix NOHZ next idle balance +- xattr: break delegations in {set, remove}xattr +- firmware: Fix a reference count leak. +- ext4: fix direct I/O read error +- arm64: csum: Fix handling of bad packets +- arm64/alternatives: move length validation inside the subsection +- bpf: Fix map leak in HASH_OF_MAPS map +- dm integrity: fix integrity recalculation that is improperly skipped +- io-mapping: indicate mapping failure +- vt: Reject zero-sized screen buffer size. +- fuse: fix weird page warning +- printk: queue wake_up_klogd irq_work only if per-CPU areas are ready +- genirq/affinity: Handle affinity setting on inactive interrupts correctly +- sched/fair: handle case of task_h_load() returning 0 +- sched: Fix unreliable rseq cpu_id for new tasks +- timer: Fix wheel index calculation on last level +- timer: Prevent base->clk from moving backward +- uio_pdrv_genirq: fix use without device tree and no interrupt +- fuse: Fix parameter for FS_IOC_{GET, SET}FLAGS +- ovl: fix unneeded call to ovl_change_flags() +- ovl: relax WARN_ON() when decoding lower directory file handle +- ovl: inode reference leak in ovl_is_inuse true case. 
+- arm64/alternatives: don't patch up internal branches +- arm64/alternatives: use subsections for replacement sequences +- block: release bip in a right way in error path +- cifs: update ctime and mtime during truncate +- dm zoned: assign max_io_len correctly +- virtio-blk: free vblk-vqs in error path of virtblk_probe() +- mm/slub: fix stack overruns with SLUB_STATS +- mm/slub.c: fix corrupted freechain in deactivate_slab() +- mm: fix swap cache node allocation mask +- dm writecache: add cond_resched to loop in persistent_memory_claim() +- dm writecache: correct uncommitted_block when discarding uncommitted entry +- ring-buffer: Zero out time extend if it is nested and not absolute +- mm/slab: use memzero_explicit() in kzfree() +- sched/core: Fix PI boosting between RT and DEADLINE tasks +- sched/deadline: Initialize ->dl_boosted +- efi/esrt: Fix reference count leak in esre_create_sysfs_entry. +- loop: replace kill_bdev with invalidate_bdev +- fanotify: fix ignore mask logic for events on child and on dir +- md: add feature flag MD_FEATURE_RAID0_LAYOUT +- kretprobe: Prevent triggering kretprobe from within kprobe_flush_task +- ext4: avoid race conditions when remounting with options that change dax +- ext4: fix partial cluster initialization when splitting extent +- selinux: fix double free +- arm64: hw_breakpoint: Don't invoke overflow handler on uaccess watchpoints +- lib/zlib: remove outdated and incorrect pre-increment optimization +- vfio/mdev: Fix reference count leak in add_mdev_supported_type +- PCI: dwc: Fix inner MSI IRQ domain registration +- dm zoned: return NULL if dmz_get_zone_for_reclaim() fails to find a zone +- ipmi: use vzalloc instead of kmalloc for user creation +- PCI: Fix pci_register_host_bridge() device_register() error handling +- drivers: base: Fix NULL pointer exception in __platform_driver_probe() if a driver developer is foolish +- scsi: sr: Fix sr_probe() missing deallocate of device minor +- vfio/pci: fix memory leaks in alloc_perm_bits() +- PCI: Allow pci_resize_resource() for devices on root bus +- ipmi: fix sleep-in-atomic in free_user at cleanup SRCU user->release_barrier +- Revert "ipmi: fix sleep-in-atomic in free_user at cleanup SRCU user->release_barrier" +- kernel/cpu_pm: Fix uninitted local in cpu_pm +- ext4: fix race between ext4_sync_parent() and rename() +- ext4: fix EXT_MAX_EXTENT/INDEX to check for zeroed eh_max +- mm: initialize deferred pages with interrupts enabled +- cpuidle: Fix three reference count leaks +- spi: dw: Return any value retrieved from the dma_transfer callback +- PCI: Don't disable decoding when mmio_always_on is set +- sched/core: Fix illegal RCU from offline CPUs +- audit: fix a net reference leak in audit_list_rules_send() +- audit: fix a net reference leak in audit_send_reply() +- spi: dw: Fix Rx-only DMA transfers +- spi: dw: Enable interrupts in accordance with DMA xfer mode +- arm64: insn: Fix two bugs in encoding 32-bit logical immediates +- spi: dw: Zero DMA Tx and Rx configurations on stack +- perf: Add cond_resched() to task_function_call() +- mm/slub: fix a memory leak in sysfs_slab_add() +- proc: Use new_inode not new_inode_pseudo +- ovl: initialize error in ovl_copy_xattr +- spi: Fix controller unregister order +- spi: No need to assign dummy value in spi_unregister_controller() +- spi: dw: Fix controller unregister order +- ACPI: CPPC: Fix reference count leak in acpi_cppc_processor_probe() +- ACPI: sysfs: Fix reference count leak in acpi_sysfs_add_hotplug_profile() +- efi/efivars: Add missing kobject_put() 
in sysfs entry creation error path +- aio: fix async fsync creds +- mm: add kvfree_sensitive() for freeing sensitive data objects +- sched/fair: Don't NUMA balance for kthreads +- lib: Reduce user_access_begin() boundaries in strncpy_from_user() and strnlen_user() +- tun: correct header offsets in napi frags mode +- spi: dw: use "smp_mb()" to avoid sending spi data error +- Revert "cgroup: Add memory barriers to plug cgroup_rstat_updated() race window" +- iommu: Fix reference count leak in iommu_group_alloc. +- mm: remove VM_BUG_ON(PageSlab()) from page_mapcount() +- exec: Always set cap_ambient in cap_bprm_set_creds +- padata: purge get_cpu and reorder_via_wq from padata_do_serial +- padata: initialize pd->cpu with effective cpumask +- padata: Replace delayed timer with immediate workqueue in padata_reorder +- fix multiplication overflow in copy_fdtable() +- exec: Move would_dump into flush_old_exec +- cifs: fix leaked reference on requeued write +- arm64: fix the flush_icache_range arguments in machine_kexec +- NFSv4: Fix fscache cookie aux_data to ensure change_attr is included +- nfs: fscache: use timespec64 in inode auxdata +- NFS: Fix fscache super_cookie index_key from changing after umount +- ipc/util.c: sysvipc_find_ipc() incorrectly updates position index +- net: phy: fix aneg restart in phy_ethtool_set_eee +- virtio-blk: handle block_device_operations callbacks after hot unplug +- shmem: fix possible deadlocks on shmlock_user_lock +- ipc/mqueue.c: change __do_notify() to bypass check_kill_permission() +- coredump: fix crash when umh is disabled +- mm/page_alloc: fix watchdog soft lockups during set_zone_contiguous() +- arm64: hugetlb: avoid potential NULL dereference +- cifs: protect updating server->dstaddr with a spinlock +- vfio: avoid possible overflow in vfio_iommu_type1_pin_pages +- propagate_one(): mnt_set_mountpoint() needs mount_lock +- ext4: check for non-zero journal inum in ext4_calculate_overhead +- ext4: convert BUG_ON's to WARN_ON's in mballoc.c +- ext4: increase wait time needed before reuse of deleted inode numbers +- ext4: use matching invalidatepage in ext4_writepage +- mm: shmem: disable interrupt when acquiring info->lock in userfaultfd_copy path +- perf/core: fix parent pid/tid in task exit events +- vt: don't hardcode the mem allocation upper bound +- audit: check the length of userspace generated audit records +- tpm/tpm_tis: Free IRQ if probing fails +- mm/ksm: fix NULL pointer dereference when KSM zero page is enabled +- mm/hugetlb: fix a addressing exception caused by huge_pte_offset +- vmalloc: fix remap_vmalloc_range() bounds checks +- KEYS: Avoid false positive ENOMEM error on key read +- loop: Better discard support for block devices +- ipc/util.c: sysvipc_find_ipc() should increase position index +- scsi: iscsi: Report unbind session event when the target has been removed +- watchdog: reset last_hw_keepalive time at start +- ext4: fix extent_status fragmentation for plain files +- bpf: fix buggy r0 retval refinement for tracing helpers +- NFS: Fix memory leaks in nfs_pageio_stop_mirroring() +- percpu_counter: fix a data race at vm_committed_as +- cifs: Allocate encryption header through kmalloc +- ext4: do not commit super on read-only bdev +- NFS: direct.c: Fix memory leak of dreq when nfs_get_lock_context fails +- irqchip/mbigen: Free msi_desc on device teardown +- ext4: use non-movable memory for superblock readahead +- mm/vmalloc.c: move 'area->pages' after if statement +- ext4: do not zeroout extents beyond i_disksize +- tracing: Fix the race 
between registering 'snapshot' event trigger and triggering 'snapshot' operation +- keys: Fix proc_keys_next to increase position index +- ext4: fix incorrect inodes per group in error message +- ext4: fix incorrect group count in ext4_fill_super error message +- ovl: fix value of i_ino for lower hardlink corner case +- dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone() +- ipmi: fix hung processes in __get_guid() +- libata: Return correct status in sata_pmp_eh_recover_pm() when ATA_DFLAG_DETACH is set +- kmod: make request_module() return an error when autoloading is disabled +- NFS: Fix a page leak in nfs_destroy_unlinked_subrequests() +- dm verity fec: fix memory leak in verity_fec_dtr +- dm writecache: add cond_resched to avoid CPU hangs +- mm: Use fixed constant in page_frag_alloc instead of size + 1 +- tpm: tpm2_bios_measurements_next should increase position index +- tpm: tpm1_bios_measurements_next should increase position index +- tpm: Don't make log failures fatal +- PCI/ASPM: Clear the correct bits when enabling L1 substates +- md: check arrays is suspended in mddev_detach before call quiesce operations +- irqchip/gic-v4: Provide irq_retrigger to avoid circular locking dependency +- block: Fix use-after-free issue accessing struct io_cq +- genirq/irqdomain: Check pointer in irq_domain_alloc_irqs_hierarchy() +- libata: Remove extra scsi_host_put() in ata_scsi_add_hosts() +- sched: Avoid scale real weight down to zero +- block: keep bdi->io_pages in sync with max_sectors_kb for stacked devices +- firmware: arm_sdei: fix double-lock on hibernate with shared events +- arm64: Fix size of __early_cpu_boot_status +- random: always use batched entropy for get_random_u{32, 64} +- padata: always acquire cpu_hotplug_lock before pinst->lock +- bpf: Explicitly memset some bpf info structures declared on the stack +- bpf: Explicitly memset the bpf_attr structure +- libfs: fix infoleak in simple_attr_read() +- bpf/btf: Fix BTF verification of enum members in struct/union +- genirq: Fix reference leaks on irq affinity notifiers +- scsi: sd: Fix optimal I/O size for devices that change reported values +- scsi: ipr: Fix softlockup when rescanning devices in petitboot +- nfs: add minor version to nfs_server_key for fscache +- arm64: smp: fix crash_smp_send_stop() behaviour +- arm64: smp: fix smp_send_stop() behaviour +- mm, slub: prevent kmalloc_node crashes and memory leaks +- mm: slub: be more careful about the double cmpxchg of freelist +- block, bfq: fix overwrite of bfq_group pointer in bfq_find_set_group() +- mm: slub: add missing TID bump in kmem_cache_alloc_bulk() +- driver core: Fix creation of device links with PM-runtime flags +- driver core: Remove device link creation limitation +- driver core: Add device link flag DL_FLAG_AUTOPROBE_CONSUMER +- driver core: Make driver core own stateful device links +- driver core: Fix adding device links to probing suppliers +- driver core: Remove the link if there is no driver with AUTO flag +- jbd2: fix data races at struct journal_head +- signal: avoid double atomic counter increments for user accounting +- cifs_atomic_open(): fix double-put on late allocation failure +- workqueue: don't use wq_select_unbound_cpu() for bound works +- virtio-blk: fix hw_queue stopped on arbitrary error +- dm writecache: verify watermark during resume +- dm: report suspended device during destroy +- dm cache: fix a crash due to incorrect work item cancelling +- mm: fix possible PMD dirty bit lost in set_pmd_migration_entry() +- mm, numa: fix bad pmd 
by atomically check for pmd_trans_huge when marking page tables prot_numa +- cifs: don't leak -EAGAIN for stat() during reconnect +- audit: always check the netlink payload length in audit_receive_msg() +- audit: fix error handling in audit_data_to_entry() +- ext4: potential crash on allocation error in ext4_alloc_flex_bg_array() +- cifs: Fix mode output in debugging statements +- ipmi:ssif: Handle a possible NULL pointer reference +- irqchip/gic-v3-its: Fix misuse of GENMASK macro +- ata: ahci: Add shutdown to freeze hardware resources of ahci +- bpf, offload: Replace bitwise AND by logical AND in bpf_prog_offload_info_fill +- genirq/proc: Reject invalid affinity masks (again) +- ext4: fix race between writepages and enabling EXT4_EXTENTS_FL +- ext4: rename s_journal_flag_rwsem to s_writepages_rwsem +- ext4: fix mount failure with quota configured as module +- ext4: fix potential race between s_flex_groups online resizing and access +- ext4: fix potential race between s_group_info online resizing and access +- ext4: fix potential race between online resizing and write operations +- ext4: fix a data race in EXT4_I(inode)->i_disksize +- genirq/irqdomain: Make sure all irq domain flags are distinct +- Revert "ipc, sem: remove uneeded sem_undo_list lock usage in exit_sem()" +- jbd2: fix ocfs2 corrupt when clearing block group bits +- vt: vt_ioctl: fix race in VT_RESIZEX +- vt: fix scrollback flushing on background consoles +- NFS: Fix memory leaks +- brd: check and limit max_part par +- irqchip/gic-v3-its: Reference to its_invall_cmd descriptor when building INVALL +- irqchip/gic-v3: Only provision redistributors that are enabled in ACPI +- bpf: map_seq_next should always increase position index +- cifs: fix NULL dereference in match_prepath +- driver core: platform: fix u32 greater or equal to zero comparison +- irqchip/mbigen: Set driver .suppress_bind_attrs to avoid remove problems +- module: avoid setting info->name early in case we can fall back to info->mod->name +- watchdog/softlockup: Enforce that timestamp is valid on boot +- arm64: fix alternatives with LLVM's integrated assembler +- scsi: iscsi: Don't destroy session if there are outstanding connections +- iommu/arm-smmu-v3: Use WRITE_ONCE() when changing validity of an STE +- driver core: platform: Prevent resouce overflow from causing infinite loops +- selinux: ensure we cleanup the internal AVC counters on error in avc_update() +- selinux: ensure we cleanup the internal AVC counters on error in avc_insert() +- jbd2: clear JBD2_ABORT flag before journal_reset to update log tail info when load journal +- uio: fix a sleep-in-atomic-context bug in uio_dmem_genirq_irqcontrol() +- ext4: fix ext4_dax_read/write inode locking sequence for IOCB_NOWAIT +- cpu/hotplug, stop_machine: Fix stop_machine vs hotplug order +- nvme: fix the parameter order for nvme_get_log in nvme_get_fw_slot_info +- arm64: ssbs: Fix context-switch when SSBS is present on all CPUs +- ext4: improve explanation of a mount failure caused by a misconfigured kernel +- ext4: fix checksum errors with indexed dirs +- ext4: don't assume that mmp_nodename/bdevname have NUL +- arm64: nofpsmid: Handle TIF_FOREIGN_FPSTATE flag cleanly +- arm64: cpufeature: Set the FP/SIMD compat HWCAP bits properly +- padata: fix null pointer deref of pd->pinst +- arm64: ptrace: nofpsimd: Fail FP/SIMD regset operations +- arm64: cpufeature: Fix the type of no FP/SIMD capability +- NFSv4: try lease recovery on NFS4ERR_EXPIRED +- NFS: Revalidate the file size on a fatal write error +- nfs: 
NFS_SWAP should depend on SWAP +- PCI: Don't disable bridge BARs when assigning bus resources +- perf/core: Fix mlock accounting in perf_mmap() +- clocksource: Prevent double add_timer_on() for watchdog_timer +- x86/apic/msi: Plug non-maskable MSI affinity race +- mm/page_alloc.c: fix uninitialized memmaps on a partially populated last section +- mm: return zero_resv_unavail optimization +- mm: zero remaining unavailable struct pages +- ext4: fix deadlock allocating crypto bounce page from mempool +- aio: prevent potential eventfd recursion on poll +- eventfd: track eventfd_signal() recursion depth +- watchdog: fix UAF in reboot notifier handling in watchdog core code +- jbd2_seq_info_next should increase position index +- NFS: Directory page cache pages need to be locked when read +- NFS: Fix memory leaks and corruption in readdir +- padata: Remove broken queue flushing +- dm writecache: fix incorrect flush sequence when doing SSD mode commit +- dm: fix potential for q->make_request_fn NULL pointer +- dm crypt: fix benbi IV constructor crash if used in authenticated mode +- dm space map common: fix to ensure new block isn't already in use +- dm zoned: support zone sizes smaller than 128MiB +- ovl: fix wrong WARN_ON() in ovl_cache_update_ino() +- alarmtimer: Unregister wakeup source when module get fails +- irqdomain: Fix a memory leak in irq_domain_push_irq() +- rcu: Avoid data-race in rcu_gp_fqs_check_wake() +- ipc/msg.c: consolidate all xxxctl_down() functions +- kernel/module: Fix memleak in module_add_modinfo_attrs() +- mm/migrate.c: also overwrite error when it is bigger than zero +- mm/memory_hotplug: shrink zones when offlining memory +- mm/memory_hotplug: fix try_offline_node() +- mm/memunmap: don't access uninitialized memmap in memunmap_pages() +- drivers/base/node.c: simplify unregister_memory_block_under_nodes() +- mm/hotplug: kill is_dev_zone() usage in __remove_pages() +- mm/memory_hotplug: remove "zone" parameter from sparse_remove_one_section +- mm/memory_hotplug: make unregister_memory_block_under_nodes() never fail +- mm/memory_hotplug: remove memory block devices before arch_remove_memory() +- mm/memory_hotplug: create memory block devices after arch_add_memory() +- drivers/base/memory: pass a block_id to init_memory_block() +- mm/memory_hotplug: allow arch_remove_memory() without CONFIG_MEMORY_HOTREMOVE +- s390x/mm: implement arch_remove_memory() +- mm/memory_hotplug: make __remove_pages() and arch_remove_memory() never fail +- powerpc/mm: Fix section mismatch warning +- mm/memory_hotplug: make __remove_section() never fail +- mm/memory_hotplug: make unregister_memory_section() never fail +- mm, memory_hotplug: update a comment in unregister_memory() +- drivers/base/memory.c: clean up relics in function parameters +- mm/memory_hotplug: release memory resource after arch_remove_memory() +- mm, memory_hotplug: add nid parameter to arch_remove_memory +- drivers/base/memory.c: remove an unnecessary check on NR_MEM_SECTIONS +- mm, sparse: pass nid instead of pgdat to sparse_add_one_section() +- mm, sparse: drop pgdat_resize_lock in sparse_add/remove_one_section() +- arm64/mm: add temporary arch_remove_memory() implementation +- s390x/mm: fail when an altmap is used for arch_add_memory() +- mm/memory_hotplug: simplify and fix check_hotplug_memory_range() +- scsi: iscsi: Avoid potential deadlock in iscsi_if_rx func +- sd: Fix REQ_OP_ZONE_REPORT completion handling +- tun: add mutex_unlock() call and napi.skb clearing in tun_get_user() +- bpf: fix BTF limits +- scsi: libfc: 
fix null pointer dereference on a null lport +- iommu: Use right function to get group for device +- NFSv4/flexfiles: Fix invalid deref in FF_LAYOUT_DEVID_NODE() +- NFS: Add missing encode / decode sequence_maxsz to v4.2 operations +- driver core: Fix PM-runtime for links added during consumer probe +- driver core: Fix possible supplier PM-usage counter imbalance +- net: phy: fixed_phy: Fix fixed_phy not checking GPIO +- driver core: Do not call rpm_put_suppliers() in pm_runtime_drop_link() +- driver core: Fix handling of runtime PM flags in device_link_add() +- driver core: Do not resume suppliers under device_links_write_lock() +- driver core: Avoid careless re-use of existing device links +- driver core: Fix DL_FLAG_AUTOREMOVE_SUPPLIER device link flag handling +- Revert "efi: Fix debugobjects warning on 'efi_rts_work'" +- scsi: core: scsi_trace: Use get_unaligned_be*() +- scsi: sd: enable compat ioctls for sed-opal +- NFSv4.x: Drop the slot if nfs4_delegreturn_prepare waits for layoutreturn +- NFSv2: Fix a typo in encode_sattr() +- scsi: sd: Clear sdkp->protection_type if disk is reformatted without PI +- scsi: enclosure: Fix stale device oops with hot replug +- xprtrdma: Fix completion wait during device removal +- xprtrdma: Fix use-after-free in rpcrdma_post_recvs +- tcp: cache line align MAX_TCP_HEADER +- svcrdma: Fix trace point use-after-free race +- net: stricter validation of untrusted gso packets +- net: bridge: enfore alignment for ethernet address +- net: use correct this_cpu primitive in dev_recursion_level +- net: core: reduce recursion limit value +- ipv4: fill fl4_icmp_{type, code} in ping_v4_sendmsg +- net: Added pointer check for dst->ops->neigh_lookup in dst_neigh_lookup_skb +- vlan: consolidate VLAN parsing code and limit max parsing depth +- svcrdma: Fix page leak in svc_rdma_recv_read_chunk() +- i40e: Memory leak in i40e_config_iwarp_qvlist +- i40e: Fix of memory leak and integer truncation in i40e_virtchnl.c +- i40e: Wrong truncation from u16 to u8 +- i40e: add num_vectors checker in iwarp handler +- Revert "vxlan: fix tos value before xmit" +- openvswitch: Prevent kernel-infoleak in ovs_ct_put_key() +- net: gre: recompute gre csum for sctp over gre tunnels +- vxlan: Ensure FDB dump is performed under RCU +- ipv6: fix memory leaks on IPV6_ADDRFORM path +- ipv4: Silence suspicious RCU usage warning +- igb: reinit_locked() should be called with rtnl_lock +- net/mlx5e: fix bpf_prog reference count leaks in mlx5e_alloc_rq +- mlxsw: core: Free EMAD transactions using kfree_rcu() +- mlxsw: core: Increase scope of RCU read-side critical section +- mlx4: disable device on shutdown +- net/mlx5: Verify Hardware supports requested ptp function on a given pin +- rds: Prevent kernel-infoleak in rds_notify_queue_get() +- rtnetlink: Fix memory(net_device) leak when ->newlink fails +- udp: Improve load balancing for SO_REUSEPORT. +- udp: Copy has_conns in reuseport_grow(). 
+- sctp: shrink stream outq when fails to do addstream reconf +- sctp: shrink stream outq only when new outcnt < old outcnt +- tcp: allow at most one TLP probe per flight +- net: udp: Fix wrong clean up for IS_UDPLITE macro +- net-sysfs: add a newline when printing 'tx_timeout' by sysfs +- ip6_gre: fix null-ptr-deref in ip6gre_init_net() +- dev: Defer free of skbs in flush_backlog +- bonding: check return value of register_netdevice() in bond_newlink() +- ipvs: fix the connection sync failed in some cases +- mlxsw: destroy workqueue when trap_register in mlxsw_emad_init +- bonding: check error value of register_netdevice() immediately +- tipc: clean up skb list lock handling on send path +- libceph: don't omit recovery_deletes in target_copy() +- sched: consistently handle layer3 header accesses in the presence of VLANs +- tcp: md5: allow changing MD5 keys in all socket states +- tcp: md5: refine tcp_md5_do_add()/tcp_md5_hash_key() barriers +- tcp: md5: do not send silly options in SYNCOOKIES +- tcp: md5: add missing memory barriers in tcp_md5_do_add()/tcp_md5_hash_key() +- tcp: make sure listeners don't initialize congestion-control state +- tcp: fix SO_RCVLOWAT possible hangs under high mem pressure +- net_sched: fix a memory leak in atm_tc_init() +- llc: make sure applications use ARPHRD_ETHER +- l2tp: remove skb_dst_set() from l2tp_xmit_skb() +- mlxsw: spectrum_router: Remove inappropriate usage of WARN_ON() +- i40e: protect ring accesses with READ- and WRITE_ONCE +- ixgbe: protect ring accesses with READ- and WRITE_ONCE +- SUNRPC: Properly set the @subbuf parameter of xdr_buf_subsegment() +- sunrpc: fixed rollback in rpc_gssd_dummy_populate() +- netfilter: ipset: fix unaligned atomic access +- xfrm: Fix double ESP trailer insertion in IPsec crypto offload. 
+- net: Do not clear the sock TX queue in sk_set_socket() +- net: Fix the arp error in some cases +- sch_cake: don't call diffserv parsing code when it is not needed +- tcp_cubic: fix spurious HYSTART_DELAY exit upon drop in min RTT +- sch_cake: fix a few style nits +- sch_cake: don't try to reallocate or unshare skb unconditionally +- ip_tunnel: fix use-after-free in ip_tunnel_lookup() +- ip6_gre: fix use-after-free in ip6gre_tunnel_lookup() +- tcp: grow window for OOO packets only for SACK flows +- tcp: don't ignore ECN CWR on pure ACK +- sctp: Don't advertise IPv4 addresses if ipv6only is set on the socket +- net: increment xmit_recursion level in dev_direct_xmit() +- net: place xmit recursion in softnet data +- net: fix memleak in register_netdevice() +- mld: fix memory leak in ipv6_mc_destroy_dev() +- net: sched: export __netdev_watchdog_up() +- net: core: device_rename: Use rwsem instead of a seqcount +- sched/rt, net: Use CONFIG_PREEMPTION.patch +- e1000e: Do not wake up the system via WOL if device wakeup is disabled +- xdp: Fix xsk_generic_xmit errno +- net/filter: Permit reading NET in load_bytes_relative when MAC not set +- net: sunrpc: Fix off-by-one issues in 'rpc_ntop6' +- igb: Report speed and duplex as unknown when device is runtime suspended +- e1000e: Relax condition to trigger reset for ME workaround +- e1000e: Disable TSO for buffer overrun workaround +- ixgbe: fix signed-integer-overflow warning +- macvlan: Skip loopback packets in RX handler +- net/mlx5e: IPoIB, Drop multicast packets that this interface sent +- netfilter: nft_nat: return EOPNOTSUPP if type or flags are not supported +- e1000: Distribute switch variables for initialization +- ixgbe: Fix XDP redirect on archs with PAGE_SIZE above 4K +- vxlan: Avoid infinite loop when suppressing NS messages with invalid options +- bridge: Avoid infinite loop when suppressing NS messages with invalid options +- ipv6: fix IPV6_ADDRFORM operation logic +- l2tp: do not use inet_hash()/inet_unhash() +- l2tp: add sk_family checks to l2tp_validate_socket +- devinet: fix memleak in inetdev_init() +- netfilter: nf_conntrack_pptp: fix compilation warning with W=1 build +- bonding: Fix reference count leak in bond_sysfs_slave_add. +- xsk: Add overflow check for u64 division, stored into u32 +- esp6: get the right proto for transport mode in esp6_gso_encap +- netfilter: nf_conntrack_pptp: prevent buffer overflows in debug code +- netfilter: nfnetlink_cthelper: unbreak userspace helper support +- netfilter: ipset: Fix subcounter update skip +- netfilter: nft_reject_bridge: enable reject with bridge vlan +- ip_vti: receive ipip packet by calling ip_tunnel_rcv +- vti4: eliminated some duplicate code. +- xfrm: fix a NULL-ptr deref in xfrm_local_error +- xfrm: fix a warning in xfrm_policy_insert_list +- xfrm interface: fix oops when deleting a x-netns interface +- xfrm: call xfrm_output_gso when inner_protocol is set in xfrm_output +- xfrm: allow to accept packets with ipv6 NEXTHDR_HOP in xfrm_input +- libceph: ignore pool overlay and cache logic on redirects +- mlxsw: spectrum: Fix use-after-free of split/unsplit/type_set in case reload fails +- net/mlx4_core: fix a memory leak bug. 
+- net/mlx5e: Update netdev txq on completions during closure +- sctp: Start shutdown on association restart if in SHUTDOWN-SENT state and socket is closed +- sctp: Don't add the shutdown timer if its already been added +- net/mlx5: Add command entry handling completion +- net: ipip: fix wrong address family in init error path +- net: inet_csk: Fix so_reuseport bind-address cache in tb->fast* +- __netif_receive_skb_core: pass skb by reference +- netfilter: nft_set_rbtree: Introduce and use nft_rbtree_interval_start() +- tcp: fix SO_RCVLOWAT hangs with fat skbs +- net: tcp: fix rx timestamp behavior for tcp_recvmsg +- net: ipv4: really enforce backoff for redirects +- tcp: fix error recovery in tcp_zerocopy_receive() +- Revert "ipv6: add mtu lock check in __ip6_rt_update_pmtu" +- net: fix a potential recursive NETDEV_FEAT_CHANGE +- drop_monitor: work around gcc-10 stringop-overflow warning +- netfilter: nf_osf: avoid passing pointer to local var +- netfilter: nat: never update the UDP checksum when it's 0 +- sctp: Fix bundling of SHUTDOWN with COOKIE-ACK +- net/mlx5: Fix command entry leak in Internal Error State +- net/mlx5: Fix forced completion access non initialized command entry +- tipc: fix partial topology connection closure +- sch_sfq: validate silly quantum values +- sch_choke: avoid potential panic in choke_reset() +- net_sched: sch_skbprio: add message validation to skbprio_change() +- net/mlx4_core: Fix use of ENOSPC around mlx4_counter_alloc() +- fq_codel: fix TCA_FQ_CODEL_DROP_BATCH_SIZE sanity checks +- cgroup, netclassid: remove double cond_resched +- sctp: Fix SHUTDOWN CTSN Ack in the peer restart case +- net/mlx5: Fix failing fw tracer allocation on s390 +- svcrdma: Fix leak of svc_rdma_recv_ctxt objects +- mlxsw: Fix some IS_ERR() vs NULL bugs +- vrf: Check skb for XFRM_TRANSFORMED flag +- xfrm: Always set XFRM_TRANSFORMED in xfrm{4, 6}_output_finish +- vrf: Fix IPv6 with qdisc and xfrm +- sched: etf: do not assume all sockets are full blown +- macvlan: fix null dereference in macvlan_device_event() +- ipv6: fix restrict IPV6_ADDRFORM operation +- ipv6: restrict IPV6_ADDRFORM operation +- arm64/ascend: Set mem_sleep_current to PM_SUSPEND_ON for ascend platform +- mm/swap_state: fix a data race in swapin_nr_pages +- arm64: secomp: fix the secure computing mode 1 syscall check for ilp32 +- vti4: removed duplicate log message. 
+- KEYS: Don't write out to userspace while holding key semaphore +- netfilter: nf_tables: report EOPNOTSUPP on unsupported flags/object type +- net: revert default NAPI poll timeout to 2 jiffies +- net: ipv6: do not consider routes via gateways for anycast address check +- net: ipv4: devinet: Fix crash when add/del multicast IP with autojoin +- mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE +- ipv6: don't auto-add link-local address to lag ports +- net: Fix Tx hash bound checking +- sctp: fix possibly using a bad saddr with a given dst +- sctp: fix refcount bug in sctp_wfree +- net, ip_tunnel: fix interface lookup with no key +- ipv4: fix a RCU-list lock in fib_triestat_seq_show +- vti6: Fix memory leak of skb if input policy check fails +- netfilter: nft_fwd_netdev: validate family and chain type +- netfilter: flowtable: reload ip{v6}h in nf_flow_tuple_ip{v6} +- xfrm: policy: Fix doulbe free in xfrm_policy_timer +- xfrm: add the missing verify_sec_ctx_len check in xfrm_add_acquire +- xfrm: fix uctx len check in verify_sec_ctx_len +- vti[6]: fix packet tx through bpf_redirect() in XinY cases +- xfrm: handle NETDEV_UNREGISTER for xfrm device +- ceph: check POOL_FLAG_FULL/NEARFULL in addition to OSDMAP_FULL/NEARFULL +- vxlan: check return value of gro_cells_init() +- tcp: repair: fix TCP_QUEUE_SEQ implementation +- net: ip_gre: Accept IFLA_INFO_DATA-less configuration +- net: ip_gre: Separate ERSPAN newlink / changelink callbacks +- net_sched: keep alloc_hash updated after hash allocation +- net_sched: cls_route: remove the right filter from hashtable +- net/packet: tpacket_rcv: avoid a producer race condition +- net: cbs: Fix software cbs to consider packet sending time +- mlxsw: spectrum_mr: Fix list iteration in error path +- Revert "ipv6: Fix handling of LLA with VRF and sockets bound to VRF" +- Revert "vrf: mark skb for multicast or link-local as enslaved to VRF" +- ipv4: ensure rcu_read_lock() in cipso_v4_error() +- netfilter: nft_tunnel: add missing attribute validation for tunnels +- netfilter: nft_payload: add missing attribute validation for payload csum flags +- netfilter: cthelper: add missing attribute validation for cthelper +- netfilter: x_tables: xt_mttg_seq_next should increase position index +- netfilter: xt_recent: recent_seq_next should increase position index +- netfilter: synproxy: synproxy_cpu_seq_next should increase position index +- netfilter: nf_conntrack: ct_cpu_seq_next should increase position index +- macvlan: add cond_resched() during multicast processing +- bonding/alb: make sure arp header is pulled before accessing it +- devlink: validate length of region addr/len +- tipc: add missing attribute validation for MTU property +- net/ipv6: remove the old peer route if change it to a new one +- net/ipv6: need update peer route when modify metric +- net: fq: add missing attribute validation for orphan mask +- devlink: validate length of param values +- net/packet: tpacket_rcv: do not increment ring index on drop +- netlink: Use netlink header as base to calculate bad attribute offset +- net/ipv6: use configured metric when add peer route +- ipvlan: don't deref eth hdr before checking it's set +- ipvlan: do not use cond_resched_rcu() in ipvlan_process_multicast() +- ipvlan: do not add hardware address of master to its unicast filter list +- ipvlan: add cond_resched_rcu() while processing muticast backlog +- ipv6/addrconf: call ipv6_mc_up() for non-Ethernet interface +- inet_diag: return classid for all socket types +- gre: fix uninit-value in 
__iptunnel_pull_header +- cgroup, netclassid: periodically release file_lock on classid updating +- netfilter: nf_flowtable: fix documentation +- netfilter: nft_tunnel: no need to call htons() when dumping ports +- net: netlink: cap max groups which will be considered in netlink_bind() +- net/tls: Fix to avoid gettig invalid tls record +- ipv6: Fix nlmsg_flags when splitting a multipath route +- ipv6: Fix route replacement with dev-only route +- sctp: move the format error check out of __sctp_sf_do_9_1_abort +- net: sched: correct flower port blocking +- net: fib_rules: Correctly set table field when table number exceeds 8 bits +- netfilter: xt_hashlimit: limit the max size of hashtable +- mlxsw: spectrum_dpipe: Add missing error path +- bpf: Return -EBADRQC for invalid map type in __bpf_tx_xdp_map +- mlx5: work around high stack usage with gcc +- netfilter: nft_tunnel: add the missing ERSPAN_VERSION nla_policy +- net/sched: flower: add missing validation of TCA_FLOWER_FLAGS +- net/sched: matchall: add missing validation of TCA_MATCHALL_FLAGS +- core: Don't skip generic XDP program execution for cloned SKBs +- net/mlx5: IPsec, fix memory leak at mlx5_fpga_ipsec_delete_sa_ctx +- net/mlx5: IPsec, Fix esp modify function attribute +- net_sched: fix a resource leak in tcindex_set_parms() +- bonding/alb: properly access headers in bond_alb_xmit() +- sunrpc: expiry_time should be seconds not timeval +- tcp: clear tp->segs_{in|out} in tcp_disconnect() +- tcp: clear tp->data_segs{in|out} in tcp_disconnect() +- tcp: clear tp->delivered in tcp_disconnect() +- tcp: clear tp->total_retrans in tcp_disconnect() +- net_sched: fix an OOB access in cls_tcindex +- l2tp: Allow duplicate session creation with UDP +- cls_rsvp: fix rsvp_policy +- net: Fix skb->csum update in inet_proto_csum_replace16(). +- xfrm: interface: do not confirm neighbor when do pmtu update +- xfrm interface: fix packet tx through bpf_redirect() +- vti[6]: fix packet tx through bpf_redirect() +- netfilter: nft_tunnel: ERSPAN_VERSION must not be null +- igb: Fix SGMII SFP module discovery for 100FX/LX. 
+- ixgbe: Fix calculation of queue with VFs and flow director on interface flap +- ixgbevf: Remove limit of 10 entries for unicast filter list +- net_sched: ematch: reject invalid TCF_EM_SIMPLE +- netfilter: nf_tables: add __nft_chain_type_get() +- netfilter: ipset: use bitmap infrastructure completely +- netfilter: nft_osf: add missing check for DREG attribute +- tcp: do not leave dangling pointers in tp->highest_sack +- tcp_bbr: improve arithmetic division in bbr_update_bw() +- Revert "udp: do rmem bulk free even if the rx sk queue is empty" +- net-sysfs: Fix reference count leak +- net_sched: fix datalen for ematch +- net: rtnetlink: validate IFLA_MTU attribute in rtnl_create_link() +- net, ip_tunnel: fix namespaces move +- net, ip6_tunnel: fix namespaces move +- net: ip6_gre: fix moving ip6gre between namespaces +- ipv6: sr: remove SKB_GSO_IPXIP6 on End.D* actions +- packet: fix data-race in fanout_flow_is_huge() +- net: neigh: use long type to store jiffies delta +- xsk: Fix registration of Rx-only sockets +- net: netem: correct the parent's backlog when corrupted packet was dropped +- net: netem: fix error path for corrupted GSO frames +- act_mirred: Fix mirred_init_module error handling +- ip6erspan: remove the incorrect mtu limit for ip6erspan +- llc: fix sk_buff refcounting in llc_conn_state_process() +- llc: fix another potential sk_buff leak in llc_ui_sendmsg() +- net: sched: cbs: Avoid division by zero when calculating the port rate +- net/rds: Fix 'ib_evt_handler_call' element in 'rds_ib_stat_names' +- xsk: avoid store-tearing when assigning umem +- xsk: avoid store-tearing when assigning queues +- net/sched: cbs: Set default link speed to 10 Mbps in cbs_set_port_rate +- i40e: reduce stack usage in i40e_set_fc +- net/rds: Add a few missing rds_stat_names entries +- net: fix bpf_xdp_adjust_head regression for generic-XDP +- tipc: reduce risk of wakeup queue starvation +- xfrm interface: ifname may be wrong in logs +- xdp: fix possible cq entry leak +- net/tls: fix socket wmem accounting on fallback with netem +- net: netem: fix backlog accounting for corrupted GSO frames +- bpf: fix the check that forwarding is enabled in bpf_ipv6_fib_lookup +- net: core: support XDP generic on stacked devices. 
+- signal/bpfilter: Fix bpfilter_kernl to use send_sig not force_sig +- net/mlx5: Delete unused FPGA QPN variable +- mlxsw: spectrum: Set minimum shaper on MC TCs +- mlxsw: reg: QEEC: Add minimum shaper fields +- tipc: fix wrong timeout input for tipc_wait_for_cond() +- tipc: update mon's self addr when node addr generated +- mlxsw: spectrum_qdisc: Include MC TCs in Qdisc counters +- mlxsw: spectrum: Wipe xstats.backlog of down ports +- tcp: fix marked lost packets not being retransmitted +- af_unix: add compat_ioctl support +- ethtool: reduce stack usage with clang +- fs: fix kabi broken introduced by fixing CVE-2020-14381 +- futex: Unbreak futex hashing +- futex: Fix inode life-time issue +- block/bio-integrity: don't free 'buf' if bio_integrity_add_page() failed +- arm64/ascend: set the correct dvpp mmap area when no MAP_DVPP flags +- ext4: fix error pointer dereference +- ext4: Avoid freeing inodes on dirty list +- writeback: Export inode_io_list_del() +- blktrace: ensure our debugfs dir exists +- blktrace: fix debugfs use after free +- loop: be paranoid on exit and prevent new additions / removals +- Revert "block: rename 'q->debugfs_dir' and 'q->blk_trace->dir' in blk_unregister_queue()" +- ext4: force buffer up-to-date while marking it dirty +- ext4: fix a data race at inode->i_disksize +- ext4: fix a data race at inode->i_blocks +- jbd2: abort journal if free a async write error metadata buffer +- ext4: abort the filesystem if failed to async write metadata buffer +- net: hns3: update hns3 version to 1.9.38.7 +- net: hns3: initialize the message content sent to the VF +- net: hns3: check vlan id before using it +- net: hns3: check RSS key index before using +- net: hns3: check cmdq message parameters sent from VF +- config: add certs dir to CONFIG_MODULE_SIG_KEY +- net/hinic: Fix Oops when probing hinic driver + +* Mon Sep 14 2020 xinghe - 4.19.90-2008.6.0.0044 +- add perf-tip file fix cannot load perf-tips warning + +* Mon Aug 31 2020 Yang Yingliang - 4.19.90-2008.6.0.0043 +- arm64/config: enable TIPC module for openEuler +- net: hns3: update hns3 version to 1.9.38.6 +- net: hns3: add support for dumping MAC umv counter in debugfs +- net: hns3: fix bug when PF set the duplicate MAC address for VFs +- net/hinic: Check the legality of out_size in nictool +- net/hinic: Fix out-of-bounds access when setting ets +- net/hinic: Rename camelCase used in nictool +- net/hinic: Fix alignment and code style +- net/hinic: Delete unused heartbeat enhancement feature +- net/hinic: Delete the unused chip fault handling process +- net/hinic: Delete unused microcode back pressure feature +- net/hinic: Fix misspelled word and wrong print format +- net/hinic: update hinic version to 2.3.2.15 +- net/hinic: Add the maximum value of the module parameter poll_weight +- net/hinic: Add pause/pfc mutual exclusion protection +- net/hinic: Add lock for mgmt channel event_flag +- net/hinic: Fix signed integer overflow +- nfsd: apply umask on fs without ACL support +- arm64/ascend: use ascend_enable_full to enable ascend platform +- sbsa_gwdt: Enable ARM_SBSA_WATCHDOG_PANIC_NOTIFIER in hulk_defconfig +- sbsa_gwdt: Introduce a panic notifier +- memcg/ascend: Support not account pages of cdm for memcg +- dt-bindings: iommu: Add Message Based SPI for hisilicon +- iommu: support message based spi for smmu +- nbd_genl_status: null check for nla_nest_start +- config: Add default value for CONFIG_ASCEND_INIT_ALL_GICR +- irq-gic-v3: Add support to init ts core GICR +- ascend: mm/hugetlb: Enable 
ASCEND_CHARGE_MIGRAGE_HUGEPAGES for hulk_defconfig +- ascend: mm/hugetlb: Enable charge migrate hugepages +- config: Add default value for CONFIG_SERIAL_ATTACHED_MBIGEN +- serial: amba-pl011: Fix serial port discard interrupt when interrupt signal line of serial port is connected to mbigen. +- iommu: fix a mistake for iommu_unregister_device_fault_handler +- printk: Export a symbol. +- arm64/ascend: Enable ASCEND_IOPF_HIPRI for hulk_defconfig +- arm64/ascend: Enable iopf hipri feature for Ascend Platform +- mm: Check numa node hugepages enough when mmap hugetlb +- arm64/ascend: Enable CONFIG_ASCEND_OOM for hulk_defconfig +- arm64/ascend: Add new enable_oom_killer interface for oom contrl +- svm: add support for allocing memory which is within 4G physical address in svm_mmap +- suspend: export cpu_suspend/cpu_resume/psci_ops +- printk: export log_buf_addr_get/log_buf_len_get +- arm64/ascend: fix memleak when remove svm +- iommu: fix NULL pointer when release iopf queue +- arm64/ascend: Enable ASCEND_DVPP_MMAP for hulk_defconfig +- arm64/ascend: Don't use the DvPP mmap space for svm. +- arm64/ascend: Enable DvPP mmap features for Ascend Platform +- usb: xhci: Add workaround for phytium +- arm64: topology: Support PHYTIUM CPU +- arm64: mm: define NET_IP_ALIGN to 0 +- arm64: ilp32: fix kabi change +- config: add CONFIG_ARM64_ILP32 in defconfigs +- arm64: ilp32: fix compile warning cause by 'VA_BITS' +- arm64:ilp32: add ARM64_ILP32 to Kconfig +- arm64:ilp32: add vdso-ilp32 and use for signal return +- arm64: ptrace: handle ptrace_request differently for aarch32 and ilp32 +- arm64: ilp32: introduce ilp32-specific sigframe and ucontext +- arm64: signal32: move ilp32 and aarch32 common code to separated file +- arm64: signal: share lp64 signal structures and routines to ilp32 +- arm64: ilp32: introduce syscall table for ILP32 +- arm64: ilp32: share aarch32 syscall handlers +- arm64: ilp32: introduce binfmt_ilp32.c +- arm64: change compat_elf_hwcap and compat_elf_hwcap2 prefix to a32 +- arm64: introduce binfmt_elf32.c +- arm64: ilp32: add is_ilp32_compat_{task, thread} and TIF_32BIT_AARCH64 +- arm64: introduce is_a32_compat_{task, thread} for AArch32 compat +- arm64: uapi: set __BITS_PER_LONG correctly for ILP32 and LP64 +- arm64: rename functions that reference compat term +- arm64: rename COMPAT to AARCH32_EL0 +- arm64: ilp32: add documentation on the ILP32 ABI for ARM64 +- thread: move thread bits accessors to separated file +- asm-generic: Drop getrlimit and setrlimit syscalls from default list +- 32-bit userspace ABI: introduce ARCH_32BIT_OFF_T config option +- compat ABI: use non-compat openat and open_by_handle_at variants +- ptrace: Add compat PTRACE_{G, S}ETSIGMASK handlers +- arm64: signal: Make parse_user_sigframe() independent of rt_sigframe layout +- scsi: libsas: Check link status in ATA prereset() +- scsi: libsas: Remove postreset from sas_sata_ops + +* Wed Aug 19 2020 Yang Yingliang - 4.19.90-2008.3.0.0042 +- x86/mm: split vmalloc_sync_all() +- kexec/uefi: copy secure_boot flag in boot params across kexec reboot +- x86/config: enable CONFIG_HINIC by default +- cgroup: add missing skcd->no_refcnt check in cgroup_sk_clone() +- Revert "cgroup: add missing skcd->no_refcnt check in cgroup_sk_clone()" +- cgroup: add missing skcd->no_refcnt check in cgroup_sk_clone() +- ext4: Correctly restore system zone info when remount fails +- ext4: Handle add_system_zone() failure in ext4_setup_system_zone() +- ext4: Fold ext4_data_block_valid_rcu() into the caller +- ext4: Check journal inode 
extents more carefully +- ext4: Don't allow overlapping system zones +- ext4: Handle error of ext4_setup_system_zone() on remount +- nfs: set invalid blocks after NFSv4 writes +- cgroup1: don't call release_agent when it is "" +- cgroup-v1: cgroup_pidlist_next should update position index +- cgroup: Iterate tasks that did not finish do_exit() +- cgroup: cgroup_procs_next should increase position index +- mm/vmscan.c: don't round up scan size for online memory cgroup +- cgroup: saner refcounting for cgroup_root +- cgroup: Prevent double killing of css when enabling threaded cgroup +- mm: memcg/slab: fix memory leak at non-root kmem_cache destroy +- mm: memcg/slab: synchronize access to kmem_cache dying flag using a spinlock +- mm/memcg: fix refcount error while moving and swapping +- memcg: fix NULL pointer dereference in __mem_cgroup_usage_unregister_event +- mm/memcontrol.c: lost css_put in memcg_expand_shrinker_maps() +- random32: move the pseudo-random 32-bit definitions to prandom.h +- random32: remove net_rand_state from the latent entropy gcc plugin +- random: fix circular include dependency on arm64 after addition of percpu.h +- ARM: percpu.h: fix build error +- random32: update the net random state on interrupt and activity +- vgacon: Fix for missing check in scrollback handling +- memcg: fix memcg_kmem_bypass() for remote memcg charging +- arm64/numa: cdm: Cacheline aligned cdmmask to improve performance +- mm/page_alloc.c: ratelimit allocation failure warnings more aggressively +- iomap: fix sub-page uptodate handling +- net/hinic: Add dfx information +- net/hinic: Add read chip register interface +- net/hinic: Synchronize time to firmware every hour +- net: add {READ|WRITE}_ONCE() annotations on ->rskq_accept_head +- net: avoid possible false sharing in sk_leave_memory_pressure() +- sctp: add chunks to sk_backlog when the newsk sk_socket is not set +- netfilter: ctnetlink: honor IPS_OFFLOAD flag +- fork, memcg: alloc_thread_stack_node needs to set tsk->stack +- net/udp_gso: Allow TX timestamp with UDP GSO +- inet: frags: call inet_frags_fini() after unregister_pernet_subsys() +- netfilter: ebtables: CONFIG_COMPAT: reject trailing data after last rule +- netfilter: nft_flow_offload: add entry to flowtable after confirmation +- perf/core: Fix the address filtering fix +- netfilter: nft_set_hash: bogus element self comparison from deactivation path +- fs/nfs: Fix nfs_parse_devname to not modify it's argument +- ip_tunnel: Fix route fl4 init in ip_md_tunnel_xmit +- net/mlx5: Take lock with IRQs disabled to avoid deadlock +- xfs: Sanity check flags of Q_XQUOTARM call +- cgroup: fix KABI broken by "cgroup: fix cgroup_sk_alloc() for sk_clone_lock()" +- cgroup: fix cgroup_sk_alloc() for sk_clone_lock() +- net: memcg: fix lockdep splat in inet_csk_accept() +- net: memcg: late association of sock to memcg +- cgroup: memcg: net: do not associate sock with unrelated cgroup +- net/hinic: Retry to get ack after VF message timeout +- net/hinic: Fix register_chrdev_region fails for major number 921 +- net/hinic: Fix mgmt message timeout during firmware hot upgrade +- net/hinic: Correct return and features from set_features callback +- net/hinic: Hinic only supports csum offloading of vxlan/ipip tunnel packets +- net/hinic: Set net device link down when the chip fault +- net/hinic: Delete unused UFO codes +- net/hinic: Delete the remaining old linux kernel adaptation interface +- net/hinic: Delete the old kernel version adaptation interface in netdev ops +- net/hinic: Delete the old kernel 
version adaptation interface in ethtool ops +- net/hinic: Delete useless linux adaptation functions +- net/hinic: Delete unused functions and macro definitions in ossl +- netfilter: nat: check the bounds of nf_nat_l3protos and nf_nat_l4protos + +* Wed Jul 29 2020 Yang Yingliang - 4.19.90-2007.2.0.0041 +- mm, vmstat: reduce zone->lock holding time by /proc/pagetypeinfo +- kernel/notifier.c: intercept duplicate registrations to avoid infinite loops +- macvlan: use skb_reset_mac_header() in macvlan_queue_xmit() +- scsi: qedf: remove memset/memcpy to nfunc and use func instead +- ext4: Send ext4_handle_error message after set sb->s_flags +- tcp: refine rule to allow EPOLLOUT generation under mem pressure +- netfilter: nf_tables: fix flowtable list del corruption +- netfilter: nf_tables: store transaction list locally while requesting module +- netfilter: nf_tables: remove WARN and add NLA_STRING upper limits +- netfilter: nft_tunnel: fix null-attribute check +- netfilter: arp_tables: init netns pointer in xt_tgdtor_param struct +- netfilter: fix a use-after-free in mtype_destroy() +- mm/huge_memory.c: thp: fix conflict of above-47bit hint address and PMD alignment +- mm/huge_memory.c: make __thp_get_unmapped_area static +- mm/page-writeback.c: avoid potential division by zero in wb_min_max_ratio() +- mm: memcg/slab: call flush_memcg_workqueue() only if memcg workqueue is valid +- mm/shmem.c: thp, shmem: fix conflict of above-47bit hint address and PMD alignment +- iommu: Remove device link to group on failure +- netfilter: ipset: avoid null deref when IPSET_ATTR_LINENO is present +- netfilter: conntrack: dccp, sctp: handle null timeout argument +- netfilter: arp_tables: init netns pointer in xt_tgchk_param struct +- tty: always relink the port +- tty: link tty and port before configuring it as console +- chardev: Avoid potential use-after-free in 'chrdev_open()' +- net: hns3: update hns3 version to 1.9.38.5 +- net: hns3: fix the number of queues +- net: hns3: fixes a promiscuous mode +- net: hns3: fix driver bug +- net: hns3: fix for VLAN config when reset +- net: hns3: fix bug when calculating the +- net: hns3: fix speed unknown issue in bond +- net: hns3: fix a missing return in hclge_set_vlan_filter() +- net: hns3: update hns3 version to 1.9.38.3 +- net: hns3: remove redundant codes entered by mistake +- net/hinic: Fix out-of-bounds when receiving mbox messages +- RDMA/hns: Modify the code based on the review comments +- Revert "zram: convert remaining CLASS_ATTR() to CLASS_ATTR_RO()" +- config: set CONFIG_CAN_DEBUG_DEVICES for arm64 hulk_defconfig +- config: add CONFIG_CAN_J1939 in defconfigs +- can: j1939: fix address claim code example +- can: j1939: j1939_sk_bind(): take priv after lock is held +- can: j1939: warn if resources are still linked on destroy +- can: j1939: j1939_can_recv(): add priv refcounting +- can: j1939: transport: j1939_cancel_active_session(): use hrtimer_try_to_cancel() instead of hrtimer_cancel() +- can: j1939: make sure socket is held as long as session exists +- can: j1939: transport: make sure the aborted session will be deactivated only once +- can: j1939: socket: rework socket locking for j1939_sk_release() and j1939_sk_sendmsg() +- can: j1939: main: j1939_ndev_to_priv(): avoid crash if can_ml_priv is NULL +- can: j1939: move j1939_priv_put() into sk_destruct callback +- can: af_can: export can_sock_destruct() +- can: j1939: transport: j1939_xtp_rx_eoma_one(): Add sanity check for correct total message size +- can: j1939: transport: j1939_session_fresh_new():
make sure EOMA is send with the total message size set +- can: j1939: fix memory leak if filters was set +- can: j1939: fix resource leak of skb on error return paths +- can: add support of SAE J1939 protocol +- can: af_can: use spin_lock_bh() for &net->can.can_rcvlists_lock +- can: af_can: remove NULL-ptr checks from users of can_dev_rcv_lists_find() +- can: make use of preallocated can_ml_priv for per device struct can_dev_rcv_lists +- can: af_can: can_pernet_exit(): no need to iterate over and cleanup registered CAN devices +- can: af_can: can_rx_register(): use max() instead of open coding it +- can: af_can: give variable holding the CAN receiver and the receiver list a sensible name +- can: af_can: rename find_dev_rcv_lists() to can_dev_rcv_lists_find() +- can: af_can: rename find_rcv_list() to can_rcv_list_find() +- can: proc: give variable holding the CAN per device receive lists a sensible name +- can: af_can: give variable holding the CAN per device receive lists a sensible name +- can: proc: give variables holding CAN statistics a sensible name +- can: af_can: give variables holding CAN statistics a sensible name +- can: af_can: can_pernet_init(): Use preferred style kzalloc(sizeof()) usage +- can: extend sockaddr_can to include j1939 members +- can: add socket type for CAN_J1939 +- can: introduce CAN_REQUIRED_SIZE macro +- can: introduce CAN midlayer private and allocate it automatically +- net: hns3: update hns3 version to 1.9.38.3 +- net: hns3: clean code for security +- net: hns3: modify an incorrect type in +- net: hns3: check queue id range before +- net: hns3: fix error handling for desc filling +- net: hns3: fix for not calculating tx BD send size correctly +- net: hns3: fix for not unmapping tx buffer correctly +- net: hns3: fix desc filling bug when skb is expanded or lineared +- net: hns3: drop the WQ_MEM_RECLAIM flag when allocating wq +- net: hns3: optimize the parameter of hclge_update_port_base_vlan_cfg and ignore the send mailbox failure when VF is unalive +- net: hns3: use netif_tx_disable to stop the transmit queue +- net: hns3: add support of dumping mac reg in debugfs +- net: hns3: fix a fake tx timeout issue +- net: hns3: fix use-after-free when doing self test +- net: hns3: add a log for switching VLAN filter state +- net: hns3: fix problem of missing updating port information +- net: hns3: add vlan list lock to protect vlan list and fix duplicate node in vlan list +- net: hns3: fix bug for port base vlan configuration +- net: hns3: skip periodic service task if reset failed +- net: hns3: check reset pending after FLR prepare +- net: hns3: fix for mishandle of asserting VF reset fail +- net: hns3: fix for missing uninit debugfs when unload driver +- net: hns3: unify format of failed print information for clean up +- net: hns3: modify location of one print information +- net: hns3: fix return value error when query mac link status fail +- net: hns3: remove unnecessary mac enable in app loopback +- net: hns3: remove some useless code +- net: hns3: fix an inappropriate type assignment +- net: hns3: update hns3 version to 1.9.38.2 +- net: hns3: fix reset bug +- sdei_watchdog: fix compile error when CONFIG_HARDLOCKUP_DETECTOR is not set +- net/hinic: Add support for 128 qps +- net/hinic: Add support for X86 Arch +- fs/filescontrol: add a switch to enable / disable accounting of open fds +- usb: usbtest: fix missing kfree(dev->buf) in usbtest_disconnect +- vfio/pci: Fix SR-IOV VF handling with MMIO blocking +- signal: Export tracepoint symbol signal_generate +- 
x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches. +- x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS. +- x86/speculation: Add support for STIBP always-on preferred mode +- x86/speculation: Change misspelled STIPB to STIBP +- x86/speculation: Prevent rogue cross-process SSBD shutdown +- vfio-pci: Invalidate mmaps and block MMIO access on disabled memory +- vfio-pci: Fault mmaps to enable vma tracking +- vfio/type1: Support faulting PFNMAP vmas +- vfio/type1: Fix VA->PA translation for PFNMAP VMAs in vaddr_get_pfn() +- vfio_pci: Enable memory accesses before calling pci_map_rom +- net/hinic: Fix copying out of bounds when using tools to get statistics +- uacce: fix problem of parameter check +- net: hns3: update hns3 version to 1.9.38.1 +- net: hns3: add device name valid check +- ext4, jbd2: ensure panic by fix a race between jbd2 abort and ext4 error handlers +- Revert "ext4, jbd2: switch to use completion variable instead of JBD2_REC_ERR" +- x86/speculation: Add Ivy Bridge to affected list +- x86/speculation: Add SRBDS vulnerability and mitigation documentation +- x86/speculation: Add Special Register Buffer Data Sampling (SRBDS) mitigation +- x86/cpu: Add 'table' argument to cpu_matches() +- x86/cpu: Add a steppings field to struct x86_cpu_id +- ext4: stop overwrite the errcode in ext4_setup_super +- panic/printk: fix zap_lock +- vt: keyboard: avoid signed integer overflow in k_ascii +- ext4: Fix block bitmap corruption when io error +- mm: Fix mremap not considering huge pmd devmap +- net-sysfs: Call dev_hold always in rx_queue_add_kobject +- net-sysfs: Call dev_hold always in netdev_queue_add_kobject +- net-sysfs: fix netdev_queue_add_kobject() breakage +- net-sysfs: Fix reference count leak in rx|netdev_queue_add_kobject +- SUNRPC: Fix xprt->timer use-after-free +- printk/panic: Avoid deadlock in printk() +- block: Fix use-after-free in blkdev_get() +- ata/libata: Fix usage of page address by page_address in ata_scsi_mode_select_xlat function +- media: go7007: fix a miss of snd_card_free +- vt: fix unicode console freeing with a common interface +- vt: don't use kmalloc() for the unicode screen buffer +- scsi: Fix kabi change due to add offline_already member in struct scsi_device +- scsi: core: avoid repetitive logging of device offline messages +- hfs: fix null-ptr-deref in hfs_find_init() +- ext4, jbd2: switch to use completion variable instead of JBD2_REC_ERR +- jbd2: clean __jbd2_journal_abort_hard() and __journal_abort_soft() +- jbd2: make sure ESHUTDOWN to be recorded in the journal superblock +- vt: vt_ioctl: fix use-after-free in vt_in_use() +- vt: vt_ioctl: fix VT_DISALLOCATE freeing in-use virtual console +- vt: vt_ioctl: remove unnecessary console allocation checks +- vt: switch vt_dont_switch to bool +- vt: ioctl, switch VT_IS_IN_USE and VT_BUSY to inlines +- vt: selection, introduce vc_is_sel +- ALSA: proc: Avoid possible leaks of snd_info_entry objects +- net/hinic: update hinic version to 2.3.2.14 +- net/hinic: Fix memleak when create_singlethread_workqueue() is failed +- net/hinic: Fix VF driver loading failure during the firmware hot upgrade process +- net/hinic: Fix data inconsistency in the forwarding scenario when DCB is turned on +- net/hinic: Fix reboot -f stuck for a long time +- net/hinic: Add tx timeout dfx information +- net/hinic: Add a lock when registering the driver's global netdevice notifier +- net/hinic: Fix VF has a low probability of network failure on the virtual machine +- net/hinic: Fix the 
firmware compatibility bug in the MAC reuse scenario +- irqchip/gic-v3-its: Probe ITS page size for all GITS_BASERn registers +- selinux: properly handle multiple messages in selinux_netlink_send() +- media: tw5864: Fix possible NULL pointer dereference in tw5864_handle_frame +- arm64/mpam: Supplement err tips in info/last_cmd_status +- arm64/mpam: Fix unreset resources when mkdir ctrl group or umount resctrl +- MPAM / ACPI: Refactoring MPAM init process and set MPAM ACPI as entrance +- ACPI 6.x: Add definitions for MPAM table +- ACPI / PPTT: cacheinfo: Label caches based on fw_token +- ACPI / PPTT: Filthy hack to find _a_ backwards reference in the PPTT [ROTTEN] +- ACPI / PPTT: Add helper to validate cache nodes from an offset [dead] +- ACPI / processor: Add helper to convert acpi_id to a phys_cpuid +- ext4: report error to userspace by netlink +- pcie_cae add judgement about chip type +- Enable trust mode control for SR-IOV ports +- Added ethtool_ops interface to query optical module information +- Revert "consolemap: Fix a memory leaking bug in drivers/tty/vt/consolemap.c" +- ext4: fix support for inode sizes > 1024 bytes +- ext4: validate the debug_want_extra_isize mount option at parse time +- sunrpc: clean up properly in gss_mech_unregister() +- sunrpc: svcauth_gss_register_pseudoflavor must reject duplicate registrations. +- sunrpc: check that domain table is empty at module unload. +- arm64: smp: Increase secondary CPU boot timeout value +- KVM: arm64: Only flush VM for the first and the last vcpu +- media: remove videobuf-core.c +- ext4: mark block bitmap corrupted when found instead of BUGON +- bcache: fix potential deadlock problem in btree_gc_coalesce +- fs/binfmt_elf.c: allocate initialized memory in fill_thread_core_info() +- USB: gadget: fix illegal array access in binding with UDC + +* Wed Jun 03 2020 Xie XiuQi - 4.19.90-2005.2.0.0040 +- update req_distinguished_name for x509.genkey + +* Fri May 22 2020 Yang Yingliang - 4.19.90-2005.2.0.0039 +- signal: fix kabi changes in struct task_struct +- signal: Extend exec_id to 64bits +- livepatch/core: Fix compile error when CONFIG_JUMP_LABEL closed +- net/hinic: Adjust AEQ interrupt retransmission settings +- net/hinic: Number of VF queues cleared during initialization +- net/hinic: Reduce VF EQ queue depth in SDI mode +- net/hinic: Disable the CSUM offload capability of TUNNEL in SDI mode +- net/hinic: VF does not display firmware statistics +- net/hinic: SDI bare metal VF supports dynamic queue +- net/hinic: Support doorbell BAR size of 256K in SDI mode +- net/hinic: Supports variable SDI master host ppf_id +- net/hinic: Optimize SDI interrupt aggregation parameters +- netlabel: cope with NULL catmap +- netprio_cgroup: Fix unlimited memory leak of v2 cgroups +- net: hns3: update hns3 version to 1.9.38.0 +- net: hns3: solve the unlock 2 times when rocee init fault +- scsi: sg: add sg_remove_request in sg_write +- KVM: SVM: Fix potential memory leak in svm_cpu_init() +- ptp: free ptp device pin descriptors properly +- spi: spi-dw: Add lock protect dw_spi rx/tx to prevent concurrent calls +- drivers sfc: Fix cross page write error +- drivers sysctl: add read and write interface of pmbus +- net/hinic: Fix TX timeout under ipip tunnel packet +- xsk: Add missing check on user supplied headroom size +- fs/namespace.c: fix mountpoint reference counter race +- USB: core: Fix free-while-in-use bug in the USB S-Glibrary +- block, bfq: fix use-after-free in bfq_idle_slice_timer_body +- mwifiex: Fix possible buffer overflows in 
mwifiex_cmd_append_vsie_tlv() +- mwifiex: Fix possible buffer overflows in mwifiex_ret_wmm_get_status() +- scsi: mptfusion: Fix double fetch bug in ioctl +- mt76: fix array overflow on receiving too many fragments for a packet +- net: hns3: change the order of reinitializing RoCE and VF during reset +- net: hns3: update hns3 version to 1.9.37.9 +- Revert "scsi: fix failing unload of a LLDD module" +- s390/mm: fix page table upgrade vs 2ndary address mode accesses +- pcie_cae support getting chipnums of this system +- net: hns3: remove the unnecessary ccflags + +* Wed May 06 2020 Yang Yingliang - 4.19.90-2005.1.0.0038 +- perf: Make perf able to build with latest libbfd +- nbd: use blk_mq_queue_tag_inflight_iter() +- blk-mq: use blk_mq_queue_tag_inflight_iter() in debugfs + +* Tue Apr 28 2020 Yang Yingliang - 4.19.90-2004.1.0.0037 +- net: hns3: update hns3 version to 1.9.37.8 +- net: hns3: optimize FD tuple inspect +- net: hns3: fix unsupported config for RSS +- net: hns3: disable auto-negotiation off with 1000M setting in ethtool +- net: hns3: update VF mac list configuration as PF +- net: hns3: modify magic number in hclge_dbg_dump_ncl_config +- net: hns3: do mac configuration instead of rollback when malloc mac node fail +- net: hns3: update the device mac address asynchronously +- net: hns3: add one parameter for function hns3_nic_maybe_stop_tx() +- net: hns3: delete unnecessary logs after kzalloc fails +- net: hns3: fix some coding style found by codereview +- net: hns3: use uniform format "failed to xxx" to print fail message +- net: hns3: add debug information for flow table when failed +- net: hns3: modify hclge_restore_fd_entries()'s return type to void +- net: hns3: splice two "if" logic as one +- net: hns3: clean up some coding style issue +- net: hns3: modify definition location of struct hclge_mac_ethertype_idx_rd_cmd +- net: hns3: modify comment of macro HNAE3_MIN_VECTOR_NUM +- net: hns3: modify one macro into unsigned type +- net: hns3: delete unused macro HCLGEVF_MPF_ENBALE +- net: hns3: modify definition location of struct hclge_vf_vlan_cfg +- net: hns3: remove unnecessary 'ret' variable in hclge_misc_err_recovery() +- net: hns3: remove unnecessary register info in hclge_reset_err_handle() +- net: hns3: misc cleanup for VF reset +- net: hns3: merge mac state HCLGE_MAC_TO_DEL and HCLGE_MAC_DEL_FAIL +- net: hns3: update hns3 version to 1.9.37.7 +- scsi: hisi_sas: do not reset the timer to wait for phyup when phy already up +- net: hns3: add suspend/resume function for hns3 driver +- btrfs: tree-checker: Enhance chunk checker to validate chunk profile +- net/hinic: fix the problem that out-of-bounds access +- scsi: sg: fix memory leak in sg_build_indirect +- scsi: sg: add sg_remove_request in sg_common_write +- btrfs: Don't submit any btree write bio if the fs has errors +- btrfs: extent_io: Handle errors better in extent_write_full_page() +- net/hinic: Delete useless header files +- powerpc/powernv/idle: Restore AMR/UAMOR/AMOR after idle +- media: xirlink_cit: add missing descriptor sanity checks +- Input: add safety guards to input_set_keycode() +- f2fs: fix to avoid memory leakage in f2fs_listxattr +- media: stv06xx: add missing descriptor sanity checks +- media: ov519: add missing endpoint sanity checks +- btrfs: tree-checker: Verify inode item +- btrfs: delayed-inode: Kill the BUG_ON() in btrfs_delete_delayed_dir_index() +- net: hns3: update hns3 version to 1.9.37.6 +- net: hns3: ignore the send mailbox failure by VF is unalive +- net: hns3: update hns3 version to 1.9.37.5 
+- net: hns3: fix "tc qdisc del" failed issue +- net: hns3: rename two functions from periodical to periodic +- net: hns3: modify some print messages for cleanup and keep style consistent +- net: hns3: add some blank lines for cleanup +- net: hns3: sync some code from linux mainline +- net: hns3: fix mailbox send to VF failed issue +- net: hns3: disable phy loopback setting in hclge_mac_start_phy +- net: hns3: delete some useless code +- net: hns3: remove the limitation of MAC address duplicate configuration +- net: hns3: delete the unused struct hns3_link_mode_mapping +- net: hns3: rename one parameter in hclge_add_fd_entry_by_arfs() +- net: hns3: modify the location of macro HCLGE_LINK_STATUS_MS definition +- net: hns3: modify some unsuitable parameter type of RSS +- net: hns3: move some definition location +- net: hns3: add judgement for hclgevf_update_port_base_vlan_info() +- net: hns3: check null pointer in function hclge_fd_config_rule() +- net: hns3: optimize deletion of the flow direction table +- net: hns3: fix a ipv6 address copy problem in hclge_fd_get_flow_tuples() +- net: hns3: fix VF bandwidth does not take effect in some case +- net: hns3: synchronize some print relating to reset issue +- net: hns3: delete unnecessary 5s delay judgement in hclgevf_reset_event() +- net: hns3: delete unnecessary reset handling judgement in hclgevf_reset_tqp() +- net: hns3: delete unnecessary judgement in hns3_get_regs() +- net: hns3: delete one variable in hclge_get_sset_count() for optimization +- net: hns3: optimize return process for phy loop back +- net: hns3: fix "mac exist" problem +- net: hns3: add one printing information in hnae3_unregister_client() function +- slcan: Don't transmit uninitialized stack data in padding +- mm: mempolicy: require at least one nodeid for MPOL_PREFERRED +- livepatch/core: fix kabi for klp_rel_state +- livepatch/core: support jump_label +- arm64: entry: SP Alignment Fault doesn't write to FAR_EL1 +- arm64: mark (__)cpus_have_const_cap as __always_inline +- arm64/module: revert to unsigned interpretation of ABS16/32 relocations +- arm64/module: deal with ambiguity in PRELxx relocation ranges +- i2c: designware: Add ACPI HID for Hisilicon Hip08-Lite I2C controller +- ACPI / APD: Add clock frequency for Hisilicon Hip08-Lite I2C controller +- qm: fix packet loss for acc +- net/hinic: Solve the problem that 1822 NIC reports 5d0 error +- net: hns3: Rectification of driver code review +- net: hns3: update hns3 version to 1.9.37.4 +- net: hns3: additional fix for fraglist handling +- net: hns3: fix for fraglist skb headlen not handling correctly +- net: hns3: update hns3 version to 1.9.37.3 +- sec: modify driver to adapt dm-crypt +- qm: reinforce reset failure scene +- zip: fix decompress a empty file +- hpre: dfx for IO operation and delay +- RDMA/hns: optimize mtr management and fix mtr addressing bug +- RDMA/hns: fix bug of accessing null pointer +- sec: Overall optimization of sec code +- qm: optimize the maximum number of VF and delete invalid addr +- qm: optimize set hw_reset flag logic for user +- qm: fixup the problem of wrong judgement of used parameter +- qm: Move all the same logic functions of hisilicon crypto to qm +- drivers : localbus cleancode +- drivers : sysctl cleancode +- drivers : sfc cleancode +- kretprobe: check re-registration of the same kretprobe earlier +- vhost: Check docket sk_family instead of call getname +- btrfs: tree-checker: Add EXTENT_ITEM and METADATA_ITEM check +- block: fix possible memory leak in 'blk_prepare_release_queue' 
+- Revert "dm-crypt: Add IV generation templates" +- Revert "dm-crypt: modify dm-crypt to rely on IV generation templates" + +* Sat Mar 21 2020 Yang Yingliang - 4.19.90-2003.4.0.0036 +- x86/config: enable CONFIG_CFQ_GROUP_IOSCHED +- x86/openeuler_config: disable CONFIG_EFI_VARS + +* Fri Mar 20 2020 Yang Yingliang - 4.19.90-2003.3.0.0035 +- btrfs: don't use WARN_ON when ret is -ENOTENT in __btrfs_free_extent() +- cifs: fix panic in smb2_reconnect + +* Wed Mar 18 2020 Yang Yingliang - 4.19.90-2003.2.0.0034 +- xfs: avoid f_bfree overflow +- xfs: always init fdblocks in mount +- xfs: devirtualize ->sf_entsize and ->sf_nextentry +- block: fix inaccurate io_ticks +- block: delete part_round_stats and switch to less precise counting +- CIFS: Fix bug which the return value by asynchronous read is error +- net/hinic: Magic number rectification +- net/hinic: slove the problem that VF may be disconnected when vm reboot and receive lots of broadcast packets. +- openeuler/config: disable CONFIG_EFI_VARS +- pagecache: support percpu refcount to imporve performance +- arm64: mm: support setting page attributes for debugging +- staging: android: ashmem: Disallow ashmem memory from being remapped +- mm/resource: Return real error codes from walk failures +- vt: selection, push sel_lock up +- vt: selection, push console lock down +- net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup +- net: ipv6: add net argument to ip6_dst_lookup_flow + +* Mon Mar 16 2020 Luo Chunsheng - 4.19.90-2003.1.1.0033 +- fix kernel-devel upgrade running scriptlet failed + +* Sat Mar 14 2020 Yang Yingliang - 4.19.90-2003.1.1.0032 +- openeuler/config: enable CONFIG_FCOE +- openeuler/config: disable unused debug config +- net: hns3: update the number of version +- net: hns3: add dumping vlan filter config in debugfs +- net: hns3: Increase vlan tag0 when close the port_base_vlan +- net: hns3: adds support for extended VLAN mode and 'QOS' in vlan 802.1Q protocol. 
+ +* Thu Mar 12 2020 Yang Yingliang - 4.19.90-2003.1.0.0031 +- net/hinic: driver code compliance rectification +- net/hinic: Solve the problem that the network card hangs when receiving the skb which frag_size=0 +- net: hns3: adds support for reading module eeprom info +- net: hns3: update hns3 version to 1.9.37.1 +- btrfs: tree-checker: Remove comprehensive root owner check +- xfs: add agf freeblocks verify in xfs_agf_verify +- blktrace: fix dereference after null check +- blktrace: Protect q->blk_trace with RCU +- vgacon: Fix a UAF in vgacon_invert_region +- can, slip: Protect tty->disc_data in write_wakeup and close with RCU +- relay: handle alloc_percpu returning NULL in relay_open +- drm/radeon: check the alloc_workqueue return value +- apparmor: Fix use-after-free in aa_audit_rule_init + +* Wed Mar 4 2020 Luo Chunsheng - 4.19.95-2002.6.0.0030 +- delete useless directory + +* Tue Mar 3 2020 Yang Yingliang - 4.19.95-2002.6.0.0029 +- livepatch/x86: enable livepatch config openeuler +- livepatch/x86: enable livepatch config for hulk +- livepatch/arm64: check active func in consistency stack checking +- livepatch/x86: check active func in consistency stack checking +- livepatch/x86: support livepatch without ftrace +- KVM: nVMX: Check IO instruction VM-exit conditions +- KVM: nVMX: Refactor IO bitmap checks into helper function +- KVM: nVMX: Don't emulate instructions in guest mode +- floppy: check FDC index for errors before assigning it +- ext4: add cond_resched() to __ext4_find_entry() +* Fri Feb 28 2020 Yang Yingliang - 4.19.95-2002.5.0.0028 +- x86 / config: add openeuler_defconfig +- files_cgroup: Fix soft lockup when refcnt overflow. +- vt: selection, close sel_buffer race +- vt: selection, handle pending signals in paste_selection +- RDMA/hns: Compilation Configuration update +- jbd2: do not clear the BH_Mapped flag when forgetting a metadata buffer +- jbd2: move the clearing of b_modified flag to the journal_unmap_buffer() +- iscsi: use dynamic single thread workqueue to improve performance +- workqueue: implement NUMA affinity for single thread workqueue +- iscsi: add member for NUMA aware order workqueue +- Revert "debugfs: fix kabi for function debugfs_remove_recursive" +- Revert "bdi: fix kabi for struct backing_dev_info" +- Revert "membarrier/kabi: fix kabi for membarrier_state" +- Revert "PCI: fix kabi change in struct pci_bus" +- files_cgroup: fix error pointer when kvm_vm_worker_thread +- bdi: get device name under rcu protect +- x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit +- timer_list: avoid other cpu soft lockup when printing timer list +- sysrq: avoid concurrently info printing by 'sysrq-trigger' +- bdi: fix memleak in bdi_register_va() +- iommu/iova: avoid softlockup in fq_flush_timeout +- qm: fix the way judge whether q stop in user space +- net: hns3: clear devil number for hns3_cae +- net: hns3: fix compile error when CONFIG_HNS3_DCB is not set +- qm: fixup compilation dependency +- rde: optimize debug regs clear logic +- sec: change sec_control reg config +- hpre: add likely and unlikey in result judgement +- hpre: optimize key process before free +- net: hns3: fix bug when parameter check +- drivers : sysctl fixup some param dont check the legitimacy +- net: hns3: add protect for parameters and remove unused functions +- qm: remove invalid addr print +- zip: use offset fields in sqe to avoid SG_SPLIT +- qm: fix wrong number of sg elements after dma map +- RDMA/hns:security review update +- RDMA/hns: some robust optimize in rdfx +- RDMA/hns: fix the 
bug of out-of-bonds-read in post send +- net: hns3: Remove the function of vf check mac address +- net: hns3: update hns3 version to 1.9.35.1 +- uacce: Remove uacce mode 1 relatives +- acc: Remove uacce mode 1 logic below hisilicon +- RDMA/hns: Add roce dfx of arm_cnt +- RDMA/hns: avoid potential overflow of +- RDMA/hns: handle device err after device state to UNIT +- net: hns3: change version to 1.9.35.0 +- net: hns3: fix missing help info for qs shaper in debugfs +- net: hns3: set VF's default reset_type to HNAE3_NONE_RESET +- net: hns3: fix port base vlan add fail when concurrent with reset +- net: hns3: skip mac speed and duplex modification checking for fibre port support autoneg +- net: hns3: modify timing of reading register in hclge_reset_wait() +- net: hns3: support of dump mac id and loopback status in debugfs +- net: hns3: optimize parameter of hclge_set_phy_loopback() function +- net: hns3: optimize parameter of hclge_phy_link_status_wait() function +- net: hns3: delete unnecessary judgement in hns3_get_stats() +- net: hns3: no need to check return value of debugfs_create functions +- net: hns3: make array spec_opcode static const, makes object smaller +- net: hns: replace space with tab for cleanup +- net: hns3: modify return value in hns3_dbg_cmd_write +- net: hns3: rename variable flag in hnae3_unregister_client() +- net: hns3: move struct hclge_mdio_cfg_cmd declaration +- net: hns3: modify error process of hclge_phy_link_status_wait() +- net: hns3: support query vf ring and vector map relation +- net: hns3: add enabled tc numbers and dwrr weight info in debugfs +- net: hns3: add error process in hclge_mac_link_status_wait() function +- net: hns3: modify code of hclge_mac_phy_link_status_wait() function +- net: hns3: replace goto with return in function hns3_set_ringparam() +- net: hns3: modify print format in hns3_set_ringpa() +- net: hns: replace goto with return in function hclge_set_vf_uc_mac_addr +- net: hns3: modify the irq name of misc vectors +- net: hns3: optimize code of hns3_parse_vlan_tag() function +- net: hns3: optimize local variable of hclge_set_loopback() function +- net: hns3: optimize code of hclge_init_kdump_kernel_config() function +- net: hns: remove unnecessary newline +- net: hns: modify print function used in hclge_init_ae_dev() +- net: hns3: modify the irq name of tqp vectors +- net: hns3: delete blank lines and space for cleanup +- net: hns3: do not schedule the periodical task when reset fail +- net: hns3: modify the location of updating the hardware reset done counter +- net: hns3: refactor the notification scheme of PF reset +- net: hns3: refactor the procedure of VF FLR +- net: hns3: modify hclge_func_reset_sync_vf()'s return type to void +- net: hns3: enlarge HCLGE_RESET_WAIT_CNT +- net: hns3: refactor the precedure of PF FLR +- net: hns3: split hclgevf_reset() into preparing and rebuilding part +- net: hns3: split hclge_reset() into preparing and rebuilding part +- net: hns3: Add "mac table" information query function +- net: hns3: fix bug that PF set VF mac didn't work +- net: hns3: delete some useless repeated printing +- net: hns3: delete some useless function and definication +- net: hns3: sync some code from net-next part1 +- net: hns3: refactor the promisc mode setting +- net: hns3: refine mac address configure for VF +- net: hns3: use mutex vport_lock intead of spin lock umv_lock +- net: hns3: opmitize the table entry restore when resetting +- net: hns3: refine mac address configure for PF +- net: fix bug and change version to 1.9.33.0 
+- net: hns3: cae clear warnings +- drivers : sysctl remove rcu_lock +- RDMA/hns:remove useless header in cmd +- hac: sec: add initial configuration in sec_engine_init +- net: hns3: cae security review +- net: hns3: cae io_param definition updated +- debugfs: fix kabi for function debugfs_remove_recursive +- simple_recursive_removal(): kernel-side rm -rf for ramfs-style filesystems +- debugfs: simplify __debugfs_remove_file() +- block: rename 'q->debugfs_dir' and 'q->blk_trace->dir' in blk_unregister_queue() +- ext4: add cond_resched() to ext4_protect_reserved_inode +- bdi: fix kabi for struct backing_dev_info +- bdi: fix use-after-free for the bdi device +- vfs: fix do_last() regression +- do_last(): fetch directory ->i_mode and ->i_uid before it's too late +- ext4: reserve revoke credits in __ext4_new_inode +- jbd2: make jbd2_handle_buffer_credits() handle reserved handles +- jbd2: Fine tune estimate of necessary descriptor blocks +- jbd2: Provide trace event for handle restarts +- ext4: Reserve revoke credits for freed blocks +- jbd2: Make credit checking more strict +- jbd2: Rename h_buffer_credits to h_total_credits +- jbd2: Reserve space for revoke descriptor blocks +- jbd2: Drop jbd2_space_needed() +- jbd2: remove repeated assignments in __jbd2_log_wait_for_space() +- jbd2: Account descriptor blocks into t_outstanding_credits +- jbd2: Factor out common parts of stopping and restarting a handle +- jbd2: Drop pointless wakeup from jbd2_journal_stop() +- jbd2: Drop pointless check from jbd2_journal_stop() +- jbd2: Reorganize jbd2_journal_stop() +- ocfs2: Use accessor function for h_buffer_credits +- ext4, jbd2: Provide accessor function for handle credits +- ext4: Provide function to handle transaction restarts +- ext4: Avoid unnecessary revokes in ext4_alloc_branch() +- ext4: Use ext4_journal_extend() instead of jbd2_journal_extend() +- ext4: Fix ext4_should_journal_data() for EA inodes +- ext4: Do not iput inode under running transaction +- ext4: Move marking of handle as sync to ext4_add_nondir() +- jbd2: Completely fill journal descriptor blocks +- jbd2: Fixup stale comment in commit code +- libertas: Fix two buffer overflows at parsing bss descriptor +* Fri Feb 7 2020 Xie XiuQi - 4.19.95-2002.1.0.0027 +- drm/i915/gen9: Clear residual context state on context switch +- selftest/membarrier: fix build error +- membarrier/kabi: fix kabi for membarrier_state +- membarrier: Fix RCU locking bug caused by faulty merge +- sched/membarrier: Return -ENOMEM to userspace on memory allocation failure +- sched/membarrier: Skip IPIs when mm->mm_users == 1 +- selftests, sched/membarrier: Add multi-threaded test +- sched/membarrier: Fix p->mm->membarrier_state racy load +- sched: Clean up active_mm reference counting +- sched/membarrier: Remove redundant check +- drm/i915: Fix use-after-free when destroying GEM context +- PCI: fix kabi change in struct pci_bus +- PCI: add a member in 'struct pci_bus' to record the original 'pci_ops' +- KVM: tools/kvm_stat: Fix kvm_exit filter name +- KVM: arm/arm64: use esr_ec as trace field of kvm_exit tracepoint +- PCI/AER: increments pci bus reference count in aer-inject process +- irqchip/gic-v3-its: its support herbination +- PM / hibernate: introduce system_in_hibernation +- config: enable CONFIG_SMMU_BYPASS_DEV by default +- f2fs: support swap file w/ DIO +- mac80211: Do not send Layer 2 Update frame before authorization +- cfg80211/mac80211: make ieee80211_send_layer2_update a public function +- PCI/AER: Refactor error injection fallbacks +- net/sched: 
act_mirred: Pull mac prior redir to non mac_header_xmit device +- kernfs: fix potential null pointer dereference +- arm64: fix calling nmi_enter() repeatedly when IPI_CPU_CRASH_STOP +- usb: missing parentheses in USE_NEW_SCHEME +- USB: serial: option: add Telit ME910G1 0x110a composition +- USB: core: fix check for duplicate endpoints +- usb: dwc3: gadget: Fix request complete check +- net: sch_prio: When ungrafting, replace with FIFO +- mlxsw: spectrum_qdisc: Ignore grafting of invisible FIFO +- vlan: vlan_changelink() should propagate errors +- vlan: fix memory leak in vlan_dev_set_egress_priority +- vxlan: fix tos value before xmit +- tcp: fix "old stuff" D-SACK causing SACK to be treated as D-SACK +- sctp: free cmd->obj.chunk for the unprocessed SCTP_CMD_REPLY +- sch_cake: avoid possible divide by zero in cake_enqueue() +- pkt_sched: fq: do not accept silly TCA_FQ_QUANTUM +- net: usb: lan78xx: fix possible skb leak +- net: stmmac: dwmac-sunxi: Allow all RGMII modes +- net: stmmac: dwmac-sun8i: Allow all RGMII modes +- net: dsa: mv88e6xxx: Preserve priority when setting CPU port. +- macvlan: do not assume mac_header is set in macvlan_broadcast() +- gtp: fix bad unlock balance in gtp_encap_enable_socket +- PCI/switchtec: Read all 64 bits of part_event_bitmap +- ARM: dts: imx6ul: use nvmem-cells for cpu speed grading +- cpufreq: imx6q: read OCOTP through nvmem for imx6ul/imx6ull +- powerpc/spinlocks: Include correct header for static key +- powerpc/vcpu: Assume dedicated processors as non-preempt +- hv_netvsc: Fix unwanted rx_table reset +- llc2: Fix return statement of llc_stat_ev_rx_null_dsap_xid_c (and _test_c) +- parisc: Fix compiler warnings in debug_core.c +- block: fix memleak when __blk_rq_map_user_iov() is failed +- s390/dasd: fix memleak in path handling error case +- s390/dasd/cio: Interpret ccw_device_get_mdc return value correctly +- drm/exynos: gsc: add missed component_del +- s390/purgatory: do not build purgatory with kcov, kasan and friends +- net: stmmac: Always arm TX Timer at end of transmission start +- net: stmmac: RX buffer size must be 16 byte aligned +- net: stmmac: xgmac: Clear previous RX buffer size +- net: stmmac: Do not accept invalid MTU values +- fs: avoid softlockups in s_inodes iterators +- perf/x86/intel: Fix PT PMI handling +- kconfig: don't crash on NULL expressions in expr_eq() +- iommu/iova: Init the struct iova to fix the possible memleak +- regulator: rn5t618: fix module aliases +- ASoC: wm8962: fix lambda value +- rfkill: Fix incorrect check to avoid NULL pointer dereference +- parisc: add missing __init annotation +- net: usb: lan78xx: Fix error message format specifier +- cxgb4: Fix kernel panic while accessing sge_info +- bnx2x: Fix logic to get total no. 
of PFs per engine +- bnx2x: Do not handle requests from VFs after parity +- bpf: Clear skb->tstamp in bpf_redirect when necessary +- btrfs: Fix error messages in qgroup_rescan_init +- powerpc: Ensure that swiotlb buffer is allocated from low memory +- samples: bpf: fix syscall_tp due to unused syscall +- samples: bpf: Replace symbol compare of trace_event +- ARM: dts: am437x-gp/epos-evm: fix panel compatible +- spi: spi-ti-qspi: Fix a bug when accessing non default CS +- bpf, mips: Limit to 33 tail calls +- bnxt_en: Return error if FW returns more data than dump length +- ARM: dts: bcm283x: Fix critical trip point +- ASoC: topology: Check return value for soc_tplg_pcm_create() +- spi: spi-cavium-thunderx: Add missing pci_release_regions() +- ARM: dts: Cygnus: Fix MDIO node address/size cells +- selftests/ftrace: Fix multiple kprobe testcase +- ARM: dts: BCM5301X: Fix MDIO node address/size cells +- netfilter: nf_tables: validate NFT_DATA_VALUE after nft_data_init() +- netfilter: nf_tables: validate NFT_SET_ELEM_INTERVAL_END +- netfilter: nft_set_rbtree: bogus lookup/get on consecutive elements in named sets +- netfilter: uapi: Avoid undefined left-shift in xt_sctp.h +- ARM: vexpress: Set-up shared OPP table instead of individual for each CPU +- ARM: dts: imx6ul: imx6ul-14x14-evk.dtsi: Fix SPI NOR probing +- efi/gop: Fix memory leak in __gop_query32/64() +- efi/gop: Return EFI_SUCCESS if a usable GOP was found +- efi/gop: Return EFI_NOT_FOUND if there are no usable GOPs +- ASoC: Intel: bytcr_rt5640: Update quirk for Teclast X89 +- x86/efi: Update e820 with reserved EFI boot services data to fix kexec breakage +- libtraceevent: Fix lib installation with O= +- netfilter: ctnetlink: netns exit must wait for callbacks +- locking/spinlock/debug: Fix various data races +- ASoC: max98090: fix possible race conditions +- regulator: fix use after free issue +- bpf: Fix passing modified ctx to ld/abs/ind instruction +- USB: dummy-hcd: increase max number of devices to 32 +- USB: dummy-hcd: use usb_urb_dir_in instead of usb_pipein +- block: fix use-after-free on cached last_lookup partition +- perf/x86/intel/bts: Fix the use of page_private() +- xen/blkback: Avoid unmapping unmapped grant pages +- s390/smp: fix physical to logical CPU map for SMT +- ubifs: ubifs_tnc_start_commit: Fix OOB in layout_in_gaps +- net: add annotations on hh->hh_len lockless accesses +- xfs: periodically yield scrub threads to the scheduler +- ath9k_htc: Discard undersized packets +- ath9k_htc: Modify byte order for an error message +- net: core: limit nested device depth +- rxrpc: Fix possible NULL pointer access in ICMP handling +- KVM: PPC: Book3S HV: use smp_mb() when setting/clearing host_ipi flag +- selftests: rtnetlink: add addresses with fixed life time +- powerpc/pseries/hvconsole: Fix stack overread via udbg +- drm/mst: Fix MST sideband up-reply failure handling +- scsi: qedf: Do not retry ELS request if qedf_alloc_cmd fails +- bdev: Refresh bdev size for disks without partitioning +- bdev: Factor out bdev revalidation into a common helper +- fix compat handling of FICLONERANGE, FIDEDUPERANGE and FS_IOC_FIEMAP +- tty: serial: msm_serial: Fix lockup for sysrq and oops +- arm64: dts: meson: odroid-c2: Disable usb_otg bus to avoid power failed warning +- dt-bindings: clock: renesas: rcar-usb2-clock-sel: Fix typo in example +- regulator: ab8500: Remove AB8505 USB regulator +- media: flexcop-usb: ensure -EIO is returned on error condition +- Bluetooth: Fix memory leak in hci_connect_le_scan +- Bluetooth: delete a stray 
unlock +- Bluetooth: btusb: fix PM leak in error case of setup +- platform/x86: pmc_atom: Add Siemens CONNECT X300 to critclk_systems DMI table +- xfs: don't check for AG deadlock for realtime files in bunmapi +- ACPI: sysfs: Change ACPI_MASKABLE_GPE_MAX to 0x100 +- HID: i2c-hid: Reset ALPS touchpads on resume +- nfsd4: fix up replay_matches_cache() +- PM / devfreq: Check NULL governor in available_governors_show +- drm/msm: include linux/sched/task.h +- ftrace: Avoid potential division by zero in function profiler +- arm64: Revert support for execute-only user mappings +- exit: panic before exit_mm() on global init exit +- ALSA: firewire-motu: Correct a typo in the clock proc string +- ALSA: cs4236: fix error return comparison of an unsigned integer +- apparmor: fix aa_xattrs_match() may sleep while holding a RCU lock +- tracing: Fix endianness bug in histogram trigger +- tracing: Have the histogram compare functions convert to u64 first +- tracing: Avoid memory leak in process_system_preds() +- tracing: Fix lock inversion in trace_event_enable_tgid_record() +- rseq/selftests: Fix: Namespace gettid() for compatibility with glibc 2.30 +- riscv: ftrace: correct the condition logic in function graph tracer +- gpiolib: fix up emulated open drain outputs +- libata: Fix retrieving of active qcs +- ata: ahci_brcm: BCM7425 AHCI requires AHCI_HFLAG_DELAY_ENGINE +- ata: ahci_brcm: Add missing clock management during recovery +- ata: ahci_brcm: Allow optional reset controller to be used +- ata: ahci_brcm: Fix AHCI resources management +- ata: libahci_platform: Export again ahci_platform_able_phys() +- compat_ioctl: block: handle BLKREPORTZONE/BLKRESETZONE +- compat_ioctl: block: handle Persistent Reservations +- dmaengine: Fix access to uninitialized dma_slave_caps +- locks: print unsigned ino in /proc/locks +- pstore/ram: Write new dumps to start of recycled zones +- mm: move_pages: return valid node id in status if the page is already on the target node +- memcg: account security cred as well to kmemcg +- mm/zsmalloc.c: fix the migrated zspage statistics. 
+- media: cec: check 'transmit_in_progress', not 'transmitting' +- media: cec: avoid decrementing transmit_queue_sz if it is 0 +- media: cec: CEC 2.0-only bcast messages were ignored +- media: pulse8-cec: fix lost cec_transmit_attempt_done() call +- MIPS: Avoid VDSO ABI breakage due to global register variable +- drm/sun4i: hdmi: Remove duplicate cleanup calls +- ALSA: hda/realtek - Add headset Mic no shutup for ALC283 +- ALSA: usb-audio: set the interface format after resume on Dell WD19 +- ALSA: usb-audio: fix set_format altsetting sanity check +- ALSA: ice1724: Fix sleep-in-atomic in Infrasonic Quartet support code +- netfilter: nft_tproxy: Fix port selector on Big Endian +- drm: limit to INT_MAX in create_blob ioctl +- taskstats: fix data-race +- xfs: fix mount failure crash on invalid iclog memory access +- ALSA: hda - fixup for the bass speaker on Lenovo Carbon X1 7th gen +- ALSA: hda/realtek - Enable the bass speaker of ASUS UX431FLC +- ALSA: hda/realtek - Add Bass Speaker and fixed dac for bass speaker +- PM / hibernate: memory_bm_find_bit(): Tighten node optimisation +- xen/balloon: fix ballooned page accounting without hotplug enabled +- xen-blkback: prevent premature module unload +- IB/mlx5: Fix steering rule of drop and count +- IB/mlx4: Follow mirror sequence of device add during device removal +- s390/cpum_sf: Avoid SBD overflow condition in irq handler +- s390/cpum_sf: Adjust sampling interval to avoid hitting sample limits +- md: raid1: check rdev before reference in raid1_sync_request func +- afs: Fix creation calls in the dynamic root to fail with EOPNOTSUPP +- net: make socket read/write_iter() honor IOCB_NOWAIT +- usb: gadget: fix wrong endpoint desc +- drm/nouveau: Move the declaration of struct nouveau_conn_atom up a bit +- scsi: iscsi: qla4xxx: fix double free in probe +- scsi: qla2xxx: Ignore PORT UPDATE after N2N PLOGI +- scsi: qla2xxx: Send Notify ACK after N2N PLOGI +- scsi: qla2xxx: Configure local loop for N2N target +- scsi: qla2xxx: Fix PLOGI payload and ELS IOCB dump length +- scsi: qla2xxx: Don't call qlt_async_event twice +- scsi: qla2xxx: Drop superfluous INIT_WORK of del_work +- scsi: lpfc: Fix memory leak on lpfc_bsg_write_ebuf_set func +- rxe: correctly calculate iCRC for unaligned payloads +- RDMA/cma: add missed unregister_pernet_subsys in init failure +- afs: Fix SELinux setting security label on /afs +- afs: Fix afs_find_server lookups for ipv4 peers +- PM / devfreq: Don't fail devfreq_dev_release if not in list +- PM / devfreq: Set scaling_max_freq to max on OPP notifier error +- PM / devfreq: Fix devfreq_notifier_call returning errno +- iio: adc: max9611: Fix too short conversion time delay +- drm/amd/display: Fixed kernel panic when booting with DP-to-HDMI dongle +- drm/amdgpu: add cache flush workaround to gfx8 emit_fence +- drm/amdgpu: add check before enabling/disabling broadcast mode +- nvme-fc: fix double-free scenarios on hw queues +- nvme_fc: add module to ops template to allow module references +- spi: fsl: use platform_get_irq() instead of of_irq_to_resource() +- pinctrl: baytrail: Really serialize all register accesses +- tty/serial: atmel: fix out of range clock divider handling +- spi: fsl: don't map irq during probe +- gtp: avoid zero size hashtable +- gtp: fix an use-after-free in ipv4_pdp_find() +- gtp: fix wrong condition in gtp_genl_dump_pdp() +- tcp: do not send empty skb from tcp_write_xmit() +- tcp/dccp: fix possible race __inet_lookup_established() +- net: marvell: mvpp2: phylink requires the link interrupt +- gtp: do not 
allow adding duplicate tid and ms_addr pdp context +- net/dst: do not confirm neighbor for vxlan and geneve pmtu update +- sit: do not confirm neighbor when do pmtu update +- vti: do not confirm neighbor when do pmtu update +- tunnel: do not confirm neighbor when do pmtu update +- net/dst: add new function skb_dst_update_pmtu_no_confirm +- gtp: do not confirm neighbor when do pmtu update +- ip6_gre: do not confirm neighbor when do pmtu update +- net: add bool confirm_neigh parameter for dst_ops.update_pmtu +- vhost/vsock: accept only packets with the right dst_cid +- udp: fix integer overflow while computing available space in sk_rcvbuf +- tcp: Fix highest_sack and highest_sack_seq +- ptp: fix the race between the release of ptp_clock and cdev +- net: stmmac: dwmac-meson8b: Fix the RGMII TX delay on Meson8b/8m2 SoCs +- net/mlxfw: Fix out-of-memory error in mfa2 flash burning +- net: ena: fix napi handler misbehavior when the napi budget is zero +- hrtimer: Annotate lockless access to timer->state +- net: icmp: fix data-race in cmp_global_allow() +- net: add a READ_ONCE() in skb_peek_tail() +- inetpeer: fix data-race in inet_putpeer / inet_putpeer +- netfilter: bridge: make sure to pull arp header in br_nf_forward_arp() +- 6pack,mkiss: fix possible deadlock +- netfilter: ebtables: compat: reject all padding in matches/watchers +- bonding: fix active-backup transition after link failure +- ALSA: hda - Downgrade error message for single-cmd fallback +- netfilter: nf_queue: enqueue skbs with NULL dst +- net, sysctl: Fix compiler warning when only cBPF is present +- x86/mce: Fix possibly incorrect severity calculation on AMD +- Revert "powerpc/vcpu: Assume dedicated processors as non-preempt" +- userfaultfd: require CAP_SYS_PTRACE for UFFD_FEATURE_EVENT_FORK +- kernel: sysctl: make drop_caches write-only +- mailbox: imx: Fix Tx doorbell shutdown path +- ocfs2: fix passing zero to 'PTR_ERR' warning +- s390/cpum_sf: Check for SDBT and SDB consistency +- libfdt: define INT32_MAX and UINT32_MAX in libfdt_env.h +- s390/zcrypt: handle new reply code FILTERED_BY_HYPERVISOR +- perf regs: Make perf_reg_name() return "unknown" instead of NULL +- perf script: Fix brstackinsn for AUXTRACE +- cdrom: respect device capabilities during opening action +- powerpc: Don't add -mabi= flags when building with Clang +- scripts/kallsyms: fix definitely-lost memory leak +- apparmor: fix unsigned len comparison with less than zero +- gpio: mpc8xxx: Don't overwrite default irq_set_type callback +- scsi: target: iscsi: Wait for all commands to finish before freeing a session +- scsi: iscsi: Don't send data to unbound connection +- scsi: NCR5380: Add disconnect_mask module parameter +- scsi: scsi_debug: num_tgts must be >= 0 +- scsi: ufs: Fix error handing during hibern8 enter +- scsi: pm80xx: Fix for SATA device discovery +- watchdog: Fix the race between the release of watchdog_core_data and cdev +- HID: rmi: Check that the RMI_STARTED bit is set before unregistering the RMI transport device +- HID: Improve Windows Precision Touchpad detection. 
+- libnvdimm/btt: fix variable 'rc' set but not used +- ARM: 8937/1: spectre-v2: remove Brahma-B53 from hardening +- HID: logitech-hidpp: Silence intermittent get_battery_capacity errors +- HID: quirks: Add quirk for HP MSU1465 PIXART OEM mouse +- bcache: at least try to shrink 1 node in bch_mca_scan() +- clk: pxa: fix one of the pxa RTC clocks +- scsi: atari_scsi: sun3_scsi: Set sg_tablesize to 1 instead of SG_NONE +- powerpc/security: Fix wrong message when RFI Flush is disable +- PCI: rpaphp: Correctly match ibm, my-drc-index to drc-name when using drc-info +- PCI: rpaphp: Annotate and correctly byte swap DRC properties +- PCI: rpaphp: Don't rely on firmware feature to imply drc-info support +- powerpc/pseries/cmm: Implement release() function for sysfs device +- scsi: ufs: fix potential bug which ends in system hang +- PCI: rpaphp: Fix up pointer to first drc-info entry +- scsi: lpfc: fix: Coverity: lpfc_cmpl_els_rsp(): Null pointer dereferences +- fs/quota: handle overflows of sysctl fs.quota.* and report as unsigned long +- irqchip: ingenic: Error out if IRQ domain creation failed +- irqchip/irq-bcm7038-l1: Enable parent IRQ if necessary +- clk: clk-gpio: propagate rate change to parent +- clk: qcom: Allow constant ratio freq tables for rcg +- f2fs: fix to update dir's i_pino during cross_rename +- scsi: lpfc: Fix duplicate unreg_rpi error in port offline flow +- scsi: tracing: Fix handling of TRANSFER LENGTH == 0 for READ(6) and WRITE(6) +- jbd2: Fix statistics for the number of logged blocks +- ext4: iomap that extends beyond EOF should be marked dirty +- powerpc/book3s64/hash: Add cond_resched to avoid soft lockup warning +- powerpc/security/book3s64: Report L1TF status in sysfs +- clocksource/drivers/timer-of: Use unique device name instead of timer +- clocksource/drivers/asm9260: Add a check for of_clk_get +- leds: lm3692x: Handle failure to probe the regulator +- dma-debug: add a schedule point in debug_dma_dump_mappings() +- powerpc/tools: Don't quote $objdump in scripts +- powerpc/pseries: Don't fail hash page table insert for bolted mapping +- powerpc/pseries: Mark accumulate_stolen_time() as notrace +- scsi: hisi_sas: Replace in_softirq() check in hisi_sas_task_exec() +- scsi: csiostor: Don't enable IRQs too early +- scsi: lpfc: Fix SLI3 hba in loop mode not discovering devices +- scsi: target: compare full CHAP_A Algorithm strings +- dmaengine: xilinx_dma: Clear desc_pendingcount in xilinx_dma_reset +- iommu/tegra-smmu: Fix page tables in > 4 GiB memory +- iommu: rockchip: Free domain on .domain_free +- f2fs: fix to update time in lazytime mode +- Input: atmel_mxt_ts - disable IRQ across suspend +- scsi: lpfc: Fix locking on mailbox command completion +- scsi: mpt3sas: Fix clear pending bit in ioctl status +- scsi: lpfc: Fix discovery failures when target device connectivity bounces +- perf probe: Fix to show function entry line as probe-able +- mmc: sdhci: Add a quirk for broken command queuing +- mmc: sdhci: Workaround broken command queuing on Intel GLK +- mmc: sdhci-of-esdhc: fix P2020 errata handling +- mmc: sdhci: Update the tuning failed messages to pr_debug level +- mmc: sdhci-of-esdhc: Revert "mmc: sdhci-of-esdhc: add erratum A-009204 support" +- mmc: sdhci-msm: Correct the offset and value for DDR_CONFIG register +- powerpc/irq: fix stack overflow verification +- powerpc/vcpu: Assume dedicated processors as non-preempt +- x86/MCE/AMD: Allow Reserved types to be overwritten in smca_banks[] +- x86/MCE/AMD: Do not use rdmsr_safe_on_cpu() in smca_configure() +- KVM: 
arm64: Ensure 'params' is initialised when looking up sys register +- ext4: unlock on error in ext4_expand_extra_isize() +- staging: comedi: gsc_hpdi: check dma_alloc_coherent() return value +- platform/x86: hp-wmi: Make buffer for HPWMI_FEATURE2_QUERY 128 bytes +- intel_th: pci: Add Elkhart Lake SOC support +- intel_th: pci: Add Comet Lake PCH-V support +- USB: EHCI: Do not return -EPIPE when hub is disconnected +- cpufreq: Avoid leaving stale IRQ work items during CPU offline +- usbip: Fix error path of vhci_recv_ret_submit() +- usbip: Fix receive error in vhci-hcd when using scatter-gather +- btrfs: return error pointer from alloc_test_extent_buffer +- s390/ftrace: fix endless recursion in function_graph tracer +- drm/amdgpu: fix uninitialized variable pasid_mapping_needed +- usb: xhci: Fix build warning seen with CONFIG_PM=n +- can: kvaser_usb: kvaser_usb_leaf: Fix some info-leaks to USB devices +- mmc: mediatek: fix CMD_TA to 2 for MT8173 HS200/HS400 mode +- Revert "mmc: sdhci: Fix incorrect switch to HS mode" +- btrfs: don't prematurely free work in scrub_missing_raid56_worker() +- btrfs: don't prematurely free work in reada_start_machine_worker() +- net: phy: initialise phydev speed and duplex sanely +- drm/amdgpu: fix bad DMA from INTERRUPT_CNTL2 +- mips: fix build when "48 bits virtual memory" is enabled +- libtraceevent: Fix memory leakage in copy_filter_type +- crypto: vmx - Avoid weird build failures +- mac80211: consider QoS Null frames for STA_NULLFUNC_ACKED +- crypto: sun4i-ss - Fix 64-bit size_t warnings on sun4i-ss-hash.c +- crypto: sun4i-ss - Fix 64-bit size_t warnings +- net: ethernet: ti: ale: clean ale tbl on init and intf restart +- fbtft: Make sure string is NULL terminated +- iwlwifi: check kasprintf() return value +- brcmfmac: remove monitor interface when detaching +- x86/insn: Add some Intel instructions to the opcode map +- ASoC: Intel: bytcr_rt5640: Update quirk for Acer Switch 10 SW5-012 2-in-1 +- ASoC: wm5100: add missed pm_runtime_disable +- spi: st-ssc4: add missed pm_runtime_disable +- ASoC: wm2200: add missed operations in remove and probe failure +- btrfs: don't prematurely free work in run_ordered_work() +- btrfs: don't prematurely free work in end_workqueue_fn() +- mmc: tmio: Add MMC_CAP_ERASE to allow erase/discard/trim requests +- crypto: virtio - deal with unsupported input sizes +- tun: fix data-race in gro_normal_list() +- spi: tegra20-slink: add missed clk_unprepare +- ASoC: wm8904: fix regcache handling +- iwlwifi: mvm: fix unaligned read of rx_pkt_status +- bcache: fix deadlock in bcache_allocator +- tracing/kprobe: Check whether the non-suffixed symbol is notrace +- tracing: use kvcalloc for tgid_map array allocation +- x86/crash: Add a forward declaration of struct kimage +- cpufreq: Register drivers only after CPU devices have been registered +- bcache: fix static checker warning in bcache_device_free() +- parport: load lowlevel driver if ports not found +- nvme: Discard workaround for non-conformant devices +- s390/disassembler: don't hide instruction addresses +- ASoC: Intel: kbl_rt5663_rt5514_max98927: Add dmic format constraint +- iio: dac: ad5446: Add support for new AD5600 DAC +- ASoC: rt5677: Mark reg RT5677_PWR_ANLG2 as volatile +- spi: pxa2xx: Add missed security checks +- EDAC/ghes: Fix grain calculation +- media: si470x-i2c: add missed operations in remove +- ice: delay less +- crypto: atmel - Fix authenc support when it is set to m +- soundwire: intel: fix PDI/stream mapping for Bulk +- media: pvrusb2: Fix oops on tear-down when 
radio support is not present +- fsi: core: Fix small accesses and unaligned offsets via sysfs +- ath10k: fix get invalid tx rate for Mesh metric +- perf probe: Filter out instances except for inlined subroutine and subprogram +- perf probe: Skip end-of-sequence and non statement lines +- perf probe: Fix to show calling lines of inlined functions +- perf probe: Return a better scope DIE if there is no best scope +- perf probe: Skip overlapped location on searching variables +- perf parse: If pmu configuration fails free terms +- xen/gntdev: Use select for DMA_SHARED_BUFFER +- drm/amdgpu: fix potential double drop fence reference +- drm/amdgpu: disallow direct upload save restore list from gfx driver +- perf tools: Splice events onto evlist even on error +- perf probe: Fix to probe a function which has no entry pc +- libsubcmd: Use -O0 with DEBUG=1 +- perf probe: Fix to show inlined function callsite without entry_pc +- perf probe: Fix to show ranges of variables in functions without entry_pc +- perf probe: Fix to probe an inline function which has no entry pc +- perf probe: Walk function lines in lexical blocks +- perf jevents: Fix resource leak in process_mapfile() and main() +- perf probe: Fix to list probe event with correct line number +- perf probe: Fix to find range-only function instance +- rtlwifi: fix memory leak in rtl92c_set_fw_rsvdpagepkt() +- ALSA: timer: Limit max amount of slave instances +- spi: img-spfi: fix potential double release +- bnx2x: Fix PF-VF communication over multi-cos queues. +- rfkill: allocate static minor +- nvmem: imx-ocotp: reset error status on probe +- media: v4l2-core: fix touch support in v4l_g_fmt +- ixgbe: protect TX timestamping from API misuse +- pinctrl: amd: fix __iomem annotation in amd_gpio_irq_handler() +- Bluetooth: Fix advertising duplicated flags +- libbpf: Fix error handling in bpf_map__reuse_fd() +- iio: dln2-adc: fix iio_triggered_buffer_postenable() position +- pinctrl: sh-pfc: sh7734: Fix duplicate TCLK1_B +- loop: fix no-unmap write-zeroes request behavior +- libata: Ensure ata_port probe has completed before detach +- s390/mm: add mm_pxd_folded() checks to pxd_free() +- s390/time: ensure get_clock_monotonic() returns monotonic values +- phy: qcom-usb-hs: Fix extcon double register after power cycle +- net: dsa: LAN9303: select REGMAP when LAN9303 enable +- gpu: host1x: Allocate gather copy for host1x +- RDMA/qedr: Fix memory leak in user qp and mr +- ACPI: button: Add DMI quirk for Medion Akoya E2215T +- spi: sprd: adi: Add missing lock protection when rebooting +- drm/tegra: sor: Use correct SOR index on Tegra210 +- net: phy: dp83867: enable robust auto-mdix +- i40e: initialize ITRN registers with correct values +- arm64: psci: Reduce the waiting time for cpu_psci_cpu_kill() +- md/bitmap: avoid race window between md_bitmap_resize and bitmap_file_clear_bit +- media: smiapp: Register sensor after enabling runtime PM on the device +- x86/ioapic: Prevent inconsistent state when moving an interrupt +- ipmi: Don't allow device module unload when in use +- rtl8xxxu: fix RTL8723BU connection failure issue after warm reboot +- drm/gma500: fix memory disclosures due to uninitialized bytes +- perf tests: Disable bp_signal testing for arm64 +- x86/mce: Lower throttling MCE messages' priority to warning +- bpf/stackmap: Fix deadlock with rq_lock in bpf_get_stack() +- Bluetooth: hci_core: fix init for HCI_USER_CHANNEL +- Bluetooth: Workaround directed advertising bug in Broadcom controllers +- Bluetooth: missed cpu_to_le16 conversion in 
hci_init4_req +- iio: adc: max1027: Reset the device at probe time +- usb: usbfs: Suppress problematic bind and unbind uevents. +- perf report: Add warning when libunwind not compiled in +- perf test: Report failure for mmap events +- drm/bridge: dw-hdmi: Restore audio when setting a mode +- ath10k: Correct error handling of dma_map_single() +- x86/mm: Use the correct function type for native_set_fixmap() +- extcon: sm5502: Reset registers during initialization +- drm/amd/display: Fix dongle_caps containing stale information. +- syscalls/x86: Use the correct function type in SYSCALL_DEFINE0 +- media: ti-vpe: vpe: fix a v4l2-compliance failure about invalid sizeimage +- media: ti-vpe: vpe: ensure buffers are cleaned up properly in abort cases +- media: ti-vpe: vpe: fix a v4l2-compliance failure causing a kernel panic +- media: ti-vpe: vpe: Make sure YUYV is set as default format +- media: ti-vpe: vpe: fix a v4l2-compliance failure about frame sequence number +- media: ti-vpe: vpe: fix a v4l2-compliance warning about invalid pixel format +- media: ti-vpe: vpe: Fix Motion Vector vpdma stride +- media: cx88: Fix some error handling path in 'cx8800_initdev()' +- drm/drm_vblank: Change EINVAL by the correct errno +- block: Fix writeback throttling W=1 compiler warnings +- samples: pktgen: fix proc_cmd command result check logic +- drm/bridge: dw-hdmi: Refuse DDC/CI transfers on the internal I2C controller +- media: cec-funcs.h: add status_req checks +- media: flexcop-usb: fix NULL-ptr deref in flexcop_usb_transfer_init() +- regulator: max8907: Fix the usage of uninitialized variable in max8907_regulator_probe() +- hwrng: omap3-rom - Call clk_disable_unprepare() on exit only if not idled +- usb: renesas_usbhs: add suspend event support in gadget mode +- media: venus: Fix occasionally failures to suspend +- selftests/bpf: Correct path to include msg + path +- pinctrl: devicetree: Avoid taking direct reference to device name string +- ath10k: fix offchannel tx failure when no ath10k_mac_tx_frm_has_freq +- media: venus: core: Fix msm8996 frequency table +- tools/power/cpupower: Fix initializer override in hsw_ext_cstates +- media: ov6650: Fix stored crop rectangle not in sync with hardware +- media: ov6650: Fix stored frame format not in sync with hardware +- media: i2c: ov2659: Fix missing 720p register config +- media: ov6650: Fix crop rectangle alignment not passed back +- media: i2c: ov2659: fix s_stream return value +- media: am437x-vpfe: Setting STD to current value is not an error +- IB/iser: bound protection_sg size by data_sg size +- ath10k: fix backtrace on coredump +- staging: rtl8188eu: fix possible null dereference +- staging: rtl8192u: fix multiple memory leaks on error path +- spi: Add call to spi_slave_abort() function when spidev driver is released +- drm/amdgpu: grab the id mgr lock while accessing passid_mapping +- iio: light: bh1750: Resolve compiler warning and make code more readable +- drm/bridge: analogix-anx78xx: silence -EPROBE_DEFER warnings +- drm/panel: Add missing drm_panel_init() in panel drivers +- drm: mst: Fix query_payload ack reply struct +- ALSA: hda/ca0132 - Fix work handling in delayed HP detection +- ALSA: hda/ca0132 - Avoid endless loop +- ALSA: hda/ca0132 - Keep power on during processing DSP response +- ALSA: pcm: Avoid possible info leaks from PCM stream buffers +- Btrfs: fix removal logic of the tree mod log that leads to use-after-free issues +- btrfs: handle ENOENT in btrfs_uuid_tree_iterate +- btrfs: do not leak reloc root if we fail to read the fs 
root +- btrfs: skip log replay on orphaned roots +- btrfs: abort transaction after failed inode updates in create_subvol +- btrfs: send: remove WARN_ON for readonly mount +- Btrfs: fix missing data checksums after replaying a log tree +- btrfs: do not call synchronize_srcu() in inode_tree_del +- btrfs: don't double lock the subvol_sem for rename exchange +- selftests: forwarding: Delete IPv6 address at the end +- sctp: fully initialize v4 addr in some functions +- qede: Fix multicast mac configuration +- qede: Disable hardware gro when xdp prog is installed +- net: usb: lan78xx: Fix suspend/resume PHY register access error +- net: qlogic: Fix error paths in ql_alloc_large_buffers() +- net: nfc: nci: fix a possible sleep-in-atomic-context bug in nci_uart_tty_receive() +- net: hisilicon: Fix a BUG trigered by wrong bytes_compl +- net: gemini: Fix memory leak in gmac_setup_txqs +- net: dst: Force 4-byte alignment of dst_metrics +- mod_devicetable: fix PHY module format +- fjes: fix missed check in fjes_acpi_add +- sock: fix potential memory leak in proto_register() +- arm64/sve: Fix missing SVE/FPSIMD endianness conversions +- svm: Delete ifdef CONFIG_ACPI in svm +- svm: Delete svm_unbind_cores() in svm_notifier_release call +- svm: Fix unpin_memory calculate nr_pages error +- vrf: Do not attempt to create IPv6 mcast rule if IPv6 is disabled +- iommu: Add missing new line for dma type +- xhci: fix USB3 device initiated resume race with roothub autosuspend +- drm/radeon: fix r1xx/r2xx register checker for POT textures +- scsi: iscsi: Fix a potential deadlock in the timeout handler +- dm mpath: remove harmful bio-based optimization +- drm: meson: venc: cvbs: fix CVBS mode matching +- dma-buf: Fix memory leak in sync_file_merge() +- vfio/pci: call irq_bypass_unregister_producer() before freeing irq +- ARM: tegra: Fix FLOW_CTLR_HALT register clobbering by tegra_resume() +- ARM: dts: s3c64xx: Fix init order of clock providers +- CIFS: Close open handle after interrupted close +- CIFS: Respect O_SYNC and O_DIRECT flags during reconnect +- cifs: Don't display RDMA transport on reconnect +- cifs: smbd: Return -EINVAL when the number of iovs exceeds SMBDIRECT_MAX_SGE +- cifs: smbd: Add messages on RDMA session destroy and reconnection +- cifs: smbd: Return -EAGAIN when transport is reconnecting +- rpmsg: glink: Free pending deferred work on remove +- rpmsg: glink: Don't send pending rx_done during remove +- rpmsg: glink: Fix rpmsg_register_device err handling +- rpmsg: glink: Put an extra reference during cleanup +- rpmsg: glink: Fix use after free in open_ack TIMEOUT case +- rpmsg: glink: Fix reuse intents memory leak issue +- rpmsg: glink: Set tail pointer to 0 at end of FIFO +- xtensa: fix TLB sanity checker +- PCI: Apply Cavium ACS quirk to ThunderX2 and ThunderX3 +- PCI/MSI: Fix incorrect MSI-X masking on resume +- PCI: Fix Intel ACS quirk UPDCR register address +- PCI/PM: Always return devices to D0 when thawing +- mmc: block: Add CMD13 polling for MMC IOCTLS with R1B response +- mmc: block: Make card_busy_detect() a bit more generic +- Revert "arm64: preempt: Fix big-endian when checking preempt count in assembly" +- tcp: Protect accesses to .ts_recent_stamp with {READ, WRITE}_ONCE() +- tcp: tighten acceptance of ACKs not matching a child socket +- tcp: fix rejected syncookies due to stale timestamps +- net/mlx5e: Query global pause state before setting prio2buffer +- tipc: fix ordering of tipc module init and exit routine +- tcp: md5: fix potential overestimation of TCP option space +- 
openvswitch: support asymmetric conntrack +- net: thunderx: start phy before starting autonegotiation +- net: sched: fix dump qlen for sch_mq/sch_mqprio with NOLOCK subqueues +- net: ethernet: ti: cpsw: fix extra rx interrupt +- net: dsa: fix flow dissection on Tx path +- net: bridge: deny dev_set_mac_address() when unregistering +- mqprio: Fix out-of-bounds access in mqprio_dump +- inet: protect against too small mtu values. +- ext4: check for directory entries too close to block end +- ext4: fix ext4_empty_dir() for directories with holes + +* Mon Jan 13 2020 luochunsheng - 4.19.90-vhulk1912.2.1.0026 +- fix compile error when debugfiles.list is empty + +* Mon Jan 13 2020 luochunsheng - 4.19.90-vhulk1912.2.1.0025 +- update kernel code from https://gitee.com/openeuler/kernel/ + +* Mon Jan 06 2020 zhanghailiang - 4.19.90-vhulk1912.2.1.0024 +- support more than 256 vcpus for VM + +* Tue Dec 31 2019 linfeilong - 4.19.90-vhulk1912.2.1.0023 +- delete some useless files + +* Mon Dec 30 2019 yuxiangyang - 4.19.90-vhulk1912.2.1.0022 +- update Huawei copyright + +* Mon Dec 30 2019 caomeng - 4.19.90-vhulk1912.2.1.0021 +- modified README.md + +* Sat Dec 28 2019 caomeng - 4.19.90-vhulk1912.2.1.0020 +- change tag and change config_ktask + +* Sat Dec 28 2019 caomeng - 4.19.90-vhulk1907.1.0.0019 +- modified license + +* Wed Dec 25 2019 luochunsheng - 4.19.90-vhulk1907.1.0.0018 +- update Module.kabi_aarch64 +- fix patch kernel-SMMU-V3-support-Virtualization-with-3408iMR-3.patch + +* Tue Dec 24 2019 Pan Zhang - 4.19.90-vhulk1907.1.0.0017 +- fix get_user_pages_fast with evmm issue + +* Tue Dec 24 2019 caihongda - 4.19.90-vhulk1907.1.0.0016 +- cpu/freq: remove unused patches + +* Tue Dec 24 2019 shenkai - 4.19.90-vhulk1907.1.0.0015 +- modify vmap allocation start address + +* Tue Dec 24 2019 caomeng - 4.19.90-vhulk1907.1.0.0014 +- fix some problems with the hulk rebase + +* Mon Dec 23 2019 yuxiangyang - 4.19.90-vhulk1907.1.0.0013 +- fix CONFIG_EULEROS_USE_IDLE_NO_CSTATES compile error +- add a new method of cpu usage + +* Mon Dec 23 2019 caomeng - 4.19.90-vhulk1907.1.0.0012 +- change version + +* Mon Dec 23 2019 luochunsheng - 4.19.36-vhulk1907.1.0.0011 +- fix mkgrub-menu-*.sh path +- SMMU supports bypass of configured PCI devices by cmdline smmu.bypassdev + +* Mon Dec 23 2019 chenmaodong - 4.19.36-vhulk1907.1.0.0010 +- drm/radeon: Fix potential buffer overflow in ci_dpm.c + +* Mon Dec 23 2019 wuxu - 4.19.36-vhulk1907.1.0.0009 +- add security compile noexecstack option for vdso + +* Mon Dec 23 2019 caomeng - 4.19.36-vhulk1907.1.0.0008 +- rebase hulk patches + +* Fri Dec 20 2019 yeyunfeng - 4.19.36-vhulk1907.1.0.0007 +- perf/smmuv3: fix possible sleep in preempt context +- crypto: user - prevent operating on larval algorithms + +* Thu Dec 19 2019 luochunsheng - 4.19.36-vhulk1907.1.0.0006 +- update release to satisfy upgrade + +* Wed Nov 27 2019 lihongjiang - 4.19.36-vhulk1907.1.0.h005 +- change page size from 4K to 64K + +* Thu Nov 21 2019 caomeng - 4.19.36-vhulk1907.1.0.h004 +- fix x86 compile problem: change signing_key.pem to certs/signing_key.pem +- in file arch/x86/configs/euleros_defconfig + +* Mon Nov 4 2019 caomeng - 4.19.36-vhulk1907.1.0.h003 +- Add buildrequires ncurses-devel + +* Fri Oct 25 2019 luochunsheng - 4.19.36-vhulk1907.1.0.h002 +- Add vmlinux to debuginfo package and add kernel-source package + +* Wed Sep 04 2019 openEuler Buildteam - 4.19.36-vhulk1907.1.0.h001 +- Package init diff --git a/patch-4.19.90-2203.3.0-rt103-openeuler_defconfig.patch
b/patch-4.19.90-2203.3.0-rt103-openeuler_defconfig.patch new file mode 100644 index 0000000..ee8052a --- /dev/null +++ b/patch-4.19.90-2203.3.0-rt103-openeuler_defconfig.patch @@ -0,0 +1,52 @@ +diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig +index 8717ffe01..7086ed5b1 100644 +--- a/arch/arm64/configs/openeuler_defconfig ++++ b/arch/arm64/configs/openeuler_defconfig +@@ -73,8 +73,9 @@ CONFIG_NO_HZ_FULL=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y + # CONFIG_PREEMPT_NONE is not set +-CONFIG_PREEMPT_VOLUNTARY=y ++# CONFIG_PREEMPT_VOLUNTARY is not set + # CONFIG_PREEMPT is not set ++CONFIG_PREEMPT_RT_FULL=y + + # + # CPU/Task time and stats accounting +@@ -988,7 +989,7 @@ CONFIG_FRAME_VECTOR=y + # CONFIG_PERCPU_STATS is not set + # CONFIG_GUP_BENCHMARK is not set + CONFIG_ARCH_HAS_PTE_SPECIAL=y +-CONFIG_PIN_MEMORY=y ++# CONFIG_PIN_MEMORY is not set + CONFIG_PID_RESERVE=y + CONFIG_NET=y + CONFIG_COMPAT_NETLINK_MESSAGES=y +diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig +index 64f0805a5..a791ac897 100644 +--- a/arch/x86/configs/openeuler_defconfig ++++ b/arch/x86/configs/openeuler_defconfig +@@ -78,8 +78,9 @@ CONFIG_NO_HZ_FULL=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y + # CONFIG_PREEMPT_NONE is not set +-CONFIG_PREEMPT_VOLUNTARY=y ++# CONFIG_PREEMPT_VOLUNTARY is not set + # CONFIG_PREEMPT is not set ++CONFIG_PREEMPT_RT_FULL=y + + # + # CPU/Task time and stats accounting +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index 4c90438fc..5d2fc5be5 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -1846,6 +1846,8 @@ static int console_trylock_spinning(void) + + #else + ++void zap_locks(void) {} ++ + static int console_trylock_spinning(void) + { + return console_trylock(); diff --git a/patch-4.19.90-2203.3.0-rt103.patch b/patch-4.19.90-2203.3.0-rt103.patch new file mode 100644 index 0000000..4062efe --- /dev/null +++ b/patch-4.19.90-2203.3.0-rt103.patch @@ -0,0 +1,25890 @@ +diff --git a/arch/Kconfig b/arch/Kconfig +index 00f55932b..9fdf4a803 100644 +--- a/arch/Kconfig ++++ b/arch/Kconfig +@@ -37,6 +37,7 @@ config OPROFILE + tristate "OProfile system profiling" + depends on PROFILING + depends on HAVE_OPROFILE ++ depends on !PREEMPT_RT_FULL + select RING_BUFFER + select RING_BUFFER_ALLOW_SWAP + help +diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h +index 1d5716bc0..6883bc952 100644 +--- a/arch/alpha/include/asm/spinlock_types.h ++++ b/arch/alpha/include/asm/spinlock_types.h +@@ -2,10 +2,6 @@ + #ifndef _ALPHA_SPINLOCK_TYPES_H + #define _ALPHA_SPINLOCK_TYPES_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + typedef struct { + volatile unsigned int lock; + } arch_spinlock_t; +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig +index b9455d212..5cfcb8a41 100644 +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -52,7 +52,7 @@ config ARM + select HARDIRQS_SW_RESEND + select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) + select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 +- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU ++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE + select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU + select HAVE_ARCH_MMAP_RND_BITS if MMU + select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) +@@ -91,6 +91,7 @@ config ARM + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS 
+ select HAVE_PERF_USER_STACK_DUMP ++ select HAVE_PREEMPT_LAZY + select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RSEQ +@@ -2165,7 +2166,7 @@ config NEON + + config KERNEL_MODE_NEON + bool "Support for NEON in kernel mode" +- depends on NEON && AEABI ++ depends on NEON && AEABI && !PREEMPT_RT_BASE + help + Say Y to include support for NEON in kernel mode. + +diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig +index e4b1be66b..f4b253bd0 100644 +--- a/arch/arm/configs/at91_dt_defconfig ++++ b/arch/arm/configs/at91_dt_defconfig +@@ -19,6 +19,7 @@ CONFIG_ARCH_MULTI_V5=y + CONFIG_ARCH_AT91=y + CONFIG_SOC_AT91RM9200=y + CONFIG_SOC_AT91SAM9=y ++# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set + CONFIG_AEABI=y + CONFIG_UACCESS_WITH_MEMCPY=y + CONFIG_ZBOOT_ROM_TEXT=0x0 +@@ -64,7 +65,6 @@ CONFIG_BLK_DEV_LOOP=y + CONFIG_BLK_DEV_RAM=y + CONFIG_BLK_DEV_RAM_COUNT=4 + CONFIG_BLK_DEV_RAM_SIZE=8192 +-CONFIG_ATMEL_TCLIB=y + CONFIG_ATMEL_SSC=y + CONFIG_SCSI=y + CONFIG_BLK_DEV_SD=y +diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig +index 208002555..be92871ab 100644 +--- a/arch/arm/configs/sama5_defconfig ++++ b/arch/arm/configs/sama5_defconfig +@@ -20,6 +20,7 @@ CONFIG_ARCH_AT91=y + CONFIG_SOC_SAMA5D2=y + CONFIG_SOC_SAMA5D3=y + CONFIG_SOC_SAMA5D4=y ++# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set + CONFIG_AEABI=y + CONFIG_UACCESS_WITH_MEMCPY=y + CONFIG_ZBOOT_ROM_TEXT=0x0 +@@ -75,7 +76,6 @@ CONFIG_BLK_DEV_LOOP=y + CONFIG_BLK_DEV_RAM=y + CONFIG_BLK_DEV_RAM_COUNT=4 + CONFIG_BLK_DEV_RAM_SIZE=8192 +-CONFIG_ATMEL_TCLIB=y + CONFIG_ATMEL_SSC=y + CONFIG_EEPROM_AT24=y + CONFIG_SCSI=y +diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h +index 46d41140d..c421b5b81 100644 +--- a/arch/arm/include/asm/irq.h ++++ b/arch/arm/include/asm/irq.h +@@ -23,6 +23,8 @@ + #endif + + #ifndef __ASSEMBLY__ ++#include ++ + struct irqaction; + struct pt_regs; + +diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h +index 597695864..a37c08039 100644 +--- a/arch/arm/include/asm/spinlock_types.h ++++ b/arch/arm/include/asm/spinlock_types.h +@@ -2,10 +2,6 @@ + #ifndef __ASM_SPINLOCK_TYPES_H + #define __ASM_SPINLOCK_TYPES_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + #define TICKET_SHIFT 16 + + typedef struct { +diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h +index d3e937dce..6ab96a2ce 100644 +--- a/arch/arm/include/asm/switch_to.h ++++ b/arch/arm/include/asm/switch_to.h +@@ -4,6 +4,13 @@ + + #include + ++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + /* + * For v7 SMP cores running a preemptible kernel we may be pre-empted + * during a TLB maintenance operation, so execute an inner-shareable dsb +@@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info + #define switch_to(prev,next,last) \ + do { \ + __complete_pending_tlbi(); \ ++ switch_kmaps(prev, next); \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ + } while (0) + +diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h +index 8f55dc520..4f834bfca 100644 +--- a/arch/arm/include/asm/thread_info.h ++++ 
b/arch/arm/include/asm/thread_info.h +@@ -49,6 +49,7 @@ struct cpu_context_save { + struct thread_info { + unsigned long flags; /* low level flags */ + int preempt_count; /* 0 => preemptable, <0 => bug */ ++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ + mm_segment_t addr_limit; /* address limit */ + struct task_struct *task; /* main task structure */ + __u32 cpu; /* cpu */ +@@ -139,7 +140,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, + #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ + #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ + #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ ++#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ ++#define TIF_NEED_RESCHED_LAZY 7 + + #define TIF_NOHZ 12 /* in adaptive nohz mode */ + #define TIF_USING_IWMMXT 17 +@@ -149,6 +151,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) + #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_UPROBE (1 << TIF_UPROBE) + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +@@ -164,7 +167,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, + * Change these and you break ASM code in entry-common.S + */ + #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +- _TIF_NOTIFY_RESUME | _TIF_UPROBE) ++ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ ++ _TIF_NEED_RESCHED_LAZY) + + #endif /* __KERNEL__ */ + #endif /* __ASM_ARM_THREAD_INFO_H */ +diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c +index 3968d6c22..b35d373fc 100644 +--- a/arch/arm/kernel/asm-offsets.c ++++ b/arch/arm/kernel/asm-offsets.c +@@ -56,6 +56,7 @@ int main(void) + BLANK(); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); ++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); +diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S +index e85a3af9d..cc67c0a3a 100644 +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -216,11 +216,18 @@ __irq_svc: + + #ifdef CONFIG_PREEMPT + ldr r8, [tsk, #TI_PREEMPT] @ get preempt count +- ldr r0, [tsk, #TI_FLAGS] @ get flags + teq r8, #0 @ if preempt count != 0 ++ bne 1f @ return from exeption ++ ldr r0, [tsk, #TI_FLAGS] @ get flags ++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set ++ blne svc_preempt @ preempt! 
++ ++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count ++ teq r8, #0 @ if preempt lazy count != 0 + movne r0, #0 @ force flags to 0 +- tst r0, #_TIF_NEED_RESCHED ++ tst r0, #_TIF_NEED_RESCHED_LAZY + blne svc_preempt ++1: + #endif + + svc_exit r5, irq = 1 @ return from exception +@@ -235,8 +242,14 @@ svc_preempt: + 1: bl preempt_schedule_irq @ irq en/disable is done inside + ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS + tst r0, #_TIF_NEED_RESCHED ++ bne 1b ++ tst r0, #_TIF_NEED_RESCHED_LAZY + reteq r8 @ go again +- b 1b ++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count ++ teq r0, #0 @ if preempt lazy count != 0 ++ beq 1b ++ ret r8 @ go again ++ + #endif + + __und_fault: +diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S +index 0465d65d2..47675830e 100644 +--- a/arch/arm/kernel/entry-common.S ++++ b/arch/arm/kernel/entry-common.S +@@ -56,7 +56,9 @@ __ret_fast_syscall: + cmp r2, #TASK_SIZE + blne addr_limit_check_failed + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing +- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK ++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) ++ bne fast_work_pending ++ tst r1, #_TIF_SECCOMP + bne fast_work_pending + + +@@ -93,8 +95,11 @@ __ret_fast_syscall: + cmp r2, #TASK_SIZE + blne addr_limit_check_failed + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing +- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK ++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) ++ bne do_slower_path ++ tst r1, #_TIF_SECCOMP + beq no_work_pending ++do_slower_path: + UNWIND(.fnend ) + ENDPROC(ret_fast_syscall) + +diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c +index dfe24883c..e1b59f33a 100644 +--- a/arch/arm/kernel/signal.c ++++ b/arch/arm/kernel/signal.c +@@ -652,7 +652,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) + */ + trace_hardirqs_off(); + do { +- if (likely(thread_flags & _TIF_NEED_RESCHED)) { ++ if (likely(thread_flags & (_TIF_NEED_RESCHED | ++ _TIF_NEED_RESCHED_LAZY))) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) +diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig +index 903f23c30..fa493a86e 100644 +--- a/arch/arm/mach-at91/Kconfig ++++ b/arch/arm/mach-at91/Kconfig +@@ -107,6 +107,31 @@ config SOC_AT91SAM9 + AT91SAM9X35 + AT91SAM9XE + ++comment "Clocksource driver selection" ++ ++config ATMEL_CLOCKSOURCE_PIT ++ bool "Periodic Interval Timer (PIT) support" ++ depends on SOC_AT91SAM9 || SOC_SAMA5 ++ default SOC_AT91SAM9 || SOC_SAMA5 ++ select ATMEL_PIT ++ help ++ Select this to get a clocksource based on the Atmel Periodic Interval ++ Timer. It has a relatively low resolution and the TC Block clocksource ++ should be preferred. ++ ++config ATMEL_CLOCKSOURCE_TCB ++ bool "Timer Counter Blocks (TCB) support" ++ depends on SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 || COMPILE_TEST ++ default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 ++ depends on !ATMEL_TCLIB ++ select ATMEL_ARM_TCB_CLKSRC ++ help ++ Select this to get a high precision clocksource based on a ++ TC block with a 5+ MHz base clock rate. ++ On platforms with 16-bit counters, two timer channels are combined ++ to make a single 32-bit timer. ++ It can also be used as a clock event device supporting oneshot mode. 
++ + config HAVE_AT91_UTMI + bool + +diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c +index 6a1e68237..17dca0ff3 100644 +--- a/arch/arm/mach-exynos/platsmp.c ++++ b/arch/arm/mach-exynos/platsmp.c +@@ -239,7 +239,7 @@ static void write_pen_release(int val) + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void exynos_secondary_init(unsigned int cpu) + { +@@ -252,8 +252,8 @@ static void exynos_secondary_init(unsigned int cpu) + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr) +@@ -317,7 +317,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -344,7 +344,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) + + if (timeout == 0) { + printk(KERN_ERR "cpu1 power enable failed"); +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + return -ETIMEDOUT; + } + } +@@ -390,7 +390,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) + * calibrations, then wait for it to finish + */ + fail: +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? ret : 0; + } +diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c +index f66815c3d..00524abd9 100644 +--- a/arch/arm/mach-hisi/platmcpm.c ++++ b/arch/arm/mach-hisi/platmcpm.c +@@ -61,7 +61,7 @@ + + static void __iomem *sysctrl, *fabric; + static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + static u32 fabric_phys_addr; + /* + * [0]: bootwrapper physical address +@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) + if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) + return -EINVAL; + +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); + + if (hip04_cpu_table[cluster][cpu]) + goto out; +@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) + + out: + hip04_cpu_table[cluster][cpu]++; +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + + return 0; + } +@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu) + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + hip04_cpu_table[cluster][cpu]--; + if (hip04_cpu_table[cluster][cpu] == 1) { + /* A power_up request went ahead of us. */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + return; + } else if (hip04_cpu_table[cluster][cpu] > 1) { + pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); +@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu) + } + + last_man = hip04_cluster_is_down(cluster); +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + if (last_man) { + /* Since it's Cortex A15, disable L2 prefetching. 
*/ + asm volatile( +@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu) + cpu >= HIP04_MAX_CPUS_PER_CLUSTER); + + count = TIMEOUT_MSEC / POLL_MSEC; +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); + for (tries = 0; tries < count; tries++) { + if (hip04_cpu_table[cluster][cpu]) + goto err; +@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu) + data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); + if (data & CORE_WFI_STATUS(cpu)) + break; +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + /* Wait for clean L2 when the whole cluster is down. */ + msleep(POLL_MSEC); +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); + } + if (tries >= count) + goto err; +@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu) + goto err; + if (hip04_cluster_is_down(cluster)) + hip04_set_snoop_filter(cluster, 0); +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + return 1; + err: +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + return 0; + } + #endif +diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c +index 326e870d7..d9ac80aa1 100644 +--- a/arch/arm/mach-imx/cpuidle-imx6q.c ++++ b/arch/arm/mach-imx/cpuidle-imx6q.c +@@ -17,22 +17,22 @@ + #include "hardware.h" + + static int num_idle_cpus = 0; +-static DEFINE_SPINLOCK(cpuidle_lock); ++static DEFINE_RAW_SPINLOCK(cpuidle_lock); + + static int imx6q_enter_wait(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) + { +- spin_lock(&cpuidle_lock); ++ raw_spin_lock(&cpuidle_lock); + if (++num_idle_cpus == num_online_cpus()) + imx6_set_lpm(WAIT_UNCLOCKED); +- spin_unlock(&cpuidle_lock); ++ raw_spin_unlock(&cpuidle_lock); + + cpu_do_idle(); + +- spin_lock(&cpuidle_lock); ++ raw_spin_lock(&cpuidle_lock); + if (num_idle_cpus-- == num_online_cpus()) + imx6_set_lpm(WAIT_CLOCKED); +- spin_unlock(&cpuidle_lock); ++ raw_spin_unlock(&cpuidle_lock); + + return index; + } +diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c +index 1c73694c8..ac4d2f030 100644 +--- a/arch/arm/mach-omap2/omap-smp.c ++++ b/arch/arm/mach-omap2/omap-smp.c +@@ -69,7 +69,7 @@ static const struct omap_smp_config omap5_cfg __initconst = { + .startup_addr = omap5_secondary_startup, + }; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __iomem *omap4_get_scu_base(void) + { +@@ -177,8 +177,8 @@ static void omap4_secondary_init(unsigned int cpu) + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -191,7 +191,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * Update the AuxCoreBoot0 with boot state for secondary core. 
+@@ -270,7 +270,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) + * Now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return 0; + } +diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c +index 75ef5d4be..c17c86e5d 100644 +--- a/arch/arm/mach-prima2/platsmp.c ++++ b/arch/arm/mach-prima2/platsmp.c +@@ -22,7 +22,7 @@ + + static void __iomem *clk_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void sirfsoc_secondary_init(unsigned int cpu) + { +@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu) + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static const struct of_device_id clk_ids[] = { +@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) + /* make sure write buffer is drained */ + mb(); + +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c +index 5494c9e0c..e8ce157d3 100644 +--- a/arch/arm/mach-qcom/platsmp.c ++++ b/arch/arm/mach-qcom/platsmp.c +@@ -46,7 +46,7 @@ + + extern void secondary_startup_arm(void); + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + #ifdef CONFIG_HOTPLUG_CPU + static void qcom_cpu_die(unsigned int cpu) +@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu) + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int scss_release_secondary(unsigned int cpu) +@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * Send the secondary CPU a soft interrupt, thereby causing +@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return ret; + } +diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c +index 39038a038..6da5c9387 100644 +--- a/arch/arm/mach-spear/platsmp.c ++++ b/arch/arm/mach-spear/platsmp.c +@@ -32,7 +32,7 @@ static void write_pen_release(int val) + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void __iomem *scu_base = IOMEM(VA_SCU_BASE); + +@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu) + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c +index 231f19e17..a3419b700 100644 +--- a/arch/arm/mach-sti/platsmp.c ++++ b/arch/arm/mach-sti/platsmp.c +@@ -35,7 +35,7 @@ static void write_pen_release(int val) + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void sti_secondary_init(unsigned int cpu) + { +@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu) + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? 
-ENOSYS : 0; + } +diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c +index f49b996ae..726f08332 100644 +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c +@@ -436,6 +436,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, + if (addr < TASK_SIZE) + return do_page_fault(addr, fsr, regs); + ++ if (interrupts_enabled(regs)) ++ local_irq_enable(); ++ + if (user_mode(regs)) + goto bad_area; + +@@ -503,6 +506,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, + static int + do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + { ++ if (interrupts_enabled(regs)) ++ local_irq_enable(); ++ + do_bad_area(addr, fsr, regs); + return 0; + } +diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c +index d02f8187b..542692dbd 100644 +--- a/arch/arm/mm/highmem.c ++++ b/arch/arm/mm/highmem.c +@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr) + return *ptep; + } + ++static unsigned int fixmap_idx(int type) ++{ ++ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); ++} ++ + void *kmap(struct page *page) + { + might_sleep(); +@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap); + + void *kmap_atomic(struct page *page) + { ++ pte_t pte = mk_pte(page, kmap_prot); + unsigned int idx; + unsigned long vaddr; + void *kmap; + int type; + +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); +@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page) + + type = kmap_atomic_idx_push(); + +- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); ++ idx = fixmap_idx(type); + vaddr = __fix_to_virt(idx); + #ifdef CONFIG_DEBUG_HIGHMEM + /* +@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page) + * in place, so the contained TLB flush ensures the TLB is updated + * with the new mapping. 
+ */ +- set_fixmap_pte(idx, mk_pte(page, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_fixmap_pte(idx, pte); + + return (void *)vaddr; + } +@@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr) + + if (kvaddr >= (void *)FIXADDR_START) { + type = kmap_atomic_idx(); +- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); ++ idx = fixmap_idx(type); + + if (cache_is_vivt()) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(idx)); +- set_fixmap_pte(idx, __pte(0)); + #else + (void) idx; /* to kill a warning */ + #endif ++ set_fixmap_pte(idx, __pte(0)); + kmap_atomic_idx_pop(); + } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { + /* this address was obtained through kmap_high_get() */ + kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); + } + pagefault_enable(); +- preempt_enable(); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(__kunmap_atomic); + + void *kmap_atomic_pfn(unsigned long pfn) + { ++ pte_t pte = pfn_pte(pfn, kmap_prot); + unsigned long vaddr; + int idx, type; + struct page *page = pfn_to_page(pfn); + +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); + + type = kmap_atomic_idx_push(); +- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); ++ idx = fixmap_idx(type); + vaddr = __fix_to_virt(idx); + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(get_fixmap_pte(vaddr))); + #endif +- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_fixmap_pte(idx, pte); + + return (void *)vaddr; + } ++#if defined CONFIG_PREEMPT_RT_FULL ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = fixmap_idx(i); ++ ++ set_fixmap_pte(idx, __pte(0)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = fixmap_idx(i); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_fixmap_pte(idx, next_p->kmap_pte[i]); ++ } ++} ++#endif +diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c +index c23665101..6b60f582b 100644 +--- a/arch/arm/plat-versatile/platsmp.c ++++ b/arch/arm/plat-versatile/platsmp.c +@@ -32,7 +32,7 @@ static void write_pen_release(int val) + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void versatile_secondary_init(unsigned int cpu) + { +@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu) + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * This is really belt and braces; we hold unintended secondary +@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 02c2f528c..fbcb3496f 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -143,6 +143,7 @@ config ARM64 + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP ++ select HAVE_PREEMPT_LAZY + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_FUTEX_CMPXCHG if FUTEX + select HAVE_RCU_TABLE_FREE +diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig +index a5606823e..1c2da1535 100644 +--- a/arch/arm64/crypto/Kconfig ++++ b/arch/arm64/crypto/Kconfig +@@ -19,43 +19,43 @@ config CRYPTO_SHA512_ARM64 + + config CRYPTO_SHA1_ARM64_CE + tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_HASH + select CRYPTO_SHA1 + + config CRYPTO_SHA2_ARM64_CE + tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_HASH + select CRYPTO_SHA256_ARM64 + + config CRYPTO_SHA512_ARM64_CE + tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_HASH + select CRYPTO_SHA512_ARM64 + + config CRYPTO_SHA3_ARM64 + tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_HASH + select CRYPTO_SHA3 + + config CRYPTO_SM3_ARM64_CE + tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_HASH + select CRYPTO_SM3 + + config CRYPTO_SM4_ARM64_CE + tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_ALGAPI + select CRYPTO_SM4 + + config CRYPTO_GHASH_ARM64_CE + tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_HASH + select CRYPTO_GF128MUL + select CRYPTO_AES +@@ -63,7 +63,7 @@ config CRYPTO_GHASH_ARM64_CE + + config CRYPTO_CRCT10DIF_ARM64_CE + tristate "CRCT10DIF digest algorithm using PMULL instructions" +- depends on KERNEL_MODE_NEON && CRC_T10DIF ++ depends on KERNEL_MODE_NEON && CRC_T10DIF && !PREEMPT_RT_BASE + select CRYPTO_HASH + + config CRYPTO_AES_ARM64 +@@ -72,13 +72,13 @@ config CRYPTO_AES_ARM64 + + config CRYPTO_AES_ARM64_CE + tristate "AES core cipher using ARMv8 Crypto Extensions" +- depends on ARM64 && KERNEL_MODE_NEON ++ depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE + 
select CRYPTO_ALGAPI + select CRYPTO_AES_ARM64 + + config CRYPTO_AES_ARM64_CE_CCM + tristate "AES in CCM mode using ARMv8 Crypto Extensions" +- depends on ARM64 && KERNEL_MODE_NEON ++ depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_ALGAPI + select CRYPTO_AES_ARM64_CE + select CRYPTO_AES_ARM64 +@@ -86,7 +86,7 @@ config CRYPTO_AES_ARM64_CE_CCM + + config CRYPTO_AES_ARM64_CE_BLK + tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_BLKCIPHER + select CRYPTO_AES_ARM64_CE + select CRYPTO_AES_ARM64 +@@ -94,7 +94,7 @@ config CRYPTO_AES_ARM64_CE_BLK + + config CRYPTO_AES_ARM64_NEON_BLK + tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_BLKCIPHER + select CRYPTO_AES_ARM64 + select CRYPTO_AES +@@ -102,13 +102,13 @@ config CRYPTO_AES_ARM64_NEON_BLK + + config CRYPTO_CHACHA20_NEON + tristate "NEON accelerated ChaCha20 symmetric cipher" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_BLKCIPHER + select CRYPTO_CHACHA20 + + config CRYPTO_AES_ARM64_BS + tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE + select CRYPTO_BLKCIPHER + select CRYPTO_AES_ARM64_NEON_BLK + select CRYPTO_AES_ARM64 +diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h +index c3e4273c1..61d3c4f93 100644 +--- a/arch/arm64/include/asm/alternative.h ++++ b/arch/arm64/include/asm/alternative.h +@@ -35,6 +35,12 @@ void apply_alternatives_module(void *start, size_t length); + static inline void apply_alternatives_module(void *start, size_t length) { } + #endif + ++#ifdef CONFIG_KVM_ARM_HOST ++void kvm_compute_layout(void); ++#else ++static inline void kvm_compute_layout(void) { } ++#endif ++ + #define ALTINSTR_ENTRY(feature) \ + " .word 661b - .\n" /* label */ \ + " .word 663f - .\n" /* new instruction */ \ +diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h +index a157ff465..f952fdda8 100644 +--- a/arch/arm64/include/asm/spinlock_types.h ++++ b/arch/arm64/include/asm/spinlock_types.h +@@ -16,10 +16,6 @@ + #ifndef __ASM_SPINLOCK_TYPES_H + #define __ASM_SPINLOCK_TYPES_H + +-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) +-# error "please don't include this file directly" +-#endif +- + #include + #include + +diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h +index 6bc5fe80f..c45726e21 100644 +--- a/arch/arm64/include/asm/thread_info.h ++++ b/arch/arm64/include/asm/thread_info.h +@@ -43,6 +43,7 @@ struct thread_info { + u64 ttbr0; /* saved TTBR0_EL1 */ + #endif + int preempt_count; /* 0 => preemptable, <0 => bug */ ++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ + }; + + #define thread_saved_pc(tsk) \ +@@ -78,11 +79,12 @@ void arch_release_task_struct(struct task_struct *tsk); + #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ + #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ + #define TIF_SEA_NOTIFY 6 /* notify to do an error recovery */ +-#define TIF_NOHZ 7 +-#define TIF_SYSCALL_TRACE 8 +-#define TIF_SYSCALL_AUDIT 9 +-#define TIF_SYSCALL_TRACEPOINT 10 +-#define TIF_SECCOMP 11 ++#define TIF_NEED_RESCHED_LAZY 7 ++#define TIF_NOHZ 8 ++#define TIF_SYSCALL_TRACE 9 ++#define 
TIF_SYSCALL_AUDIT 10 ++#define TIF_SYSCALL_TRACEPOINT 11 ++#define TIF_SECCOMP 12 + #define TIF_POLLING_NRFLAG 16 + #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ + #define TIF_FREEZE 19 +@@ -114,6 +116,7 @@ void arch_release_task_struct(struct task_struct *tsk); + #define _TIF_SVE (1 << TIF_SVE) + #define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64) + #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + + #ifdef CONFIG_UCE_KERNEL_RECOVERY + #define _TIF_UCE_KERNEL_RECOVERY (1 << TIF_UCE_KERNEL_RECOVERY) +@@ -121,8 +124,10 @@ void arch_release_task_struct(struct task_struct *tsk); + + #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ + _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ +- _TIF_UPROBE | _TIF_FSCHECK | _TIF_SEA_NOTIFY) ++ _TIF_UPROBE | _TIF_FSCHECK | _TIF_SEA_NOTIFY | \ ++ _TIF_NEED_RESCHED_LAZY) + ++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) + #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ + _TIF_NOHZ) +diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c +index 8511fc3b9..cb621dd49 100644 +--- a/arch/arm64/kernel/alternative.c ++++ b/arch/arm64/kernel/alternative.c +@@ -237,6 +237,7 @@ static int __apply_alternatives_multi_stop(void *unused) + void __init apply_alternatives_all(void) + { + /* better not try code patching on a live SMP system */ ++ kvm_compute_layout(); + stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); + } + +diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c +index 6e1847fb4..975b0f41e 100644 +--- a/arch/arm64/kernel/asm-offsets.c ++++ b/arch/arm64/kernel/asm-offsets.c +@@ -41,6 +41,7 @@ int main(void) + BLANK(); + DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); + DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); ++ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count)); + DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit)); + #ifdef CONFIG_ARM64_SW_TTBR0_PAN + DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S +index 9a044d425..7b44df229 100644 +--- a/arch/arm64/kernel/entry.S ++++ b/arch/arm64/kernel/entry.S +@@ -690,11 +690,16 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING + mrs x0, daif + orr w24, w24, w0 + alternative_else_nop_endif +- cbnz w24, 1f // preempt count != 0 || NMI return path ++ cbnz w24, 2f // preempt count != 0 || NMI return path + ldr x0, [tsk, #TSK_TI_FLAGS] // get flags +- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? +- bl el1_preempt ++ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? ++ ++ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count ++ cbnz w24, 2f // preempt lazy count != 0 ++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? + 1: ++ bl el1_preempt ++2: + #endif + + #ifdef CONFIG_ARM64_PSEUDO_NMI +@@ -727,6 +732,7 @@ el1_preempt: + 1: bl preempt_schedule_irq // irq en/disable is done inside + ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS + tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? ++ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling? 
+ ret x24 + #endif + +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c +index bb048144c..1634b1795 100644 +--- a/arch/arm64/kernel/fpsimd.c ++++ b/arch/arm64/kernel/fpsimd.c +@@ -160,6 +160,16 @@ static void sve_free(struct task_struct *task) + __sve_free(task); + } + ++static void *sve_free_atomic(struct task_struct *task) ++{ ++ void *sve_state = task->thread.sve_state; ++ ++ WARN_ON(test_tsk_thread_flag(task, TIF_SVE)); ++ ++ task->thread.sve_state = NULL; ++ return sve_state; ++} ++ + /* + * TIF_SVE controls whether a task can use SVE without trapping while + * in userspace, and also the way a task's FPSIMD/SVE state is stored +@@ -573,6 +583,7 @@ int sve_set_vector_length(struct task_struct *task, + * non-SVE thread. + */ + if (task == current) { ++ preempt_disable(); + local_bh_disable(); + + fpsimd_save(); +@@ -583,8 +594,10 @@ int sve_set_vector_length(struct task_struct *task, + if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) + sve_to_fpsimd(task); + +- if (task == current) ++ if (task == current) { + local_bh_enable(); ++ preempt_enable(); ++ } + + /* + * Force reallocation of task SVE state to the correct size +@@ -839,6 +852,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) + + sve_alloc(current); + ++ preempt_disable(); + local_bh_disable(); + + fpsimd_save(); +@@ -852,6 +866,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) + WARN_ON(1); /* SVE access shouldn't have trapped */ + + local_bh_enable(); ++ preempt_enable(); + } + + /* +@@ -918,10 +933,12 @@ void fpsimd_thread_switch(struct task_struct *next) + void fpsimd_flush_thread(void) + { + int vl, supported_vl; ++ void *mem = NULL; + + if (!system_supports_fpsimd()) + return; + ++ preempt_disable(); + local_bh_disable(); + + memset(¤t->thread.uw.fpsimd_state, 0, +@@ -930,7 +947,7 @@ void fpsimd_flush_thread(void) + + if (system_supports_sve()) { + clear_thread_flag(TIF_SVE); +- sve_free(current); ++ mem = sve_free_atomic(current); + + /* + * Reset the task vector length as required. 
+@@ -966,6 +983,8 @@ void fpsimd_flush_thread(void) + set_thread_flag(TIF_FOREIGN_FPSTATE); + + local_bh_enable(); ++ preempt_enable(); ++ kfree(mem); + } + + /* +@@ -977,9 +996,11 @@ void fpsimd_preserve_current_state(void) + if (!system_supports_fpsimd()) + return; + ++ preempt_disable(); + local_bh_disable(); + fpsimd_save(); + local_bh_enable(); ++ preempt_enable(); + } + + /* +@@ -1050,6 +1071,7 @@ void fpsimd_restore_current_state(void) + return; + } + ++ preempt_disable(); + local_bh_disable(); + + if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { +@@ -1058,6 +1080,7 @@ void fpsimd_restore_current_state(void) + } + + local_bh_enable(); ++ preempt_enable(); + } + + /* +@@ -1070,6 +1093,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) + if (WARN_ON(!system_supports_fpsimd())) + return; + ++ preempt_disable(); + local_bh_disable(); + + current->thread.uw.fpsimd_state = *state; +@@ -1082,6 +1106,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) + clear_thread_flag(TIF_FOREIGN_FPSTATE); + + local_bh_enable(); ++ preempt_enable(); + } + + /* +@@ -1128,6 +1153,7 @@ void kernel_neon_begin(void) + + BUG_ON(!may_use_simd()); + ++ preempt_disable(); + local_bh_disable(); + + __this_cpu_write(kernel_neon_busy, true); +@@ -1141,6 +1167,7 @@ void kernel_neon_begin(void) + preempt_disable(); + + local_bh_enable(); ++ preempt_enable(); + } + EXPORT_SYMBOL(kernel_neon_begin); + +diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c +index 75009eba3..fd9c01b43 100644 +--- a/arch/arm64/kernel/signal.c ++++ b/arch/arm64/kernel/signal.c +@@ -709,7 +709,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, + /* Check valid user FS if needed */ + addr_limit_user_check(); + +- if (thread_flags & _TIF_NEED_RESCHED) { ++ if (thread_flags & _TIF_NEED_RESCHED_MASK) { + /* Unmask Debug and SError for the next task */ + local_daif_restore(DAIF_PROCCTX_NOIRQ); + +diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c +index c712a7376..792da0e12 100644 +--- a/arch/arm64/kvm/va_layout.c ++++ b/arch/arm64/kvm/va_layout.c +@@ -33,7 +33,7 @@ static u8 tag_lsb; + static u64 tag_val; + static u64 va_mask; + +-static void compute_layout(void) ++__init void kvm_compute_layout(void) + { + phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start); + u64 hyp_va_msb; +@@ -121,8 +121,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt, + + BUG_ON(nr_inst != 5); + +- if (!has_vhe() && !va_mask) +- compute_layout(); + + for (i = 0; i < nr_inst; i++) { + u32 rd, rn, insn, oinsn; +@@ -167,9 +165,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt, + return; + } + +- if (!va_mask) +- compute_layout(); +- + /* + * Compute HYP VA by using the same computation as kern_hyp_va() + */ +diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h +index 7a906b521..d8f596fec 100644 +--- a/arch/hexagon/include/asm/spinlock_types.h ++++ b/arch/hexagon/include/asm/spinlock_types.h +@@ -21,10 +21,6 @@ + #ifndef _ASM_SPINLOCK_TYPES_H + #define _ASM_SPINLOCK_TYPES_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + typedef struct { + volatile unsigned int lock; + } arch_spinlock_t; +diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h +index 6e345fefc..681408d68 100644 +--- a/arch/ia64/include/asm/spinlock_types.h ++++ b/arch/ia64/include/asm/spinlock_types.h +@@ -2,10 +2,6 @@ + #ifndef 
_ASM_IA64_SPINLOCK_TYPES_H + #define _ASM_IA64_SPINLOCK_TYPES_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + typedef struct { + volatile unsigned int lock; + } arch_spinlock_t; +diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c +index 6115464d5..f09e34c84 100644 +--- a/arch/ia64/kernel/mca.c ++++ b/arch/ia64/kernel/mca.c +@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, + ti->cpu = cpu; + p->stack = ti; + p->state = TASK_UNINTERRUPTIBLE; +- cpumask_set_cpu(cpu, &p->cpus_allowed); ++ cpumask_set_cpu(cpu, &p->cpus_mask); + INIT_LIST_HEAD(&p->tasks); + p->parent = p->real_parent = p->group_leader = p; + INIT_LIST_HEAD(&p->children); +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig +index d2fefde97..09782e575 100644 +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -2519,7 +2519,7 @@ config MIPS_CRC_SUPPORT + # + config HIGHMEM + bool "High Memory Support" +- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA ++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL + + config CPU_SUPPORTS_HIGHMEM + bool +diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h +index e610473d6..1428b4feb 100644 +--- a/arch/mips/include/asm/switch_to.h ++++ b/arch/mips/include/asm/switch_to.h +@@ -42,7 +42,7 @@ extern struct task_struct *ll_task; + * inline to try to keep the overhead down. If we have been forced to run on + * a "CPU" with an FPU because of a previous high level of FP computation, + * but did not actually use the FPU during the most recent time-slice (CU1 +- * isn't set), we undo the restriction on cpus_allowed. ++ * isn't set), we undo the restriction on cpus_mask. + * + * We're not calling set_cpus_allowed() here, because we have no need to + * force prompt migration - we're already switching the current CPU to a +@@ -57,7 +57,7 @@ do { \ + test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \ + (!(KSTK_STATUS(prev) & ST0_CU1))) { \ + clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \ +- prev->cpus_allowed = prev->thread.user_cpus_allowed; \ ++ prev->cpus_mask = prev->thread.user_cpus_allowed; \ + } \ + next->thread.emulated_fp = 0; \ + } while(0) +diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c +index a7c0f97e4..1a08428ee 100644 +--- a/arch/mips/kernel/mips-mt-fpaff.c ++++ b/arch/mips/kernel/mips-mt-fpaff.c +@@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, + if (retval) + goto out_unlock; + +- cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); ++ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr); + cpumask_and(&mask, &allowed, cpu_active_mask); + + out_unlock: +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 9dab0ed1b..3623cf32f 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -1174,12 +1174,12 @@ static void mt_ase_fp_affinity(void) + * restricted the allowed set to exclude any CPUs with FPUs, + * we'll skip the procedure. 
+ */ +- if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) { ++ if (cpumask_intersects(¤t->cpus_mask, &mt_fpu_cpumask)) { + cpumask_t tmask; + + current->thread.user_cpus_allowed +- = current->cpus_allowed; +- cpumask_and(&tmask, ¤t->cpus_allowed, ++ = current->cpus_mask; ++ cpumask_and(&tmask, ¤t->cpus_mask, + &mt_fpu_cpumask); + set_cpus_allowed_ptr(current, &tmask); + set_thread_flag(TIF_FPUBOUND); +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +index 5329ef16d..cda7db582 100644 +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -105,10 +105,11 @@ config LOCKDEP_SUPPORT + + config RWSEM_GENERIC_SPINLOCK + bool ++ default y if PREEMPT_RT_FULL + + config RWSEM_XCHGADD_ALGORITHM + bool +- default y ++ default y if !PREEMPT_RT_FULL + + config GENERIC_LOCKBREAK + bool +@@ -216,6 +217,7 @@ config PPC + select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP ++ select HAVE_PREEMPT_LAZY + select HAVE_RCU_TABLE_FREE if SMP + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN +@@ -398,7 +400,7 @@ menu "Kernel options" + + config HIGHMEM + bool "High memory support" +- depends on PPC32 ++ depends on PPC32 && !PREEMPT_RT_FULL + + source kernel/Kconfig.hz + +diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h +index 87adaf13b..7305cb6a5 100644 +--- a/arch/powerpc/include/asm/spinlock_types.h ++++ b/arch/powerpc/include/asm/spinlock_types.h +@@ -2,10 +2,6 @@ + #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H + #define _ASM_POWERPC_SPINLOCK_TYPES_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + typedef struct { + volatile unsigned int slock; + } arch_spinlock_t; +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h +index 3c0002044..64c3d1a72 100644 +--- a/arch/powerpc/include/asm/thread_info.h ++++ b/arch/powerpc/include/asm/thread_info.h +@@ -37,6 +37,8 @@ struct thread_info { + int cpu; /* cpu we're on */ + int preempt_count; /* 0 => preemptable, + <0 => BUG */ ++ int preempt_lazy_count; /* 0 => preemptable, ++ <0 => BUG */ + unsigned long local_flags; /* private flags for thread */ + #ifdef CONFIG_LIVEPATCH + unsigned long *livepatch_sp; +@@ -81,18 +83,18 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src + #define TIF_SIGPENDING 1 /* signal pending */ + #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ + #define TIF_FSCHECK 3 /* Check FS is USER_DS on return */ +-#define TIF_32BIT 4 /* 32 bit binary */ + #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ + #define TIF_PATCH_PENDING 6 /* pending live patching update */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SINGLESTEP 8 /* singlestepping active */ + #define TIF_NOHZ 9 /* in adaptive nohz mode */ + #define TIF_SECCOMP 10 /* secure computing */ +-#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ +-#define TIF_NOERROR 12 /* Force successful syscall return */ ++ ++#define TIF_NEED_RESCHED_LAZY 11 /* lazy rescheduling necessary */ ++#define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */ ++ + #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ + #define TIF_UPROBE 14 /* breakpointed or single-stepping */ +-#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ + #define 
TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation + for stack store? */ + #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ +@@ -100,6 +102,10 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src + #define TIF_ELF2ABI 18 /* function descriptors must die! */ + #endif + #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ ++#define TIF_32BIT 20 /* 32 bit binary */ ++#define TIF_RESTOREALL 21 /* Restore all regs (implies NOERROR) */ ++#define TIF_NOERROR 22 /* Force successful syscall return */ ++ + + /* as above, but as bit values */ + #define _TIF_SYSCALL_TRACE (1<flags) + set_bits(irqtp->flags, &curtp->flags); + } ++#endif + + irq_hw_number_t virq_to_hw(unsigned int virq) + { +diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S +index 695b24a2d..032ada21b 100644 +--- a/arch/powerpc/kernel/misc_32.S ++++ b/arch/powerpc/kernel/misc_32.S +@@ -42,6 +42,7 @@ + * We store the saved ksp_limit in the unused part + * of the STACK_FRAME_OVERHEAD + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + stw r0,4(r1) +@@ -58,6 +59,7 @@ _GLOBAL(call_do_softirq) + stw r10,THREAD+KSP_LIMIT(r2) + mtlr r0 + blr ++#endif + + /* + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); +diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S +index facc02964..8b1774186 100644 +--- a/arch/powerpc/kernel/misc_64.S ++++ b/arch/powerpc/kernel/misc_64.S +@@ -32,6 +32,7 @@ + + .text + ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + std r0,16(r1) +@@ -42,6 +43,7 @@ _GLOBAL(call_do_softirq) + ld r0,16(r1) + mtlr r0 + blr ++#endif + + _GLOBAL(call_do_irq) + mflr r0 +diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig +index 68a0e9d5b..6f4d5d761 100644 +--- a/arch/powerpc/kvm/Kconfig ++++ b/arch/powerpc/kvm/Kconfig +@@ -178,6 +178,7 @@ config KVM_E500MC + config KVM_MPIC + bool "KVM in-kernel MPIC emulation" + depends on KVM && E500 ++ depends on !PREEMPT_RT_FULL + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQFD + select HAVE_KVM_IRQ_ROUTING +diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c +index c9ef3c532..cb10249b1 100644 +--- a/arch/powerpc/platforms/cell/spufs/sched.c ++++ b/arch/powerpc/platforms/cell/spufs/sched.c +@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx) + * runqueue. The context will be rescheduled on the proper node + * if it is timesliced or preempted. + */ +- cpumask_copy(&ctx->cpus_allowed, ¤t->cpus_allowed); ++ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); + + /* Save the current cpu id for spu interrupt routing. 
*/ + ctx->last_ran = raw_smp_processor_id(); +diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c +index e7075aaff..1580464a9 100644 +--- a/arch/powerpc/platforms/ps3/device-init.c ++++ b/arch/powerpc/platforms/ps3/device-init.c +@@ -752,8 +752,8 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev, + } + pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); + +- res = wait_event_interruptible(dev->done.wait, +- dev->done.done || kthread_should_stop()); ++ res = swait_event_interruptible_exclusive(dev->done.wait, ++ dev->done.done || kthread_should_stop()); + if (kthread_should_stop()) + res = -EINTR; + if (res) { +diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c +index 06f02960b..d80d919c7 100644 +--- a/arch/powerpc/platforms/pseries/iommu.c ++++ b/arch/powerpc/platforms/pseries/iommu.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -212,6 +213,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, + } + + static DEFINE_PER_CPU(__be64 *, tce_page); ++static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock); + + static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, + long npages, unsigned long uaddr, +@@ -232,7 +234,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, + direction, attrs); + } + +- local_irq_save(flags); /* to protect tcep and the page behind it */ ++ /* to protect tcep and the page behind it */ ++ local_lock_irqsave(tcp_page_lock, flags); + + tcep = __this_cpu_read(tce_page); + +@@ -243,7 +246,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, + tcep = (__be64 *)__get_free_page(GFP_ATOMIC); + /* If allocation fails, fall back to the loop implementation */ + if (!tcep) { +- local_irq_restore(flags); ++ local_unlock_irqrestore(tcp_page_lock, flags); + return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, + direction, attrs); + } +@@ -277,7 +280,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, + tcenum += limit; + } while (npages > 0 && !rc); + +- local_irq_restore(flags); ++ local_unlock_irqrestore(tcp_page_lock, flags); + + if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { + ret = (int)rc; +@@ -435,13 +438,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, + u64 rc = 0; + long l, limit; + +- local_irq_disable(); /* to protect tcep and the page behind it */ ++ /* to protect tcep and the page behind it */ ++ local_lock_irq(tcp_page_lock); + tcep = __this_cpu_read(tce_page); + + if (!tcep) { + tcep = (__be64 *)__get_free_page(GFP_ATOMIC); + if (!tcep) { +- local_irq_enable(); ++ local_unlock_irq(tcp_page_lock); + return -ENOMEM; + } + __this_cpu_write(tce_page, tcep); +@@ -487,7 +491,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, + + /* error cleanup: caller will clear whole range */ + +- local_irq_enable(); ++ local_unlock_irq(tcp_page_lock); + return rc; + } + +diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h +index cfed272e4..8e28e8176 100644 +--- a/arch/s390/include/asm/spinlock_types.h ++++ b/arch/s390/include/asm/spinlock_types.h +@@ -2,10 +2,6 @@ + #ifndef __ASM_SPINLOCK_TYPES_H + #define __ASM_SPINLOCK_TYPES_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + typedef struct { + int lock; + } __attribute__ ((aligned (4))) arch_spinlock_t; +diff 
--git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h +index e82369f28..22ca9a98b 100644 +--- a/arch/sh/include/asm/spinlock_types.h ++++ b/arch/sh/include/asm/spinlock_types.h +@@ -2,10 +2,6 @@ + #ifndef __ASM_SH_SPINLOCK_TYPES_H + #define __ASM_SH_SPINLOCK_TYPES_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + typedef struct { + volatile unsigned int lock; + } arch_spinlock_t; +diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c +index 5717c7cbd..66dd399b2 100644 +--- a/arch/sh/kernel/irq.c ++++ b/arch/sh/kernel/irq.c +@@ -148,6 +148,7 @@ void irq_ctx_exit(int cpu) + hardirq_ctx[cpu] = NULL; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct thread_info *curctx; +@@ -175,6 +176,7 @@ void do_softirq_own_stack(void) + "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" + ); + } ++#endif + #else + static inline void handle_one_irq(unsigned int irq) + { +diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c +index 713670e6d..5dfc71534 100644 +--- a/arch/sparc/kernel/irq_64.c ++++ b/arch/sparc/kernel/irq_64.c +@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) + set_irq_regs(old_regs); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + void *orig_sp, *sp = softirq_stack[smp_processor_id()]; +@@ -868,6 +869,7 @@ void do_softirq_own_stack(void) + __asm__ __volatile__("mov %0, %%sp" + : : "r" (orig_sp)); + } ++#endif + + #ifdef CONFIG_HOTPLUG_CPU + void fixup_irqs(void) +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 4ba057494..2dfb2ceeb 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -182,6 +182,7 @@ config X86 + select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP ++ select HAVE_PREEMPT_LAZY + select HAVE_RCU_TABLE_FREE if PARAVIRT + select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE + select HAVE_REGS_AND_STACK_ACCESS_API +@@ -266,8 +267,11 @@ config ARCH_MAY_HAVE_PC_FDC + def_bool y + depends on ISA_DMA_API + ++config RWSEM_GENERIC_SPINLOCK ++ def_bool PREEMPT_RT_FULL ++ + config RWSEM_XCHGADD_ALGORITHM +- def_bool y ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config GENERIC_CALIBRATE_DELAY + def_bool y +@@ -946,7 +950,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT + config MAXSMP + bool "Enable Maximum number of SMP Processors and NUMA Nodes" + depends on X86_64 && SMP && DEBUG_KERNEL +- select CPUMASK_OFFSTACK ++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL + ---help--- + Enable maximum number of CPUS and NUMA Nodes for this architecture. + If unsure, say N. 
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c +index 917f25e4d..58d8c03fc 100644 +--- a/arch/x86/crypto/aesni-intel_glue.c ++++ b/arch/x86/crypto/aesni-intel_glue.c +@@ -434,14 +434,14 @@ static int ecb_encrypt(struct skcipher_request *req) + + err = skcipher_walk_virt(&walk, req, true); + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -456,14 +456,14 @@ static int ecb_decrypt(struct skcipher_request *req) + + err = skcipher_walk_virt(&walk, req, true); + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -478,14 +478,14 @@ static int cbc_encrypt(struct skcipher_request *req) + + err = skcipher_walk_virt(&walk, req, true); + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -500,14 +500,14 @@ static int cbc_decrypt(struct skcipher_request *req) + + err = skcipher_walk_virt(&walk, req, true); + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -557,18 +557,20 @@ static int ctr_crypt(struct skcipher_request *req) + + err = skcipher_walk_virt(&walk, req, true); + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { ++ kernel_fpu_begin(); + aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + if (walk.nbytes) { ++ kernel_fpu_begin(); + ctr_crypt_final(ctx, &walk); ++ kernel_fpu_end(); + err = skcipher_walk_done(&walk, 0); + } +- kernel_fpu_end(); + + return err; + } +diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c +index 41034745d..d4bf7fc02 100644 +--- a/arch/x86/crypto/cast5_avx_glue.c ++++ b/arch/x86/crypto/cast5_avx_glue.c +@@ -61,7 +61,7 @@ static inline void cast5_fpu_end(bool fpu_enabled) + + static int ecb_crypt(struct skcipher_request *req, bool enc) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; +@@ -76,7 +76,7 @@ static int ecb_crypt(struct skcipher_request *req, bool enc) + u8 *wsrc = walk.src.virt.addr; + u8 *wdst = walk.dst.virt.addr; + +- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); + + /* Process multi-block batch */ + if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { +@@ -105,10 +105,9 @@ static int ecb_crypt(struct skcipher_request *req, bool enc) + } while (nbytes 
>= bsize); + + done: ++ cast5_fpu_end(fpu_enabled); + err = skcipher_walk_done(&walk, nbytes); + } +- +- cast5_fpu_end(fpu_enabled); + return err; + } + +@@ -212,7 +211,7 @@ static int cbc_decrypt(struct skcipher_request *req) + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct skcipher_walk walk; + unsigned int nbytes; + int err; +@@ -220,12 +219,11 @@ static int cbc_decrypt(struct skcipher_request *req) + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes)) { +- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); + nbytes = __cbc_decrypt(ctx, &walk); ++ cast5_fpu_end(fpu_enabled); + err = skcipher_walk_done(&walk, nbytes); + } +- +- cast5_fpu_end(fpu_enabled); + return err; + } + +@@ -292,7 +290,7 @@ static int ctr_crypt(struct skcipher_request *req) + { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct skcipher_walk walk; + unsigned int nbytes; + int err; +@@ -300,13 +298,12 @@ static int ctr_crypt(struct skcipher_request *req) + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { +- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); + nbytes = __ctr_crypt(&walk, ctx); ++ cast5_fpu_end(fpu_enabled); + err = skcipher_walk_done(&walk, nbytes); + } + +- cast5_fpu_end(fpu_enabled); +- + if (walk.nbytes) { + ctr_crypt_final(&walk, ctx); + err = skcipher_walk_done(&walk, 0); +diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c +index dce7c5d39..6194160b7 100644 +--- a/arch/x86/crypto/chacha20_glue.c ++++ b/arch/x86/crypto/chacha20_glue.c +@@ -81,23 +81,24 @@ static int chacha20_simd(struct skcipher_request *req) + + crypto_chacha20_init(state, ctx, walk.iv); + +- kernel_fpu_begin(); +- + while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { ++ kernel_fpu_begin(); ++ + chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, + rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); ++ kernel_fpu_end(); + err = skcipher_walk_done(&walk, + walk.nbytes % CHACHA20_BLOCK_SIZE); + } + + if (walk.nbytes) { ++ kernel_fpu_begin(); + chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes); ++ kernel_fpu_end(); + err = skcipher_walk_done(&walk, 0); + } + +- kernel_fpu_end(); +- + return err; + } + +diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c +index a78ef99a9..dac489a1c 100644 +--- a/arch/x86/crypto/glue_helper.c ++++ b/arch/x86/crypto/glue_helper.c +@@ -38,7 +38,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, + void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + const unsigned int bsize = 128 / 8; + struct skcipher_walk walk; +- bool fpu_enabled = false; ++ bool fpu_enabled; + unsigned int nbytes; + int err; + +@@ -51,7 +51,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, + unsigned int i; + + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- &walk, fpu_enabled, nbytes); ++ &walk, false, nbytes); + for (i = 0; i < gctx->num_funcs; i++) { + func_bytes = bsize * gctx->funcs[i].num_blocks; + +@@ -69,10 +69,9 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, + if (nbytes < bsize) + break; + } ++ glue_fpu_end(fpu_enabled); + err = 
skcipher_walk_done(&walk, nbytes); + } +- +- glue_fpu_end(fpu_enabled); + return err; + } + EXPORT_SYMBOL_GPL(glue_ecb_req_128bit); +@@ -115,7 +114,7 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, + void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + const unsigned int bsize = 128 / 8; + struct skcipher_walk walk; +- bool fpu_enabled = false; ++ bool fpu_enabled; + unsigned int nbytes; + int err; + +@@ -129,7 +128,7 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, + u128 last_iv; + + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- &walk, fpu_enabled, nbytes); ++ &walk, false, nbytes); + /* Start of the last block. */ + src += nbytes / bsize - 1; + dst += nbytes / bsize - 1; +@@ -161,10 +160,10 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, + done: + u128_xor(dst, dst, (u128 *)walk.iv); + *(u128 *)walk.iv = last_iv; ++ glue_fpu_end(fpu_enabled); + err = skcipher_walk_done(&walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); + return err; + } + EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit); +@@ -175,7 +174,7 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, + void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + const unsigned int bsize = 128 / 8; + struct skcipher_walk walk; +- bool fpu_enabled = false; ++ bool fpu_enabled; + unsigned int nbytes; + int err; + +@@ -189,7 +188,7 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, + le128 ctrblk; + + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- &walk, fpu_enabled, nbytes); ++ &walk, false, nbytes); + + be128_to_le128(&ctrblk, (be128 *)walk.iv); + +@@ -213,11 +212,10 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, + } + + le128_to_be128((be128 *)walk.iv, &ctrblk); ++ glue_fpu_end(fpu_enabled); + err = skcipher_walk_done(&walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); +- + if (nbytes) { + le128 ctrblk; + u128 tmp; +@@ -278,7 +276,7 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx, + { + const unsigned int bsize = 128 / 8; + struct skcipher_walk walk; +- bool fpu_enabled = false; ++ bool fpu_enabled; + unsigned int nbytes; + int err; + +@@ -289,21 +287,24 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx, + + /* set minimum length to bsize, for tweak_fn */ + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- &walk, fpu_enabled, ++ &walk, false, + nbytes < bsize ? bsize : nbytes); + + /* calculate first value of T */ + tweak_fn(tweak_ctx, walk.iv, walk.iv); + + while (nbytes) { ++ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, ++ &walk, fpu_enabled, ++ nbytes < bsize ? 
bsize : nbytes); + nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk); + ++ glue_fpu_end(fpu_enabled); ++ fpu_enabled = false; + err = skcipher_walk_done(&walk, nbytes); + nbytes = walk.nbytes; + } + +- glue_fpu_end(fpu_enabled); +- + return err; + } + EXPORT_SYMBOL_GPL(glue_xts_req_128bit); +diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c +index c66c9d141..dd5072a69 100644 +--- a/arch/x86/entry/common.c ++++ b/arch/x86/entry/common.c +@@ -134,7 +134,7 @@ static long syscall_trace_enter(struct pt_regs *regs) + + #define EXIT_TO_USERMODE_LOOP_FLAGS \ + (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ +- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING) ++ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING) + + static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) + { +@@ -149,9 +149,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) + /* We have work to do. */ + local_irq_enable(); + +- if (cached_flags & _TIF_NEED_RESCHED) ++ if (cached_flags & _TIF_NEED_RESCHED_MASK) + schedule(); + ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (unlikely(current->forced_info.si_signo)) { ++ struct task_struct *t = current; ++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); ++ t->forced_info.si_signo = 0; ++ } ++#endif + if (cached_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + +diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S +index 8059d4fd9..d880352e4 100644 +--- a/arch/x86/entry/entry_32.S ++++ b/arch/x86/entry/entry_32.S +@@ -766,8 +766,25 @@ END(ret_from_exception) + ENTRY(resume_kernel) + DISABLE_INTERRUPTS(CLBR_ANY) + .Lneed_resched: ++ # preempt count == 0 + NEED_RS set? + cmpl $0, PER_CPU_VAR(__preempt_count) ++#ifndef CONFIG_PREEMPT_LAZY + jnz restore_all_kernel ++#else ++ jz test_int_off ++ ++ # atleast preempt count == 0 ? ++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) ++ jne restore_all_kernel ++ ++ movl PER_CPU_VAR(current_task), %ebp ++ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? ++ jnz restore_all_kernel ++ ++ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp) ++ jz restore_all_kernel ++test_int_off: ++#endif + testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? + jz restore_all_kernel + call preempt_schedule_irq +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index ccb5e3486..663a99f63 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -732,7 +732,23 @@ retint_kernel: + btl $9, EFLAGS(%rsp) /* were interrupts off? */ + jnc 1f + 0: cmpl $0, PER_CPU_VAR(__preempt_count) ++#ifndef CONFIG_PREEMPT_LAZY + jnz 1f ++#else ++ jz do_preempt_schedule_irq ++ ++ # atleast preempt count == 0 ? ++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) ++ jnz 1f ++ ++ movq PER_CPU_VAR(current_task), %rcx ++ cmpl $0, TASK_TI_preempt_lazy_count(%rcx) ++ jnz 1f ++ ++ btl $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx) ++ jnc 1f ++do_preempt_schedule_irq: ++#endif + call preempt_schedule_irq + jmp 0b + 1: +@@ -1083,6 +1099,7 @@ bad_gs: + jmp 2b + .previous + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* Call softirq on interrupt stack. Interrupts are off. 
*/ + ENTRY(do_softirq_own_stack) + pushq %rbp +@@ -1093,6 +1110,7 @@ ENTRY(do_softirq_own_stack) + leaveq + ret + ENDPROC(do_softirq_own_stack) ++#endif + + #ifdef CONFIG_XEN + idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 +diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h +index b56d504af..e51c70940 100644 +--- a/arch/x86/include/asm/fpu/api.h ++++ b/arch/x86/include/asm/fpu/api.h +@@ -20,6 +20,7 @@ + */ + extern void kernel_fpu_begin(void); + extern void kernel_fpu_end(void); ++extern void kernel_fpu_resched(void); + extern bool irq_fpu_usable(void); + + /* +diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h +index 90cb2f36c..98a38e098 100644 +--- a/arch/x86/include/asm/preempt.h ++++ b/arch/x86/include/asm/preempt.h +@@ -86,17 +86,48 @@ static __always_inline void __preempt_count_sub(int val) + * a decrement which hits zero means we have no preempt_count and should + * reschedule. + */ +-static __always_inline bool __preempt_count_dec_and_test(void) ++static __always_inline bool ____preempt_count_dec_and_test(void) + { + return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var])); + } + ++static __always_inline bool __preempt_count_dec_and_test(void) ++{ ++ if (____preempt_count_dec_and_test()) ++ return true; ++#ifdef CONFIG_PREEMPT_LAZY ++ if (preempt_count()) ++ return false; ++ if (current_thread_info()->preempt_lazy_count) ++ return false; ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++#else ++ return false; ++#endif ++} ++ + /* + * Returns true when we need to resched and can (barring IRQ state). + */ + static __always_inline bool should_resched(int preempt_offset) + { ++#ifdef CONFIG_PREEMPT_LAZY ++ u32 tmp; ++ ++ tmp = raw_cpu_read_4(__preempt_count); ++ if (tmp == preempt_offset) ++ return true; ++ ++ /* preempt count == 0 ? */ ++ tmp &= ~PREEMPT_NEED_RESCHED; ++ if (tmp != preempt_offset) ++ return false; ++ if (current_thread_info()->preempt_lazy_count) ++ return false; ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++#else + return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); ++#endif + } + + #ifdef CONFIG_PREEMPT +diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h +index 33d3c88a7..c00e27af2 100644 +--- a/arch/x86/include/asm/signal.h ++++ b/arch/x86/include/asm/signal.h +@@ -28,6 +28,19 @@ typedef struct { + #define SA_IA32_ABI 0x02000000u + #define SA_X32_ABI 0x01000000u + ++/* ++ * Because some traps use the IST stack, we must keep preemption ++ * disabled while calling do_trap(), but do_trap() may call ++ * force_sig_info() which will grab the signal spin_locks for the ++ * task, which in PREEMPT_RT_FULL are mutexes. By defining ++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set ++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the ++ * trap. ++ */ ++#if defined(CONFIG_PREEMPT_RT_FULL) ++#define ARCH_RT_DELAYS_SIGNAL_SEND ++#endif ++ + #ifndef CONFIG_COMPAT + typedef sigset_t compat_sigset_t; + #endif +diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h +index 8ec97a62c..7bc85841f 100644 +--- a/arch/x86/include/asm/stackprotector.h ++++ b/arch/x86/include/asm/stackprotector.h +@@ -60,7 +60,7 @@ + */ + static __always_inline void boot_init_stack_canary(void) + { +- u64 canary; ++ u64 uninitialized_var(canary); + u64 tsc; + + #ifdef CONFIG_X86_64 +@@ -71,8 +71,14 @@ static __always_inline void boot_init_stack_canary(void) + * of randomness. 
The TSC only matters for very early init, + * there it already has some randomness on most systems. Later + * on during the bootup the random pool has true entropy too. ++ * For preempt-rt we need to weaken the randomness a bit, as ++ * we can't call into the random generator from atomic context ++ * due to locking constraints. We just leave canary ++ * uninitialized and use the TSC based randomness on top of it. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + get_random_bytes(&canary, sizeof(canary)); ++#endif + tsc = rdtsc(); + canary += tsc + (tsc << 32UL); + canary &= CANARY_MASK; +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h +index 82b73b75d..dc267291f 100644 +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h +@@ -56,17 +56,24 @@ struct task_struct; + struct thread_info { + unsigned long flags; /* low level flags */ + u32 status; /* thread synchronous flags */ ++ int preempt_lazy_count; /* 0 => lazy preemptable ++ <0 => BUG */ + }; + + #define INIT_THREAD_INFO(tsk) \ + { \ + .flags = 0, \ ++ .preempt_lazy_count = 0, \ + } + + #else /* !__ASSEMBLY__ */ + + #include + ++#define GET_THREAD_INFO(reg) \ ++ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \ ++ _ASM_SUB $(THREAD_SIZE),reg ; ++ + #endif + + /* +@@ -91,6 +98,7 @@ struct thread_info { + #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ + #define TIF_NOTSC 16 /* TSC is not accessible in userland */ + #define TIF_IA32 17 /* IA32 compatibility process */ ++#define TIF_NEED_RESCHED_LAZY 18 /* lazy rescheduling necessary */ + #define TIF_NOHZ 19 /* in adaptive nohz mode */ + #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ + #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ +@@ -120,6 +128,7 @@ struct thread_info { + #define _TIF_NOCPUID (1 << TIF_NOCPUID) + #define _TIF_NOTSC (1 << TIF_NOTSC) + #define _TIF_IA32 (1 << TIF_IA32) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_NOHZ (1 << TIF_NOHZ) + #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) + #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) +@@ -165,6 +174,8 @@ struct thread_info { + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) + ++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) ++ + #define STACK_WARN (THREAD_SIZE/8) + + /* +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index a6397da20..cebec7489 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -1723,7 +1723,7 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data) + return false; + } + +-static inline bool ioapic_irqd_mask(struct irq_data *data) ++static inline bool ioapic_prepare_move(struct irq_data *data) + { + /* If we are moving the IRQ we need to mask it */ + if (unlikely(irqd_is_setaffinity_pending(data))) { +@@ -1734,9 +1734,9 @@ static inline bool ioapic_irqd_mask(struct irq_data *data) + return false; + } + +-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) ++static inline void ioapic_finish_move(struct irq_data *data, bool moveit) + { +- if (unlikely(masked)) { ++ if (unlikely(moveit)) { + /* Only migrate the irq if the ack has been received. 
+ * + * On rare occasions the broadcast level triggered ack gets +@@ -1771,11 +1771,11 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) + } + } + #else +-static inline bool ioapic_irqd_mask(struct irq_data *data) ++static inline bool ioapic_prepare_move(struct irq_data *data) + { + return false; + } +-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) ++static inline void ioapic_finish_move(struct irq_data *data, bool moveit) + { + } + #endif +@@ -1784,11 +1784,11 @@ static void ioapic_ack_level(struct irq_data *irq_data) + { + struct irq_cfg *cfg = irqd_cfg(irq_data); + unsigned long v; +- bool masked; ++ bool moveit; + int i; + + irq_complete_move(cfg); +- masked = ioapic_irqd_mask(irq_data); ++ moveit = ioapic_prepare_move(irq_data); + + /* + * It appears there is an erratum which affects at least version 0x11 +@@ -1843,7 +1843,7 @@ static void ioapic_ack_level(struct irq_data *irq_data) + eoi_ioapic_pin(cfg->vector, irq_data->chip_data); + } + +- ioapic_irqd_unmask(irq_data, masked); ++ ioapic_finish_move(irq_data, moveit); + } + + static void ioapic_ir_ack_level(struct irq_data *irq_data) +diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c +index 01de31db3..ce1c5b9fb 100644 +--- a/arch/x86/kernel/asm-offsets.c ++++ b/arch/x86/kernel/asm-offsets.c +@@ -38,6 +38,7 @@ void common(void) { + + BLANK(); + OFFSET(TASK_TI_flags, task_struct, thread_info.flags); ++ OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count); + OFFSET(TASK_addr_limit, task_struct, thread.addr_limit); + + BLANK(); +@@ -94,6 +95,7 @@ void common(void) { + + BLANK(); + DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); ++ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); + + /* TLB state for the entry code */ + OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); +diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +index a999a58ca..d6410d074 100644 +--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c ++++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +@@ -1445,7 +1445,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) + * may be scheduled elsewhere and invalidate entries in the + * pseudo-locked region. 
+ */ +- if (!cpumask_subset(¤t->cpus_allowed, &plr->d->cpu_mask)) { ++ if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { + mutex_unlock(&rdtgroup_mutex); + return -EINVAL; + } +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c +index 2e5003fef..768c53767 100644 +--- a/arch/x86/kernel/fpu/core.c ++++ b/arch/x86/kernel/fpu/core.c +@@ -136,6 +136,18 @@ void kernel_fpu_end(void) + } + EXPORT_SYMBOL_GPL(kernel_fpu_end); + ++void kernel_fpu_resched(void) ++{ ++ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); ++ ++ if (should_resched(PREEMPT_OFFSET)) { ++ kernel_fpu_end(); ++ cond_resched(); ++ kernel_fpu_begin(); ++ } ++} ++EXPORT_SYMBOL_GPL(kernel_fpu_resched); ++ + /* + * Save the FPU state (mark it for reload if necessary): + * +diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c +index 3d9f4b7b4..3d56397e2 100644 +--- a/arch/x86/kernel/fpu/signal.c ++++ b/arch/x86/kernel/fpu/signal.c +@@ -351,10 +351,12 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) + sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); + } + ++ preempt_disable(); + local_bh_disable(); + fpu->initialized = 1; + fpu__restore(fpu); + local_bh_enable(); ++ preempt_enable(); + + /* Failure is already handled */ + return err; +diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c +index 95600a99a..9192d7608 100644 +--- a/arch/x86/kernel/irq_32.c ++++ b/arch/x86/kernel/irq_32.c +@@ -130,6 +130,7 @@ void irq_ctx_init(int cpu) + cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct irq_stack *irqstk; +@@ -146,6 +147,7 @@ void do_softirq_own_stack(void) + + call_on_stack(__do_softirq, isp); + } ++#endif + + bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) + { +diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c +index 020efe0f9..5d0c97555 100644 +--- a/arch/x86/kernel/process_32.c ++++ b/arch/x86/kernel/process_32.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -205,6 +206,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) + } + EXPORT_SYMBOL_GPL(start_thread); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ pte_t *ptep = kmap_pte - idx; ++ ++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); ++ } ++} ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + + /* + * switch_to(x,y) should switch tasks from x to y. +@@ -274,6 +304,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + + switch_to_extra(prev_p, next_p); + ++ switch_kmaps(prev_p, next_p); ++ + /* + * Leave lazy mode, flushing any hypercalls made here. 
+ * This must be done before restoring TLS segments so +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 262e49301..c2f51b6e8 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -2257,7 +2257,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) + apic->vcpu = vcpu; + + hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, +- HRTIMER_MODE_ABS_PINNED); ++ HRTIMER_MODE_ABS_PINNED_HARD); + apic->lapic_timer.timer.function = apic_timer_fn; + + /* +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 06df5c232..b4fd0e202 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -6878,6 +6878,13 @@ int kvm_arch_init(void *opaque) + goto out; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { ++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); ++ return -EOPNOTSUPP; ++ } ++#endif ++ + r = kvm_mmu_module_init(); + if (r) + goto out_free_percpu; +diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c +index 6d18b70ed..f752724c2 100644 +--- a/arch/x86/mm/highmem_32.c ++++ b/arch/x86/mm/highmem_32.c +@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap); + */ + void *kmap_atomic_prot(struct page *page, pgprot_t prot) + { ++ pte_t pte = mk_pte(page, prot); + unsigned long vaddr; + int idx, type; + +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + + if (!PageHighMem(page)) +@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); +- set_pte(kmap_pte-idx, mk_pte(page, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte-idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr) + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + arch_flush_lazy_mmu_mode(); +@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr) + #endif + + pagefault_enable(); +- preempt_enable(); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(__kunmap_atomic); + +diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c +index b3294d367..c0ec8d430 100644 +--- a/arch/x86/mm/iomap_32.c ++++ b/arch/x86/mm/iomap_32.c +@@ -59,6 +59,7 @@ EXPORT_SYMBOL_GPL(iomap_free); + + void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + { ++ pte_t pte = pfn_pte(pfn, prot); + unsigned long vaddr; + int idx, type; + +@@ -68,7 +69,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++ WARN_ON(!pte_none(*(kmap_pte - idx))); ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte - idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -119,6 +125,9 @@ iounmap_atomic(void __iomem *kvaddr) + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + } +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index e2d4b25c7..9626ebb9e 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -687,12 +687,18 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, + pgprot_t ref_prot; + + spin_lock(&pgd_lock); ++ /* ++ * Keep preemption disabled after __flush_tlb_all() which expects not be ++ * preempted during the flush of the local TLB. ++ */ ++ preempt_disable(); + /* + * Check for races, another CPU might have split this page + * up for us already: + */ + tmp = _lookup_address_cpa(cpa, address, &level); + if (tmp != kpte) { ++ preempt_enable(); + spin_unlock(&pgd_lock); + return 1; + } +@@ -726,6 +732,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, + break; + + default: ++ preempt_enable(); + spin_unlock(&pgd_lock); + return 1; + } +@@ -764,6 +771,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, + * going on. + */ + __flush_tlb_all(); ++ preempt_enable(); + spin_unlock(&pgd_lock); + + return 0; +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index dfc809b31..3b7dd7064 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -622,18 +622,16 @@ void __init efi_dump_pagetable(void) + + /* + * Makes the calling thread switch to/from efi_mm context. Can be used +- * for SetVirtualAddressMap() i.e. current->active_mm == init_mm as well +- * as during efi runtime calls i.e current->active_mm == current_mm. +- * We are not mm_dropping()/mm_grabbing() any mm, because we are not +- * losing/creating any references. ++ * in a kernel thread and user context. Preemption needs to remain disabled ++ * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm ++ * can not change under us. ++ * It should be ensured that there are no concurent calls to this function. 
+ */ + void efi_switch_mm(struct mm_struct *mm) + { +- task_lock(current); + efi_scratch.prev_mm = current->active_mm; + current->active_mm = mm; + switch_mm(efi_scratch.prev_mm, mm, NULL); +- task_unlock(current); + } + + #ifdef CONFIG_EFI_MIXED +diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h +index bb1fe6c18..8a22f1e7b 100644 +--- a/arch/xtensa/include/asm/spinlock_types.h ++++ b/arch/xtensa/include/asm/spinlock_types.h +@@ -2,10 +2,6 @@ + #ifndef __ASM_SPINLOCK_TYPES_H + #define __ASM_SPINLOCK_TYPES_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + typedef struct { + volatile unsigned int slock; + } arch_spinlock_t; +diff --git a/block/blk-core.c b/block/blk-core.c +index a5d80ab91..6a57dc63d 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -203,6 +203,9 @@ void __blk_rq_init(struct request_queue *q, struct request *rq) + + INIT_LIST_HEAD(&rq->queuelist); + INIT_LIST_HEAD(&rq->timeout_list); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); ++#endif + rq->cpu = -1; + rq->q = q; + rq->__sector = (sector_t) -1; +@@ -997,12 +1000,21 @@ void blk_queue_exit(struct request_queue *q) + percpu_ref_put(&q->q_usage_counter); + } + ++static void blk_queue_usage_counter_release_wrk(struct work_struct *work) ++{ ++ struct request_queue *q = ++ container_of(work, struct request_queue, mq_pcpu_wake); ++ ++ wake_up_all(&q->mq_freeze_wq); ++} ++ + static void blk_queue_usage_counter_release(struct percpu_ref *ref) + { + struct request_queue *q = + container_of(ref, struct request_queue, q_usage_counter); + +- wake_up_all(&q->mq_freeze_wq); ++ if (wq_has_sleeper(&q->mq_freeze_wq)) ++ schedule_work(&q->mq_pcpu_wake); + } + + static void blk_rq_timed_out_timer(struct timer_list *t) +@@ -1102,6 +1114,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); + + init_waitqueue_head(&q->mq_freeze_wq); ++ INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk); + mutex_init(&q_wrapper->mq_freeze_lock); + + /* +diff --git a/block/blk-ioc.c b/block/blk-ioc.c +index 281b7a93e..d6ae26a5e 100644 +--- a/block/blk-ioc.c ++++ b/block/blk-ioc.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + + #include "blk.h" + +@@ -119,7 +120,7 @@ static void ioc_release_fn(struct work_struct *work) + spin_unlock(q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + } + } +@@ -203,7 +204,7 @@ void put_io_context_active(struct io_context *ioc) + spin_unlock(icq->q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + goto retry; + } + } +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 0732bcc65..afb9c567d 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -377,6 +377,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, + rq->extra_len = 0; + rq->__deadline = 0; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); ++#endif + INIT_LIST_HEAD(&rq->timeout_list); + rq->timeout = 0; + +@@ -604,12 +607,24 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) + } + EXPORT_SYMBOL(blk_mq_end_request); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++void __blk_mq_complete_request_remote_work(struct work_struct *work) ++{ ++ struct request *rq = container_of(work, 
struct request, work); ++ ++ rq->q->softirq_done_fn(rq); ++} ++ ++#else ++ + static void __blk_mq_complete_request_remote(void *data) + { + struct request *rq = data; + + rq->q->softirq_done_fn(rq); + } ++#endif + + /** + * blk_mq_force_complete_rq() - Force complete the request, bypassing any error +@@ -651,19 +666,27 @@ void blk_mq_force_complete_rq(struct request *rq) + return; + } + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) + shared = cpus_share_cache(cpu, ctx->cpu); + + if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * We could force QUEUE_FLAG_SAME_FORCE then we would not get in ++ * here. But we could try to invoke it one the CPU like this. ++ */ ++ schedule_work_on(ctx->cpu, &rq->work); ++#else + rq->csd.func = __blk_mq_complete_request_remote; + rq->csd.info = rq; + rq->csd.flags = 0; + smp_call_function_single_async(ctx->cpu, &rq->csd); ++#endif + } else { + rq->q->softirq_done_fn(rq); + } +- put_cpu(); ++ put_cpu_light(); + } + EXPORT_SYMBOL_GPL(blk_mq_force_complete_rq); + +@@ -1466,14 +1489,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, + return; + + if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { +- int cpu = get_cpu(); ++ int cpu = get_cpu_light(); + if (cpumask_test_cpu(cpu, hctx->cpumask)) { + __blk_mq_run_hw_queue(hctx); +- put_cpu(); ++ put_cpu_light(); + return; + } + +- put_cpu(); ++ put_cpu_light(); + } + + kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, +@@ -3408,10 +3431,9 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, + kt = nsecs; + + mode = HRTIMER_MODE_REL; +- hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode); ++ hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current); + hrtimer_set_expires(&hs.timer, kt); + +- hrtimer_init_sleeper(&hs, current); + do { + if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) + break; +diff --git a/block/blk-mq.h b/block/blk-mq.h +index b3540d62c..9666c337c 100644 +--- a/block/blk-mq.h ++++ b/block/blk-mq.h +@@ -123,12 +123,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, + */ + static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) + { +- return __blk_mq_get_ctx(q, get_cpu()); ++ return __blk_mq_get_ctx(q, get_cpu_light()); + } + + static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) + { +- put_cpu(); ++ put_cpu_light(); + } + + struct blk_mq_alloc_data { +diff --git a/block/blk-softirq.c b/block/blk-softirq.c +index e47a2f751..7726f48d7 100644 +--- a/block/blk-softirq.c ++++ b/block/blk-softirq.c +@@ -53,6 +53,7 @@ static void trigger_softirq(void *data) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /* +@@ -91,6 +92,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu) + this_cpu_ptr(&blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + + return 0; + } +@@ -142,6 +144,7 @@ void __blk_complete_request(struct request *req) + goto do_local; + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__blk_complete_request); + +diff --git a/crypto/cryptd.c b/crypto/cryptd.c +index e0c8e907b..e079f9a70 100644 +--- a/crypto/cryptd.c ++++ b/crypto/cryptd.c +@@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); + struct cryptd_cpu_queue { + struct crypto_queue queue; + struct work_struct work; ++ spinlock_t 
qlock; + }; + + struct cryptd_queue { +@@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue, + cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); + crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); + INIT_WORK(&cpu_queue->work, cryptd_queue_worker); ++ spin_lock_init(&cpu_queue->qlock); + } + pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); + return 0; +@@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, + struct cryptd_cpu_queue *cpu_queue; + atomic_t *refcnt; + +- cpu = get_cpu(); +- cpu_queue = this_cpu_ptr(queue->cpu_queue); ++ cpu_queue = raw_cpu_ptr(queue->cpu_queue); ++ spin_lock_bh(&cpu_queue->qlock); ++ cpu = smp_processor_id(); ++ + err = crypto_enqueue_request(&cpu_queue->queue, request); + + refcnt = crypto_tfm_ctx(request->tfm); +@@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, + atomic_inc(refcnt); + + out_put_cpu: +- put_cpu(); ++ spin_unlock_bh(&cpu_queue->qlock); + + return err; + } +@@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct work_struct *work) + cpu_queue = container_of(work, struct cryptd_cpu_queue, work); + /* + * Only handle one request at a time to avoid hogging crypto workqueue. +- * preempt_disable/enable is used to prevent being preempted by +- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent +- * cryptd_enqueue_request() being accessed from software interrupts. + */ +- local_bh_disable(); +- preempt_disable(); ++ spin_lock_bh(&cpu_queue->qlock); + backlog = crypto_get_backlog(&cpu_queue->queue); + req = crypto_dequeue_request(&cpu_queue->queue); +- preempt_enable(); +- local_bh_enable(); ++ spin_unlock_bh(&cpu_queue->qlock); + + if (!req) + return; +diff --git a/crypto/scompress.c b/crypto/scompress.c +index 968bbcf65..c2f0077e0 100644 +--- a/crypto/scompress.c ++++ b/crypto/scompress.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -34,6 +35,7 @@ static void * __percpu *scomp_src_scratches; + static void * __percpu *scomp_dst_scratches; + static int scomp_scratch_users; + static DEFINE_MUTEX(scomp_lock); ++static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock); + + #ifdef CONFIG_NET + static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) +@@ -146,7 +148,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) + void **tfm_ctx = acomp_tfm_ctx(tfm); + struct crypto_scomp *scomp = *tfm_ctx; + void **ctx = acomp_request_ctx(req); +- const int cpu = get_cpu(); ++ const int cpu = local_lock_cpu(scomp_scratches_lock); + u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu); + u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu); + int ret; +@@ -181,7 +183,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) + 1); + } + out: +- put_cpu(); ++ local_unlock_cpu(scomp_scratches_lock); + return ret; + } + +diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c +index 4ed0a78fd..eece02262 100644 +--- a/drivers/block/zram/zcomp.c ++++ b/drivers/block/zram/zcomp.c +@@ -116,12 +116,20 @@ ssize_t zcomp_available_show(const char *comp, char *buf) + + struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) + { +- return *get_cpu_ptr(comp->stream); ++ struct zcomp_strm *zstrm; ++ ++ zstrm = *get_local_ptr(comp->stream); ++ spin_lock(&zstrm->zcomp_lock); ++ return zstrm; + } + + void zcomp_stream_put(struct zcomp *comp) + { +- put_cpu_ptr(comp->stream); ++ struct zcomp_strm *zstrm; ++ ++ zstrm = *this_cpu_ptr(comp->stream); ++ 
spin_unlock(&zstrm->zcomp_lock); ++ put_local_ptr(zstrm); + } + + int zcomp_compress(struct zcomp_strm *zstrm, +@@ -171,6 +179,7 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) + pr_err("Can't allocate a compression stream\n"); + return -ENOMEM; + } ++ spin_lock_init(&zstrm->zcomp_lock); + *per_cpu_ptr(comp->stream, cpu) = zstrm; + return 0; + } +diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h +index 41c1002a7..d424eafcb 100644 +--- a/drivers/block/zram/zcomp.h ++++ b/drivers/block/zram/zcomp.h +@@ -14,6 +14,7 @@ struct zcomp_strm { + /* compression/decompression buffer */ + void *buffer; + struct crypto_comp *tfm; ++ spinlock_t zcomp_lock; + }; + + /* dynamic per-device compression frontend */ +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c +index dade3734a..d3aace0d1 100644 +--- a/drivers/block/zram/zram_drv.c ++++ b/drivers/block/zram/zram_drv.c +@@ -53,6 +53,40 @@ static size_t huge_class_size; + + static void zram_free_page(struct zram *zram, size_t index); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) ++{ ++ size_t index; ++ ++ for (index = 0; index < num_pages; index++) ++ spin_lock_init(&zram->table[index].lock); ++} ++ ++static int zram_slot_trylock(struct zram *zram, u32 index) ++{ ++ int ret; ++ ++ ret = spin_trylock(&zram->table[index].lock); ++ if (ret) ++ __set_bit(ZRAM_LOCK, &zram->table[index].value); ++ return ret; ++} ++ ++static void zram_slot_lock(struct zram *zram, u32 index) ++{ ++ spin_lock(&zram->table[index].lock); ++ __set_bit(ZRAM_LOCK, &zram->table[index].value); ++} ++ ++static void zram_slot_unlock(struct zram *zram, u32 index) ++{ ++ __clear_bit(ZRAM_LOCK, &zram->table[index].value); ++ spin_unlock(&zram->table[index].lock); ++} ++ ++#else ++static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { } ++ + static int zram_slot_trylock(struct zram *zram, u32 index) + { + return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value); +@@ -67,6 +101,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index) + { + bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value); + } ++#endif + + static inline bool init_done(struct zram *zram) + { +@@ -902,6 +937,8 @@ static DEVICE_ATTR_RO(io_stat); + static DEVICE_ATTR_RO(mm_stat); + static DEVICE_ATTR_RO(debug_stat); + ++ ++ + static void zram_meta_free(struct zram *zram, u64 disksize) + { + size_t num_pages = disksize >> PAGE_SHIFT; +@@ -932,6 +969,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) + + if (!huge_class_size) + huge_class_size = zs_huge_class_size(zram->mem_pool); ++ zram_meta_init_table_locks(zram, num_pages); + return true; + } + +@@ -990,6 +1028,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, + unsigned long handle; + unsigned int size; + void *src, *dst; ++ struct zcomp_strm *zstrm; + + if (zram_wb_enabled(zram)) { + zram_slot_lock(zram, index); +@@ -1024,6 +1063,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, + + size = zram_get_obj_size(zram, index); + ++ zstrm = zcomp_stream_get(zram->comp); + src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); + if (size == PAGE_SIZE) { + dst = kmap_atomic(page); +@@ -1031,14 +1071,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, + kunmap_atomic(dst); + ret = 0; + } else { +- struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); + + dst = kmap_atomic(page); + ret = 
zcomp_decompress(zstrm, src, size, dst); + kunmap_atomic(dst); +- zcomp_stream_put(zram->comp); + } + zs_unmap_object(zram->mem_pool, handle); ++ zcomp_stream_put(zram->comp); + zram_slot_unlock(zram, index); + + /* Should NEVER happen. Return bio error if it does. */ +diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h +index d1095dfdf..144e91061 100644 +--- a/drivers/block/zram/zram_drv.h ++++ b/drivers/block/zram/zram_drv.h +@@ -61,6 +61,9 @@ struct zram_table_entry { + unsigned long element; + }; + unsigned long value; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t lock; ++#endif + #ifdef CONFIG_ZRAM_MEMORY_TRACKING + ktime_t ac_time; + #endif +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 05daadfbf..257bbb664 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -1237,28 +1237,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) + return *ptr; + } + +-void add_interrupt_randomness(int irq, int irq_flags) ++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) + { + struct entropy_store *r; + struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); +- struct pt_regs *regs = get_irq_regs(); + unsigned long now = jiffies; + cycles_t cycles = random_get_entropy(); + __u32 c_high, j_high; +- __u64 ip; + unsigned long seed; + int credit = 0; + + if (cycles == 0) +- cycles = get_reg(fast_pool, regs); ++ cycles = get_reg(fast_pool, NULL); + c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; + j_high = (sizeof(now) > 4) ? now >> 32 : 0; + fast_pool->pool[0] ^= cycles ^ j_high ^ irq; + fast_pool->pool[1] ^= now ^ c_high; +- ip = regs ? instruction_pointer(regs) : _RET_IP_; ++ if (!ip) ++ ip = _RET_IP_; + fast_pool->pool[2] ^= ip; + fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : +- get_reg(fast_pool, regs); ++ get_reg(fast_pool, NULL); + + fast_mix(fast_pool); + add_interrupt_bench(cycles); +diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c +index f08949a5f..58790a4f0 100644 +--- a/drivers/char/tpm/tpm_tis.c ++++ b/drivers/char/tpm/tpm_tis.c +@@ -53,6 +53,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da + return container_of(data, struct tpm_tis_tcg_phy, priv); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Flushes previous write operations to chip so that a subsequent ++ * ioread*()s won't stall a cpu. 
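The scompress.c hunk above (and the cn_proc.c and intel_sprite.c hunks further down) replaces open-coded get_cpu()/preempt_disable() sections with local locks from the RT tree's linux/locallock.h. The sketch below shows that pattern in isolation; it is not part of the patch, and the lock name, the per-CPU scratch structure and demo_use_scratch() are invented for illustration. On a non-RT kernel these primitives fall back to preempt/irq disabling; on PREEMPT_RT they become per-CPU sleeping locks.

#include <linux/locallock.h>
#include <linux/percpu.h>

/* Example-only per-CPU scratch area; not taken from the patch. */
struct demo_scratch {
	char buf[64];
};

static DEFINE_PER_CPU(struct demo_scratch, demo_scratches);
static DEFINE_LOCAL_IRQ_LOCK(demo_scratches_lock);

static void demo_use_scratch(void)
{
	struct demo_scratch *s;
	int cpu;

	/*
	 * Returns the CPU we are now pinned to; stays preemptible on -RT,
	 * degrades to plain preempt_disable() semantics otherwise.
	 */
	cpu = local_lock_cpu(demo_scratches_lock);
	s = per_cpu_ptr(&demo_scratches, cpu);

	/* ... fill s->buf ... */

	local_unlock_cpu(demo_scratches_lock);
}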
++ */ ++static inline void tpm_tis_flush(void __iomem *iobase) ++{ ++ ioread8(iobase + TPM_ACCESS(0)); ++} ++#else ++#define tpm_tis_flush(iobase) do { } while (0) ++#endif ++ ++static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr) ++{ ++ iowrite8(b, iobase + addr); ++ tpm_tis_flush(iobase); ++} ++ ++static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr) ++{ ++ iowrite32(b, iobase + addr); ++ tpm_tis_flush(iobase); ++} ++ + static bool interrupts = true; + module_param(interrupts, bool, 0444); + MODULE_PARM_DESC(interrupts, "Enable interrupts"); +@@ -150,7 +175,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, + struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); + + while (len--) +- iowrite8(*value++, phy->iobase + addr); ++ tpm_tis_iowrite8(*value++, phy->iobase, addr); + + return 0; + } +@@ -177,7 +202,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) + { + struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); + +- iowrite32(value, phy->iobase + addr); ++ tpm_tis_iowrite32(value, phy->iobase, addr); + + return 0; + } +diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig +index 4d37f018d..34b07047b 100644 +--- a/drivers/clocksource/Kconfig ++++ b/drivers/clocksource/Kconfig +@@ -404,8 +404,11 @@ config ARMV7M_SYSTICK + This options enables support for the ARMv7M system timer unit + + config ATMEL_PIT ++ bool "Microchip ARM Periodic Interval Timer (PIT)" if COMPILE_TEST + select TIMER_OF if OF +- def_bool SOC_AT91SAM9 || SOC_SAMA5 ++ help ++ This enables build of clocksource and clockevent driver for ++ the integrated PIT in Microchip ARM SoCs. + + config ATMEL_ST + bool "Atmel ST timer support" if COMPILE_TEST +@@ -415,6 +418,14 @@ config ATMEL_ST + help + Support for the Atmel ST timer. + ++config ATMEL_ARM_TCB_CLKSRC ++ bool "Microchip ARM TC Block" if COMPILE_TEST ++ select REGMAP_MMIO ++ depends on GENERIC_CLOCKEVENTS ++ help ++ This enables build of clocksource and clockevent driver for ++ the integrated Timer Counter Blocks in Microchip ARM SoCs. ++ + config CLKSRC_EXYNOS_MCT + bool "Exynos multi core timer driver" if COMPILE_TEST + depends on ARM || ARM64 +diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile +index db51b2427..0df9384a1 100644 +--- a/drivers/clocksource/Makefile ++++ b/drivers/clocksource/Makefile +@@ -3,7 +3,8 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o + obj-$(CONFIG_TIMER_PROBE) += timer-probe.o + obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o + obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o +-obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o ++obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o ++obj-$(CONFIG_ATMEL_ARM_TCB_CLKSRC) += timer-atmel-tcb.o + obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o + obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o + obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o +diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c +index 43f4d5c4d..ba15242a6 100644 +--- a/drivers/clocksource/tcb_clksrc.c ++++ b/drivers/clocksource/tcb_clksrc.c +@@ -25,8 +25,7 @@ + * this 32 bit free-running counter. the second channel is not used. + * + * - The third channel may be used to provide a 16-bit clockevent +- * source, used in either periodic or oneshot mode. This runs +- * at 32 KiHZ, and can handle delays of up to two seconds. ++ * source, used in either periodic or oneshot mode. 
+ * + * A boot clocksource and clockevent source are also currently needed, + * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so +@@ -126,6 +125,8 @@ static struct clocksource clksrc = { + struct tc_clkevt_device { + struct clock_event_device clkevt; + struct clk *clk; ++ bool clk_enabled; ++ u32 freq; + void __iomem *regs; + }; + +@@ -134,15 +135,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt) + return container_of(clkevt, struct tc_clkevt_device, clkevt); + } + +-/* For now, we always use the 32K clock ... this optimizes for NO_HZ, +- * because using one of the divided clocks would usually mean the +- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). +- * +- * A divided clock could be good for high resolution timers, since +- * 30.5 usec resolution can seem "low". +- */ + static u32 timer_clock; + ++static void tc_clk_disable(struct clock_event_device *d) ++{ ++ struct tc_clkevt_device *tcd = to_tc_clkevt(d); ++ ++ clk_disable(tcd->clk); ++ tcd->clk_enabled = false; ++} ++ ++static void tc_clk_enable(struct clock_event_device *d) ++{ ++ struct tc_clkevt_device *tcd = to_tc_clkevt(d); ++ ++ if (tcd->clk_enabled) ++ return; ++ clk_enable(tcd->clk); ++ tcd->clk_enabled = true; ++} ++ + static int tc_shutdown(struct clock_event_device *d) + { + struct tc_clkevt_device *tcd = to_tc_clkevt(d); +@@ -150,8 +162,14 @@ static int tc_shutdown(struct clock_event_device *d) + + writel(0xff, regs + ATMEL_TC_REG(2, IDR)); + writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); ++ return 0; ++} ++ ++static int tc_shutdown_clk_off(struct clock_event_device *d) ++{ ++ tc_shutdown(d); + if (!clockevent_state_detached(d)) +- clk_disable(tcd->clk); ++ tc_clk_disable(d); + + return 0; + } +@@ -164,9 +182,9 @@ static int tc_set_oneshot(struct clock_event_device *d) + if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) + tc_shutdown(d); + +- clk_enable(tcd->clk); ++ tc_clk_enable(d); + +- /* slow clock, count up to RC, then irq and stop */ ++ /* count up to RC, then irq and stop */ + writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | + ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); + writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -186,12 +204,12 @@ static int tc_set_periodic(struct clock_event_device *d) + /* By not making the gentime core emulate periodic mode on top + * of oneshot, we get lower overhead and improved accuracy. 
+ */ +- clk_enable(tcd->clk); ++ tc_clk_enable(d); + +- /* slow clock, count up to RC, then irq and restart */ ++ /* count up to RC, then irq and restart */ + writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +- writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); ++ writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); + + /* Enable clock and interrupts on RC compare */ + writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -218,9 +236,13 @@ static struct tc_clkevt_device clkevt = { + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + /* Should be lower than at91rm9200's system timer */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + .rating = 125, ++#else ++ .rating = 200, ++#endif + .set_next_event = tc_next_event, +- .set_state_shutdown = tc_shutdown, ++ .set_state_shutdown = tc_shutdown_clk_off, + .set_state_periodic = tc_set_periodic, + .set_state_oneshot = tc_set_oneshot, + }, +@@ -240,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void *handle) + return IRQ_NONE; + } + +-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ++static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) + { ++ unsigned divisor = atmel_tc_divisors[divisor_idx]; + int ret; + struct clk *t2_clk = tc->clk[2]; + int irq = tc->irq[2]; +@@ -262,7 +285,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) + clkevt.regs = tc->regs; + clkevt.clk = t2_clk; + +- timer_clock = clk32k_divisor_idx; ++ timer_clock = divisor_idx; ++ if (!divisor) ++ clkevt.freq = 32768; ++ else ++ clkevt.freq = clk_get_rate(t2_clk) / divisor; + + clkevt.clkevt.cpumask = cpumask_of(0); + +@@ -273,7 +300,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) + return ret; + } + +- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); ++ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); + + return ret; + } +@@ -410,7 +437,11 @@ static int __init tcb_clksrc_init(void) + goto err_disable_t1; + + /* channel 2: periodic and oneshot timer support */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + ret = setup_clkevents(tc, clk32k_divisor_idx); ++#else ++ ret = setup_clkevents(tc, best_divisor_idx); ++#endif + if (ret) + goto err_unregister_clksrc; + +diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c +new file mode 100644 +index 000000000..63ce3b693 +--- /dev/null ++++ b/drivers/clocksource/timer-atmel-tcb.c +@@ -0,0 +1,617 @@ ++// SPDX-License-Identifier: GPL-2.0 ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct atmel_tcb_clksrc { ++ struct clocksource clksrc; ++ struct clock_event_device clkevt; ++ struct regmap *regmap; ++ void __iomem *base; ++ struct clk *clk[2]; ++ char name[20]; ++ int channels[2]; ++ int bits; ++ int irq; ++ struct { ++ u32 cmr; ++ u32 imr; ++ u32 rc; ++ bool clken; ++ } cache[2]; ++ u32 bmr_cache; ++ bool registered; ++ bool clk_enabled; ++}; ++ ++static struct atmel_tcb_clksrc tc, tce; ++ ++static struct clk *tcb_clk_get(struct device_node *node, int channel) ++{ ++ struct clk *clk; ++ char clk_name[] = "t0_clk"; ++ ++ clk_name[1] += channel; ++ clk = of_clk_get_by_name(node->parent, clk_name); ++ if (!IS_ERR(clk)) ++ return clk; ++ ++ return of_clk_get_by_name(node->parent, "t0_clk"); ++} ++ ++/* ++ * Clockevent device using its own channel ++ */ ++ ++static void 
tc_clkevt2_clk_disable(struct clock_event_device *d) ++{ ++ clk_disable(tce.clk[0]); ++ tce.clk_enabled = false; ++} ++ ++static void tc_clkevt2_clk_enable(struct clock_event_device *d) ++{ ++ if (tce.clk_enabled) ++ return; ++ clk_enable(tce.clk[0]); ++ tce.clk_enabled = true; ++} ++ ++static int tc_clkevt2_stop(struct clock_event_device *d) ++{ ++ writel(0xff, tce.base + ATMEL_TC_IDR(tce.channels[0])); ++ writel(ATMEL_TC_CCR_CLKDIS, tce.base + ATMEL_TC_CCR(tce.channels[0])); ++ ++ return 0; ++} ++ ++static int tc_clkevt2_shutdown(struct clock_event_device *d) ++{ ++ tc_clkevt2_stop(d); ++ if (!clockevent_state_detached(d)) ++ tc_clkevt2_clk_disable(d); ++ ++ return 0; ++} ++ ++/* For now, we always use the 32K clock ... this optimizes for NO_HZ, ++ * because using one of the divided clocks would usually mean the ++ * tick rate can never be less than several dozen Hz (vs 0.5 Hz). ++ * ++ * A divided clock could be good for high resolution timers, since ++ * 30.5 usec resolution can seem "low". ++ */ ++static int tc_clkevt2_set_oneshot(struct clock_event_device *d) ++{ ++ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) ++ tc_clkevt2_stop(d); ++ ++ tc_clkevt2_clk_enable(d); ++ ++ /* slow clock, count up to RC, then irq and stop */ ++ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_CPCSTOP | ++ ATMEL_TC_CMR_WAVE | ATMEL_TC_CMR_WAVESEL_UPRC, ++ tce.base + ATMEL_TC_CMR(tce.channels[0])); ++ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0])); ++ ++ return 0; ++} ++ ++static int tc_clkevt2_set_periodic(struct clock_event_device *d) ++{ ++ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) ++ tc_clkevt2_stop(d); ++ ++ /* By not making the gentime core emulate periodic mode on top ++ * of oneshot, we get lower overhead and improved accuracy. 
++ */ ++ tc_clkevt2_clk_enable(d); ++ ++ /* slow clock, count up to RC, then irq and restart */ ++ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_WAVE | ++ ATMEL_TC_CMR_WAVESEL_UPRC, ++ tce.base + ATMEL_TC_CMR(tce.channels[0])); ++ writel((32768 + HZ / 2) / HZ, tce.base + ATMEL_TC_RC(tce.channels[0])); ++ ++ /* Enable clock and interrupts on RC compare */ ++ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0])); ++ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, ++ tce.base + ATMEL_TC_CCR(tce.channels[0])); ++ ++ return 0; ++} ++ ++static int tc_clkevt2_next_event(unsigned long delta, ++ struct clock_event_device *d) ++{ ++ writel(delta, tce.base + ATMEL_TC_RC(tce.channels[0])); ++ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, ++ tce.base + ATMEL_TC_CCR(tce.channels[0])); ++ ++ return 0; ++} ++ ++static irqreturn_t tc_clkevt2_irq(int irq, void *handle) ++{ ++ unsigned int sr; ++ ++ sr = readl(tce.base + ATMEL_TC_SR(tce.channels[0])); ++ if (sr & ATMEL_TC_CPCS) { ++ tce.clkevt.event_handler(&tce.clkevt); ++ return IRQ_HANDLED; ++ } ++ ++ return IRQ_NONE; ++} ++ ++static void tc_clkevt2_suspend(struct clock_event_device *d) ++{ ++ tce.cache[0].cmr = readl(tce.base + ATMEL_TC_CMR(tce.channels[0])); ++ tce.cache[0].imr = readl(tce.base + ATMEL_TC_IMR(tce.channels[0])); ++ tce.cache[0].rc = readl(tce.base + ATMEL_TC_RC(tce.channels[0])); ++ tce.cache[0].clken = !!(readl(tce.base + ATMEL_TC_SR(tce.channels[0])) & ++ ATMEL_TC_CLKSTA); ++} ++ ++static void tc_clkevt2_resume(struct clock_event_device *d) ++{ ++ /* Restore registers for the channel, RA and RB are not used */ ++ writel(tce.cache[0].cmr, tc.base + ATMEL_TC_CMR(tce.channels[0])); ++ writel(tce.cache[0].rc, tc.base + ATMEL_TC_RC(tce.channels[0])); ++ writel(0, tc.base + ATMEL_TC_RA(tce.channels[0])); ++ writel(0, tc.base + ATMEL_TC_RB(tce.channels[0])); ++ /* Disable all the interrupts */ ++ writel(0xff, tc.base + ATMEL_TC_IDR(tce.channels[0])); ++ /* Reenable interrupts that were enabled before suspending */ ++ writel(tce.cache[0].imr, tc.base + ATMEL_TC_IER(tce.channels[0])); ++ ++ /* Start the clock if it was used */ ++ if (tce.cache[0].clken) ++ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, ++ tc.base + ATMEL_TC_CCR(tce.channels[0])); ++} ++ ++static int __init tc_clkevt_register(struct device_node *node, ++ struct regmap *regmap, void __iomem *base, ++ int channel, int irq, int bits) ++{ ++ int ret; ++ struct clk *slow_clk; ++ ++ tce.regmap = regmap; ++ tce.base = base; ++ tce.channels[0] = channel; ++ tce.irq = irq; ++ ++ slow_clk = of_clk_get_by_name(node->parent, "slow_clk"); ++ if (IS_ERR(slow_clk)) ++ return PTR_ERR(slow_clk); ++ ++ ret = clk_prepare_enable(slow_clk); ++ if (ret) ++ return ret; ++ ++ tce.clk[0] = tcb_clk_get(node, tce.channels[0]); ++ if (IS_ERR(tce.clk[0])) { ++ ret = PTR_ERR(tce.clk[0]); ++ goto err_slow; ++ } ++ ++ snprintf(tce.name, sizeof(tce.name), "%s:%d", ++ kbasename(node->parent->full_name), channel); ++ tce.clkevt.cpumask = cpumask_of(0); ++ tce.clkevt.name = tce.name; ++ tce.clkevt.set_next_event = tc_clkevt2_next_event, ++ tce.clkevt.set_state_shutdown = tc_clkevt2_shutdown, ++ tce.clkevt.set_state_periodic = tc_clkevt2_set_periodic, ++ tce.clkevt.set_state_oneshot = tc_clkevt2_set_oneshot, ++ tce.clkevt.suspend = tc_clkevt2_suspend, ++ tce.clkevt.resume = tc_clkevt2_resume, ++ tce.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; ++ tce.clkevt.rating = 140; ++ ++ /* try to enable clk to avoid future errors in mode change */ ++ ret = clk_prepare_enable(tce.clk[0]); ++ 
if (ret) ++ goto err_slow; ++ clk_disable(tce.clk[0]); ++ ++ clockevents_config_and_register(&tce.clkevt, 32768, 1, ++ CLOCKSOURCE_MASK(bits)); ++ ++ ret = request_irq(tce.irq, tc_clkevt2_irq, IRQF_TIMER | IRQF_SHARED, ++ tce.clkevt.name, &tce); ++ if (ret) ++ goto err_clk; ++ ++ tce.registered = true; ++ ++ return 0; ++ ++err_clk: ++ clk_unprepare(tce.clk[0]); ++err_slow: ++ clk_disable_unprepare(slow_clk); ++ ++ return ret; ++} ++ ++/* ++ * Clocksource and clockevent using the same channel(s) ++ */ ++static u64 tc_get_cycles(struct clocksource *cs) ++{ ++ u32 lower, upper; ++ ++ do { ++ upper = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1])); ++ lower = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0])); ++ } while (upper != readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1]))); ++ ++ return (upper << 16) | lower; ++} ++ ++static u64 tc_get_cycles32(struct clocksource *cs) ++{ ++ return readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0])); ++} ++ ++static u64 notrace tc_sched_clock_read(void) ++{ ++ return tc_get_cycles(&tc.clksrc); ++} ++ ++static u64 notrace tc_sched_clock_read32(void) ++{ ++ return tc_get_cycles32(&tc.clksrc); ++} ++ ++static int tcb_clkevt_next_event(unsigned long delta, ++ struct clock_event_device *d) ++{ ++ u32 old, next, cur; ++ ++ old = readl(tc.base + ATMEL_TC_CV(tc.channels[0])); ++ next = old + delta; ++ writel(next, tc.base + ATMEL_TC_RC(tc.channels[0])); ++ cur = readl(tc.base + ATMEL_TC_CV(tc.channels[0])); ++ ++ /* check whether the delta elapsed while setting the register */ ++ if ((next < old && cur < old && cur > next) || ++ (next > old && (cur < old || cur > next))) { ++ /* ++ * Clear the CPCS bit in the status register to avoid ++ * generating a spurious interrupt next time a valid ++ * timer event is configured. 
++ */ ++ old = readl(tc.base + ATMEL_TC_SR(tc.channels[0])); ++ return -ETIME; ++ } ++ ++ writel(ATMEL_TC_CPCS, tc.base + ATMEL_TC_IER(tc.channels[0])); ++ ++ return 0; ++} ++ ++static irqreturn_t tc_clkevt_irq(int irq, void *handle) ++{ ++ unsigned int sr; ++ ++ sr = readl(tc.base + ATMEL_TC_SR(tc.channels[0])); ++ if (sr & ATMEL_TC_CPCS) { ++ tc.clkevt.event_handler(&tc.clkevt); ++ return IRQ_HANDLED; ++ } ++ ++ return IRQ_NONE; ++} ++ ++static int tcb_clkevt_oneshot(struct clock_event_device *dev) ++{ ++ if (clockevent_state_oneshot(dev)) ++ return 0; ++ ++ /* ++ * Because both clockevent devices may share the same IRQ, we don't want ++ * the less likely one to stay requested ++ */ ++ return request_irq(tc.irq, tc_clkevt_irq, IRQF_TIMER | IRQF_SHARED, ++ tc.name, &tc); ++} ++ ++static int tcb_clkevt_shutdown(struct clock_event_device *dev) ++{ ++ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[0])); ++ if (tc.bits == 16) ++ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[1])); ++ ++ if (!clockevent_state_detached(dev)) ++ free_irq(tc.irq, &tc); ++ ++ return 0; ++} ++ ++static void __init tcb_setup_dual_chan(struct atmel_tcb_clksrc *tc, ++ int mck_divisor_idx) ++{ ++ /* first channel: waveform mode, input mclk/8, clock TIOA on overflow */ ++ writel(mck_divisor_idx /* likely divide-by-8 */ ++ | ATMEL_TC_CMR_WAVE ++ | ATMEL_TC_CMR_WAVESEL_UP /* free-run */ ++ | ATMEL_TC_CMR_ACPA(SET) /* TIOA rises at 0 */ ++ | ATMEL_TC_CMR_ACPC(CLEAR), /* (duty cycle 50%) */ ++ tc->base + ATMEL_TC_CMR(tc->channels[0])); ++ writel(0x0000, tc->base + ATMEL_TC_RA(tc->channels[0])); ++ writel(0x8000, tc->base + ATMEL_TC_RC(tc->channels[0])); ++ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */ ++ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0])); ++ ++ /* second channel: waveform mode, input TIOA */ ++ writel(ATMEL_TC_CMR_XC(tc->channels[1]) /* input: TIOA */ ++ | ATMEL_TC_CMR_WAVE ++ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */ ++ tc->base + ATMEL_TC_CMR(tc->channels[1])); ++ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[1])); /* no irqs */ ++ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[1])); ++ ++ /* chain both channel, we assume the previous channel */ ++ regmap_write(tc->regmap, ATMEL_TC_BMR, ++ ATMEL_TC_BMR_TCXC(1 + tc->channels[1], tc->channels[1])); ++ /* then reset all the timers */ ++ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); ++} ++ ++static void __init tcb_setup_single_chan(struct atmel_tcb_clksrc *tc, ++ int mck_divisor_idx) ++{ ++ /* channel 0: waveform mode, input mclk/8 */ ++ writel(mck_divisor_idx /* likely divide-by-8 */ ++ | ATMEL_TC_CMR_WAVE ++ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */ ++ tc->base + ATMEL_TC_CMR(tc->channels[0])); ++ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */ ++ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0])); ++ ++ /* then reset all the timers */ ++ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); ++} ++ ++static void tc_clksrc_suspend(struct clocksource *cs) ++{ ++ int i; ++ ++ for (i = 0; i < 1 + (tc.bits == 16); i++) { ++ tc.cache[i].cmr = readl(tc.base + ATMEL_TC_CMR(tc.channels[i])); ++ tc.cache[i].imr = readl(tc.base + ATMEL_TC_IMR(tc.channels[i])); ++ tc.cache[i].rc = readl(tc.base + ATMEL_TC_RC(tc.channels[i])); ++ tc.cache[i].clken = !!(readl(tc.base + ++ ATMEL_TC_SR(tc.channels[i])) & ++ ATMEL_TC_CLKSTA); ++ } ++ ++ if (tc.bits == 16) ++ regmap_read(tc.regmap, ATMEL_TC_BMR, &tc.bmr_cache); ++} ++ ++static void 
tc_clksrc_resume(struct clocksource *cs) ++{ ++ int i; ++ ++ for (i = 0; i < 1 + (tc.bits == 16); i++) { ++ /* Restore registers for the channel, RA and RB are not used */ ++ writel(tc.cache[i].cmr, tc.base + ATMEL_TC_CMR(tc.channels[i])); ++ writel(tc.cache[i].rc, tc.base + ATMEL_TC_RC(tc.channels[i])); ++ writel(0, tc.base + ATMEL_TC_RA(tc.channels[i])); ++ writel(0, tc.base + ATMEL_TC_RB(tc.channels[i])); ++ /* Disable all the interrupts */ ++ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[i])); ++ /* Reenable interrupts that were enabled before suspending */ ++ writel(tc.cache[i].imr, tc.base + ATMEL_TC_IER(tc.channels[i])); ++ ++ /* Start the clock if it was used */ ++ if (tc.cache[i].clken) ++ writel(ATMEL_TC_CCR_CLKEN, tc.base + ++ ATMEL_TC_CCR(tc.channels[i])); ++ } ++ ++ /* in case of dual channel, chain channels */ ++ if (tc.bits == 16) ++ regmap_write(tc.regmap, ATMEL_TC_BMR, tc.bmr_cache); ++ /* Finally, trigger all the channels*/ ++ regmap_write(tc.regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); ++} ++ ++static int __init tcb_clksrc_register(struct device_node *node, ++ struct regmap *regmap, void __iomem *base, ++ int channel, int channel1, int irq, ++ int bits) ++{ ++ u32 rate, divided_rate = 0; ++ int best_divisor_idx = -1; ++ int i, err = -1; ++ u64 (*tc_sched_clock)(void); ++ ++ tc.regmap = regmap; ++ tc.base = base; ++ tc.channels[0] = channel; ++ tc.channels[1] = channel1; ++ tc.irq = irq; ++ tc.bits = bits; ++ ++ tc.clk[0] = tcb_clk_get(node, tc.channels[0]); ++ if (IS_ERR(tc.clk[0])) ++ return PTR_ERR(tc.clk[0]); ++ err = clk_prepare_enable(tc.clk[0]); ++ if (err) { ++ pr_debug("can't enable T0 clk\n"); ++ goto err_clk; ++ } ++ ++ /* How fast will we be counting? Pick something over 5 MHz. */ ++ rate = (u32)clk_get_rate(tc.clk[0]); ++ for (i = 0; i < 5; i++) { ++ unsigned int divisor = atmel_tc_divisors[i]; ++ unsigned int tmp; ++ ++ if (!divisor) ++ continue; ++ ++ tmp = rate / divisor; ++ pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp); ++ if (best_divisor_idx > 0) { ++ if (tmp < 5 * 1000 * 1000) ++ continue; ++ } ++ divided_rate = tmp; ++ best_divisor_idx = i; ++ } ++ ++ if (tc.bits == 32) { ++ tc.clksrc.read = tc_get_cycles32; ++ tcb_setup_single_chan(&tc, best_divisor_idx); ++ tc_sched_clock = tc_sched_clock_read32; ++ snprintf(tc.name, sizeof(tc.name), "%s:%d", ++ kbasename(node->parent->full_name), tc.channels[0]); ++ } else { ++ tc.clk[1] = tcb_clk_get(node, tc.channels[1]); ++ if (IS_ERR(tc.clk[1])) ++ goto err_disable_t0; ++ ++ err = clk_prepare_enable(tc.clk[1]); ++ if (err) { ++ pr_debug("can't enable T1 clk\n"); ++ goto err_clk1; ++ } ++ tc.clksrc.read = tc_get_cycles, ++ tcb_setup_dual_chan(&tc, best_divisor_idx); ++ tc_sched_clock = tc_sched_clock_read; ++ snprintf(tc.name, sizeof(tc.name), "%s:%d,%d", ++ kbasename(node->parent->full_name), tc.channels[0], ++ tc.channels[1]); ++ } ++ ++ pr_debug("%s at %d.%03d MHz\n", tc.name, ++ divided_rate / 1000000, ++ ((divided_rate + 500000) % 1000000) / 1000); ++ ++ tc.clksrc.name = tc.name; ++ tc.clksrc.suspend = tc_clksrc_suspend; ++ tc.clksrc.resume = tc_clksrc_resume; ++ tc.clksrc.rating = 200; ++ tc.clksrc.mask = CLOCKSOURCE_MASK(32); ++ tc.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; ++ ++ err = clocksource_register_hz(&tc.clksrc, divided_rate); ++ if (err) ++ goto err_disable_t1; ++ ++ sched_clock_register(tc_sched_clock, 32, divided_rate); ++ ++ tc.registered = true; ++ ++ /* Set up and register clockevents */ ++ tc.clkevt.name = tc.name; ++ tc.clkevt.cpumask = cpumask_of(0); ++ 
tc.clkevt.set_next_event = tcb_clkevt_next_event; ++ tc.clkevt.set_state_oneshot = tcb_clkevt_oneshot; ++ tc.clkevt.set_state_shutdown = tcb_clkevt_shutdown; ++ tc.clkevt.features = CLOCK_EVT_FEAT_ONESHOT; ++ tc.clkevt.rating = 125; ++ ++ clockevents_config_and_register(&tc.clkevt, divided_rate, 1, ++ BIT(tc.bits) - 1); ++ ++ return 0; ++ ++err_disable_t1: ++ if (tc.bits == 16) ++ clk_disable_unprepare(tc.clk[1]); ++ ++err_clk1: ++ if (tc.bits == 16) ++ clk_put(tc.clk[1]); ++ ++err_disable_t0: ++ clk_disable_unprepare(tc.clk[0]); ++ ++err_clk: ++ clk_put(tc.clk[0]); ++ ++ pr_err("%s: unable to register clocksource/clockevent\n", ++ tc.clksrc.name); ++ ++ return err; ++} ++ ++static int __init tcb_clksrc_init(struct device_node *node) ++{ ++ const struct of_device_id *match; ++ struct regmap *regmap; ++ void __iomem *tcb_base; ++ u32 channel; ++ int irq, err, chan1 = -1; ++ unsigned bits; ++ ++ if (tc.registered && tce.registered) ++ return -ENODEV; ++ ++ /* ++ * The regmap has to be used to access registers that are shared ++ * between channels on the same TCB but we keep direct IO access for ++ * the counters to avoid the impact on performance ++ */ ++ regmap = syscon_node_to_regmap(node->parent); ++ if (IS_ERR(regmap)) ++ return PTR_ERR(regmap); ++ ++ tcb_base = of_iomap(node->parent, 0); ++ if (!tcb_base) { ++ pr_err("%s +%d %s\n", __FILE__, __LINE__, __func__); ++ return -ENXIO; ++ } ++ ++ match = of_match_node(atmel_tcb_dt_ids, node->parent); ++ bits = (uintptr_t)match->data; ++ ++ err = of_property_read_u32_index(node, "reg", 0, &channel); ++ if (err) ++ return err; ++ ++ irq = of_irq_get(node->parent, channel); ++ if (irq < 0) { ++ irq = of_irq_get(node->parent, 0); ++ if (irq < 0) ++ return irq; ++ } ++ ++ if (tc.registered) ++ return tc_clkevt_register(node, regmap, tcb_base, channel, irq, ++ bits); ++ ++ if (bits == 16) { ++ of_property_read_u32_index(node, "reg", 1, &chan1); ++ if (chan1 == -1) { ++ if (tce.registered) { ++ pr_err("%s: clocksource needs two channels\n", ++ node->parent->full_name); ++ return -EINVAL; ++ } else { ++ return tc_clkevt_register(node, regmap, ++ tcb_base, channel, ++ irq, bits); ++ } ++ } ++ } ++ ++ return tcb_clksrc_register(node, regmap, tcb_base, channel, chan1, irq, ++ bits); ++} ++TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init); +diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c +index ad48fd52c..c5264b3ee 100644 +--- a/drivers/connector/cn_proc.c ++++ b/drivers/connector/cn_proc.c +@@ -32,6 +32,7 @@ + #include + + #include ++#include + + /* + * Size of a cn_msg followed by a proc_event structure. 
Since the +@@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; + + /* proc_event_counts is used as the sequence number of the netlink message */ + static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; ++static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock); + + static inline void send_msg(struct cn_msg *msg) + { +- preempt_disable(); ++ local_lock(send_msg_lock); + + msg->seq = __this_cpu_inc_return(proc_event_counts) - 1; + ((struct proc_event *)msg->data)->cpu = smp_processor_id(); +@@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg) + */ + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT); + +- preempt_enable(); ++ local_unlock(send_msg_lock); + } + + void proc_fork_connector(struct task_struct *task) +diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 +index 35f71825b..bb4a6160d 100644 +--- a/drivers/cpufreq/Kconfig.x86 ++++ b/drivers/cpufreq/Kconfig.x86 +@@ -125,7 +125,7 @@ config X86_POWERNOW_K7_ACPI + + config X86_POWERNOW_K8 + tristate "AMD Opteron/Athlon64 PowerNow!" +- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ ++ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE + help + This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. + Support for K10 and newer processors is now in acpi-cpufreq. +diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c +index 67f7f8c42..b84e6c8b1 100644 +--- a/drivers/crypto/caam/qi.c ++++ b/drivers/crypto/caam/qi.c +@@ -83,13 +83,6 @@ EXPORT_SYMBOL(caam_congested); + static u64 times_congested; + #endif + +-/* +- * CPU from where the module initialised. This is required because QMan driver +- * requires CGRs to be removed from same CPU from where they were originally +- * allocated. +- */ +-static int mod_init_cpu; +- + /* + * This is a a cache of buffers, from which the users of CAAM QI driver + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than +@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx) + } + EXPORT_SYMBOL(caam_drv_ctx_rel); + +-int caam_qi_shutdown(struct device *qidev) ++void caam_qi_shutdown(struct device *qidev) + { +- int i, ret; ++ int i; + struct caam_qi_priv *priv = dev_get_drvdata(qidev); + const cpumask_t *cpus = qman_affine_cpus(); +- struct cpumask old_cpumask = current->cpus_allowed; + + for_each_cpu(i, cpus) { + struct napi_struct *irqtask; +@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev) + dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); + } + +- /* +- * QMan driver requires CGRs to be deleted from same CPU from where they +- * were instantiated. Hence we get the module removal execute from the +- * same CPU from where it was originally inserted. 
+- */ +- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu)); +- +- ret = qman_delete_cgr(&priv->cgr); +- if (ret) +- dev_err(qidev, "Deletion of CGR failed: %d\n", ret); +- else +- qman_release_cgrid(priv->cgr.cgrid); ++ qman_delete_cgr_safe(&priv->cgr); ++ qman_release_cgrid(priv->cgr.cgrid); + + kmem_cache_destroy(qi_cache); + +- /* Now that we're done with the CGRs, restore the cpus allowed mask */ +- set_cpus_allowed_ptr(current, &old_cpumask); +- + platform_device_unregister(priv->qi_pdev); +- return ret; + } + + static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) +@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev) + struct device *ctrldev = &caam_pdev->dev, *qidev; + struct caam_drv_private *ctrlpriv; + const cpumask_t *cpus = qman_affine_cpus(); +- struct cpumask old_cpumask = current->cpus_allowed; + static struct platform_device_info qi_pdev_info = { + .name = "caam_qi", + .id = PLATFORM_DEVID_NONE + }; + +- /* +- * QMAN requires CGRs to be removed from same CPU+portal from where it +- * was originally allocated. Hence we need to note down the +- * initialisation CPU and use the same CPU for module exit. +- * We select the first CPU to from the list of portal owning CPUs. +- * Then we pin module init to this CPU. +- */ +- mod_init_cpu = cpumask_first(cpus); +- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu)); +- + qi_pdev_info.parent = ctrldev; + qi_pdev_info.dma_mask = dma_get_mask(ctrldev); + qi_pdev = platform_device_register_full(&qi_pdev_info); +@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev) + return -ENOMEM; + } + +- /* Done with the CGRs; restore the cpus allowed mask */ +- set_cpus_allowed_ptr(current, &old_cpumask); + #ifdef CONFIG_DEBUG_FS + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, + ×_congested, &caam_fops_u64_ro); +diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h +index 357b69f57..b6c8acc30 100644 +--- a/drivers/crypto/caam/qi.h ++++ b/drivers/crypto/caam/qi.h +@@ -174,7 +174,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc); + void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx); + + int caam_qi_init(struct platform_device *pdev); +-int caam_qi_shutdown(struct device *dev); ++void caam_qi_shutdown(struct device *dev); + + /** + * qi_cache_alloc - Allocate buffers from CAAM-QI cache +diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c +index 69842145c..4c3ef46e7 100644 +--- a/drivers/dma-buf/dma-buf.c ++++ b/drivers/dma-buf/dma-buf.c +@@ -179,7 +179,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) + return 0; + + retry: +- seq = read_seqcount_begin(&resv->seq); ++ seq = read_seqbegin(&resv->seq); + rcu_read_lock(); + + fobj = rcu_dereference(resv->fence); +@@ -188,7 +188,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) + else + shared_count = 0; + fence_excl = rcu_dereference(resv->fence_excl); +- if (read_seqcount_retry(&resv->seq, seq)) { ++ if (read_seqretry(&resv->seq, seq)) { + rcu_read_unlock(); + goto retry; + } +@@ -1046,12 +1046,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) + + robj = buf_obj->resv; + while (true) { +- seq = read_seqcount_begin(&robj->seq); ++ seq = read_seqbegin(&robj->seq); + rcu_read_lock(); + fobj = rcu_dereference(robj->fence); + shared_count = fobj ? 
fobj->shared_count : 0; + fence = rcu_dereference(robj->fence_excl); +- if (!read_seqcount_retry(&robj->seq, seq)) ++ if (!read_seqretry(&robj->seq, seq)) + break; + rcu_read_unlock(); + } +diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c +index 49ab09468..f11d58492 100644 +--- a/drivers/dma-buf/reservation.c ++++ b/drivers/dma-buf/reservation.c +@@ -109,8 +109,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, + + dma_fence_get(fence); + +- preempt_disable(); +- write_seqcount_begin(&obj->seq); ++ write_seqlock(&obj->seq); + + for (i = 0; i < fobj->shared_count; ++i) { + struct dma_fence *old_fence; +@@ -121,8 +120,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, + if (old_fence->context == fence->context) { + /* memory barrier is added by write_seqcount_begin */ + RCU_INIT_POINTER(fobj->shared[i], fence); +- write_seqcount_end(&obj->seq); +- preempt_enable(); ++ write_sequnlock(&obj->seq); + + dma_fence_put(old_fence); + return; +@@ -146,8 +144,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, + fobj->shared_count++; + } + +- write_seqcount_end(&obj->seq); +- preempt_enable(); ++ write_sequnlock(&obj->seq); + + dma_fence_put(signaled); + } +@@ -191,15 +188,13 @@ reservation_object_add_shared_replace(struct reservation_object *obj, + fobj->shared_count++; + + done: +- preempt_disable(); +- write_seqcount_begin(&obj->seq); ++ write_seqlock(&obj->seq); + /* + * RCU_INIT_POINTER can be used here, + * seqcount provides the necessary barriers + */ + RCU_INIT_POINTER(obj->fence, fobj); +- write_seqcount_end(&obj->seq); +- preempt_enable(); ++ write_sequnlock(&obj->seq); + + if (!old) + return; +@@ -259,14 +254,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, + if (fence) + dma_fence_get(fence); + +- preempt_disable(); +- write_seqcount_begin(&obj->seq); +- /* write_seqcount_begin provides the necessary memory barrier */ ++ write_seqlock(&obj->seq); + RCU_INIT_POINTER(obj->fence_excl, fence); + if (old) + old->shared_count = 0; +- write_seqcount_end(&obj->seq); +- preempt_enable(); ++ write_sequnlock(&obj->seq); + + /* inplace update, no shared fences */ + while (i--) +@@ -349,13 +341,10 @@ int reservation_object_copy_fences(struct reservation_object *dst, + src_list = reservation_object_get_list(dst); + old = reservation_object_get_excl(dst); + +- preempt_disable(); +- write_seqcount_begin(&dst->seq); +- /* write_seqcount_begin provides the necessary memory barrier */ ++ write_seqlock(&dst->seq); + RCU_INIT_POINTER(dst->fence_excl, new); + RCU_INIT_POINTER(dst->fence, dst_list); +- write_seqcount_end(&dst->seq); +- preempt_enable(); ++ write_sequnlock(&dst->seq); + + if (src_list) + kfree_rcu(src_list, rcu); +@@ -396,7 +385,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, + shared_count = i = 0; + + rcu_read_lock(); +- seq = read_seqcount_begin(&obj->seq); ++ seq = read_seqbegin(&obj->seq); + + fence_excl = rcu_dereference(obj->fence_excl); + if (fence_excl && !dma_fence_get_rcu(fence_excl)) +@@ -445,7 +434,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, + } + } + +- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { ++ if (i != shared_count || read_seqretry(&obj->seq, seq)) { + while (i--) + dma_fence_put(shared[i]); + dma_fence_put(fence_excl); +@@ -494,7 +483,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + + retry: + shared_count = 0; +- seq = 
read_seqcount_begin(&obj->seq); ++ seq = read_seqbegin(&obj->seq); + rcu_read_lock(); + i = -1; + +@@ -541,7 +530,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + + rcu_read_unlock(); + if (fence) { +- if (read_seqcount_retry(&obj->seq, seq)) { ++ if (read_seqretry(&obj->seq, seq)) { + dma_fence_put(fence); + goto retry; + } +@@ -597,7 +586,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + retry: + ret = true; + shared_count = 0; +- seq = read_seqcount_begin(&obj->seq); ++ seq = read_seqbegin(&obj->seq); + + if (test_all) { + unsigned i; +@@ -618,7 +607,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + break; + } + +- if (read_seqcount_retry(&obj->seq, seq)) ++ if (read_seqretry(&obj->seq, seq)) + goto retry; + } + +@@ -631,7 +620,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + if (ret < 0) + goto retry; + +- if (read_seqcount_retry(&obj->seq, seq)) ++ if (read_seqretry(&obj->seq, seq)) + goto retry; + } + } +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c +index 184ad34b9..661d980dc 100644 +--- a/drivers/firmware/efi/efi.c ++++ b/drivers/firmware/efi/efi.c +@@ -89,7 +89,7 @@ struct mm_struct efi_mm = { + + struct workqueue_struct *efi_rts_wq; + +-static bool disable_runtime; ++static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE); + static int __init setup_noefi(char *arg) + { + disable_runtime = true; +@@ -115,6 +115,9 @@ static int __init parse_efi_cmdline(char *str) + if (parse_option_str(str, "noruntime")) + disable_runtime = true; + ++ if (parse_option_str(str, "runtime")) ++ disable_runtime = false; ++ + return 0; + } + early_param("efi", parse_efi_cmdline); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +index f92597c29..10c675850 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +@@ -261,11 +261,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, + } + + /* Install the new fence list, seqcount provides the barriers */ +- preempt_disable(); +- write_seqcount_begin(&resv->seq); ++ write_seqlock(&resv->seq); + RCU_INIT_POINTER(resv->fence, new); +- write_seqcount_end(&resv->seq); +- preempt_enable(); ++ write_sequnlock(&resv->seq); + + /* Drop the references to the removed fences or move them to ef_list */ + for (i = j, k = 0; i < old->shared_count; ++i) { +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index b1b207747..a6949f9d2 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -516,7 +516,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, + long timeout, + struct intel_rps_client *rps_client) + { +- unsigned int seq = __read_seqcount_begin(&resv->seq); ++ unsigned int seq = read_seqbegin(&resv->seq); + struct dma_fence *excl; + bool prune_fences = false; + +@@ -569,9 +569,9 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, + * signaled and that the reservation object has not been changed (i.e. + * no new fences have been added). 
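The dma-buf and reservation.c hunks above convert the fence bookkeeping from a bare seqcount_t (bracketed by explicit preempt_disable()) to a full seqlock_t: writers use write_seqlock()/write_sequnlock() and readers retry with read_seqbegin()/read_seqretry(). A minimal, generic sketch of that reader/writer shape follows; struct demo_obj and its value field are invented stand-ins, not the reservation_object layout.

#include <linux/seqlock.h>

/* Invented example object; stands in for the seqlock-protected state. */
struct demo_obj {
	seqlock_t seq;
	int value;
};

static void demo_init(struct demo_obj *obj)
{
	seqlock_init(&obj->seq);
	obj->value = 0;
}

static void demo_write(struct demo_obj *obj, int v)
{
	/* Takes the internal lock and bumps the sequence count. */
	write_seqlock(&obj->seq);
	obj->value = v;
	write_sequnlock(&obj->seq);
}

static int demo_read(struct demo_obj *obj)
{
	unsigned int seq;
	int v;

	do {
		seq = read_seqbegin(&obj->seq);
		v = obj->value;
	} while (read_seqretry(&obj->seq, seq));

	return v;
}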
+ */ +- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { ++ if (prune_fences && !read_seqretry(&resv->seq, seq)) { + if (reservation_object_trylock(resv)) { +- if (!__read_seqcount_retry(&resv->seq, seq)) ++ if (!read_seqretry(&resv->seq, seq)) + reservation_object_add_excl_fence(resv, NULL); + reservation_object_unlock(resv); + } +@@ -4693,7 +4693,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, + * + */ + retry: +- seq = raw_read_seqcount(&obj->resv->seq); ++ seq = read_seqbegin(&obj->resv->seq); + + /* Translate the exclusive fence to the READ *and* WRITE engine */ + args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); +@@ -4711,7 +4711,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, + } + } + +- if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) ++ if (args->busy && read_seqretry(&obj->resv->seq, seq)) + goto retry; + + err = 0; +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 298779693..f65817c51 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -1025,6 +1025,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ ++ preempt_disable_rt(); + + /* Get optional system timestamp before query. */ + if (stime) +@@ -1076,6 +1077,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + *etime = ktime_get(); + + /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ ++ preempt_enable_rt(); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + +diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c +index 5c2c93cba..7124510b9 100644 +--- a/drivers/gpu/drm/i915/i915_request.c ++++ b/drivers/gpu/drm/i915/i915_request.c +@@ -356,9 +356,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, + + GEM_BUG_ON(!i915_request_completed(rq)); + +- local_irq_disable(); +- +- spin_lock(&engine->timeline.lock); ++ spin_lock_irq(&engine->timeline.lock); + GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests)); + list_del_init(&rq->link); + spin_unlock(&engine->timeline.lock); +@@ -372,9 +370,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, + GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); + atomic_dec(&rq->i915->gt_pm.rps.num_waiters); + } +- spin_unlock(&rq->lock); +- +- local_irq_enable(); ++ spin_unlock_irq(&rq->lock); + + /* + * The backing object for the context is done after switching to the +diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h +index b50c6b829..33028d8f4 100644 +--- a/drivers/gpu/drm/i915/i915_trace.h ++++ b/drivers/gpu/drm/i915/i915_trace.h +@@ -2,6 +2,10 @@ + #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) + #define _I915_TRACE_H_ + ++#ifdef CONFIG_PREEMPT_RT_BASE ++#define NOTRACE ++#endif ++ + #include + #include + #include +@@ -679,7 +683,7 @@ DEFINE_EVENT(i915_request, i915_request_add, + TP_ARGS(rq) + ); + +-#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) ++#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE) + DEFINE_EVENT(i915_request, i915_request_submit, + TP_PROTO(struct i915_request *rq), + TP_ARGS(rq) +diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c +index f7026e887..07e4ddebd 100644 +--- 
a/drivers/gpu/drm/i915/intel_sprite.c ++++ b/drivers/gpu/drm/i915/intel_sprite.c +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + #include "intel_drv.h" + #include "intel_frontbuffer.h" + #include +@@ -60,6 +61,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, + #define VBLANK_EVASION_TIME_US 100 + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock); ++ + /** + * intel_pipe_update_start() - start update of a set of display registers + * @new_crtc_state: the new crtc state +@@ -107,7 +110,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) + if (intel_psr_wait_for_idle(new_crtc_state)) + DRM_ERROR("PSR idle timed out, atomic update may fail\n"); + +- local_irq_disable(); ++ local_lock_irq(pipe_update_lock); + + crtc->debug.min_vbl = min; + crtc->debug.max_vbl = max; +@@ -131,11 +134,11 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) + break; + } + +- local_irq_enable(); ++ local_unlock_irq(pipe_update_lock); + + timeout = schedule_timeout(timeout); + +- local_irq_disable(); ++ local_lock_irq(pipe_update_lock); + } + + finish_wait(wq, &wait); +@@ -168,7 +171,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) + return; + + irq_disable: +- local_irq_disable(); ++ local_lock_irq(pipe_update_lock); + } + + /** +@@ -204,7 +207,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) + new_crtc_state->base.event = NULL; + } + +- local_irq_enable(); ++ local_unlock_irq(pipe_update_lock); + + if (intel_vgpu_active(dev_priv)) + return; +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +index 09522b915..a3e183c59 100644 +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -1818,6 +1818,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + struct radeon_device *rdev = dev->dev_private; + + /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ ++ preempt_disable_rt(); + + /* Get optional system timestamp before query. */ + if (stime) +@@ -1910,6 +1911,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + *etime = ktime_get(); + + /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ ++ preempt_enable_rt(); + + /* Decode into vertical and horizontal scanout position. */ + *vpos = position & 0x1fff; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index d0fd147ef..fb5a3461b 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -167,10 +167,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) + { + u32 *fifo_mem = dev_priv->mmio_virt; + +- preempt_disable(); + if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0) + vmw_write(dev_priv, SVGA_REG_SYNC, reason); +- preempt_enable(); + } + + void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index 12bc9fa21..278f03f50 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -112,10 +112,12 @@ int hv_post_message(union hv_connection_id connection_id, + static void hv_stimer0_isr(void) + { + struct hv_per_cpu_context *hv_cpu; ++ struct pt_regs *regs = get_irq_regs(); ++ u64 ip = regs ? 
instruction_pointer(regs) : 0; + + hv_cpu = this_cpu_ptr(hv_context.cpu_context); + hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt); +- add_interrupt_randomness(stimer0_vector, 0); ++ add_interrupt_randomness(stimer0_vector, 0, ip); + } + + static int hv_ce_set_next_event(unsigned long delta, +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h +index 87d3d7da7..1d2d8a4b8 100644 +--- a/drivers/hv/hyperv_vmbus.h ++++ b/drivers/hv/hyperv_vmbus.h +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + #include "hv_trace.h" + +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index 9aa18f387..39aaa1499 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -1042,6 +1042,8 @@ static void vmbus_isr(void) + void *page_addr = hv_cpu->synic_event_page; + struct hv_message *msg; + union hv_synic_event_flags *event; ++ struct pt_regs *regs = get_irq_regs(); ++ u64 ip = regs ? instruction_pointer(regs) : 0; + bool handled = false; + + if (unlikely(page_addr == NULL)) +@@ -1085,7 +1087,7 @@ static void vmbus_isr(void) + tasklet_schedule(&hv_cpu->msg_dpc); + } + +- add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); ++ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip); + } + + /* +diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c +index c1ce2299a..5c57ecf4b 100644 +--- a/drivers/i2c/busses/i2c-exynos5.c ++++ b/drivers/i2c/busses/i2c-exynos5.c +@@ -800,9 +800,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev) + } + + ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq, +- IRQF_NO_SUSPEND | IRQF_ONESHOT, +- dev_name(&pdev->dev), i2c); +- ++ IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c); + if (ret != 0) { + dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq); + goto err_clk; +diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c +index 061a4bfb0..575aff50b 100644 +--- a/drivers/i2c/busses/i2c-hix5hd2.c ++++ b/drivers/i2c/busses/i2c-hix5hd2.c +@@ -449,8 +449,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev) + hix5hd2_i2c_init(priv); + + ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq, +- IRQF_NO_SUSPEND | IRQF_ONESHOT, +- dev_name(&pdev->dev), priv); ++ IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv); + if (ret != 0) { + dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq); + goto err_clk; +diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c +index bedd5fba3..3f4259f11 100644 +--- a/drivers/infiniband/hw/hfi1/affinity.c ++++ b/drivers/infiniband/hw/hfi1/affinity.c +@@ -1037,7 +1037,7 @@ int hfi1_get_proc_affinity(int node) + struct hfi1_affinity_node *entry; + cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask; + const struct cpumask *node_mask, +- *proc_mask = ¤t->cpus_allowed; ++ *proc_mask = current->cpus_ptr; + struct hfi1_affinity_node_list *affinity = &node_affinity; + struct cpu_mask_set *set = &affinity->proc; + +@@ -1045,7 +1045,7 @@ int hfi1_get_proc_affinity(int node) + * check whether process/context affinity has already + * been set + */ +- if (cpumask_weight(proc_mask) == 1) { ++ if (current->nr_cpus_allowed == 1) { + hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", + current->pid, current->comm, + cpumask_pr_args(proc_mask)); +@@ -1056,7 +1056,7 @@ int hfi1_get_proc_affinity(int node) + cpu = cpumask_first(proc_mask); + cpumask_set_cpu(cpu, &set->used); + goto done; +- } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { ++ } else if 
(current->nr_cpus_allowed < cpumask_weight(&set->mask)) { + hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", + current->pid, current->comm, + cpumask_pr_args(proc_mask)); +diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c +index 291c12f58..05e7b28a0 100644 +--- a/drivers/infiniband/hw/hfi1/sdma.c ++++ b/drivers/infiniband/hw/hfi1/sdma.c +@@ -853,14 +853,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, + { + struct sdma_rht_node *rht_node; + struct sdma_engine *sde = NULL; +- const struct cpumask *current_mask = ¤t->cpus_allowed; + unsigned long cpu_id; + + /* + * To ensure that always the same sdma engine(s) will be + * selected make sure the process is pinned to this CPU only. + */ +- if (cpumask_weight(current_mask) != 1) ++ if (current->nr_cpus_allowed != 1) + goto out; + + cpu_id = smp_processor_id(); +diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c +index 78fa634de..27b6e664e 100644 +--- a/drivers/infiniband/hw/qib/qib_file_ops.c ++++ b/drivers/infiniband/hw/qib/qib_file_ops.c +@@ -1142,7 +1142,7 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt) + static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd) + { + struct qib_filedata *fd = fp->private_data; +- const unsigned int weight = cpumask_weight(¤t->cpus_allowed); ++ const unsigned int weight = current->nr_cpus_allowed; + const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus); + int local_cpu; + +@@ -1623,9 +1623,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) + ret = find_free_ctxt(i_minor - 1, fp, uinfo); + else { + int unit; +- const unsigned int cpu = cpumask_first(¤t->cpus_allowed); +- const unsigned int weight = +- cpumask_weight(¤t->cpus_allowed); ++ const unsigned int cpu = cpumask_first(current->cpus_ptr); ++ const unsigned int weight = current->nr_cpus_allowed; + + if (weight == 1 && !test_bit(cpu, qib_cpulist)) + if (!find_hca(cpu, &unit) && unit >= 0) +diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig +index 4018af769..b4ce8c115 100644 +--- a/drivers/leds/trigger/Kconfig ++++ b/drivers/leds/trigger/Kconfig +@@ -63,6 +63,7 @@ config LEDS_TRIGGER_BACKLIGHT + + config LEDS_TRIGGER_CPU + bool "LED CPU Trigger" ++ depends on !PREEMPT_RT_BASE + help + This allows LEDs to be controlled by active CPUs. 
This shows + the active CPUs across an array of LEDs so you can see which +diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig +index f6e0a8b3a..18c03d79a 100644 +--- a/drivers/md/bcache/Kconfig ++++ b/drivers/md/bcache/Kconfig +@@ -1,6 +1,7 @@ + + config BCACHE + tristate "Block device as cache" ++ depends on !PREEMPT_RT_FULL + select CRC64 + help + Allows a block device to be used as cache for other devices; uses +diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c +index 3bd805f7c..4e7c61d08 100644 +--- a/drivers/md/dm-rq.c ++++ b/drivers/md/dm-rq.c +@@ -692,7 +692,6 @@ static void dm_old_request_fn(struct request_queue *q) + /* Establish tio->ti before queuing work (map_tio_request) */ + tio->ti = ti; + kthread_queue_work(&md->kworker, &tio->work); +- BUG_ON(!irqs_disabled()); + } + } + +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 24ef07d52..2b6f91da7 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) + struct raid5_percpu *percpu; + unsigned long cpu; + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + percpu = per_cpu_ptr(conf->percpu, cpu); ++ spin_lock(&percpu->lock); + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { + ops_run_biofill(sh); + overlap_clear++; +@@ -2129,7 +2130,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&sh->raid_conf->wait_for_overlap); + } +- put_cpu(); ++ spin_unlock(&percpu->lock); ++ put_cpu_light(); + } + + static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) +@@ -6815,6 +6817,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) + __func__, cpu); + return -ENOMEM; + } ++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); + return 0; + } + +@@ -6825,7 +6828,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) + conf->percpu = alloc_percpu(struct raid5_percpu); + if (!conf->percpu) + return -ENOMEM; +- + err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); + if (!err) { + conf->scribble_disks = max(conf->raid_disks, +diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h +index 8474c2241..a3bf907ab 100644 +--- a/drivers/md/raid5.h ++++ b/drivers/md/raid5.h +@@ -637,6 +637,7 @@ struct r5conf { + int recovery_disabled; + /* per cpu variables */ + struct raid5_percpu { ++ spinlock_t lock; /* Protection for -RT */ + struct page *spare_page; /* Used when checking P/Q in raid6 */ + struct flex_array *scribble; /* space for constructing buffer + * lists and performing address +diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig +index 74f7c79d5..7fe963930 100644 +--- a/drivers/misc/Kconfig ++++ b/drivers/misc/Kconfig +@@ -69,8 +69,7 @@ config ATMEL_TCB_CLKSRC + are combined to make a single 32-bit timer. + + When GENERIC_CLOCKEVENTS is defined, the third timer channel +- may be used as a clock event device supporting oneshot mode +- (delays of up to two seconds) based on the 32 KiHz clock. ++ may be used as a clock event device supporting oneshot mode. + + config ATMEL_TCB_CLKSRC_BLOCK + int +@@ -83,6 +82,15 @@ config ATMEL_TCB_CLKSRC_BLOCK + TC can be used for other purposes, such as PWM generation and + interval timing. + ++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK ++ bool "TC Block use 32 KiHz clock" ++ depends on ATMEL_TCB_CLKSRC ++ default y ++ help ++ Select this to use 32 KiHz base clock rate as TC block clock ++ source for clock events. 
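The raid5 hunk earlier in this chunk keeps the per-CPU scribble buffers but now guards them with a spinlock plus get_cpu_light(), an RT-tree primitive that pins the task to the current CPU via migrate_disable() without disabling preemption, instead of get_cpu()/put_cpu(). Below is a minimal sketch of that shape with invented names; as in the patch's raid456_cpu_up_prepare(), the per-CPU lock is expected to be initialised from a CPU-hotplug prepare callback.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/* Example-only per-CPU state, loosely analogous to struct raid5_percpu. */
struct demo_percpu {
	spinlock_t lock;	/* protection for -RT */
	void *scratch;
};

static DEFINE_PER_CPU(struct demo_percpu, demo_pcpu);

/* Would be registered as a CPUHP "prepare" callback in real code. */
static int demo_cpu_up_prepare(unsigned int cpu)
{
	spin_lock_init(&per_cpu_ptr(&demo_pcpu, cpu)->lock);
	return 0;
}

static void demo_run_ops(void)
{
	struct demo_percpu *percpu;
	int cpu;

	cpu = get_cpu_light();		/* RT: no migration, still preemptible */
	percpu = per_cpu_ptr(&demo_pcpu, cpu);
	spin_lock(&percpu->lock);	/* sleeping lock on -RT */

	/* ... use percpu->scratch ... */

	spin_unlock(&percpu->lock);
	put_cpu_light();
}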
++ ++ + config DUMMY_IRQ + tristate "Dummy IRQ handler" + default n +diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c +index 598201645..953ff54dd 100644 +--- a/drivers/net/phy/fixed_phy.c ++++ b/drivers/net/phy/fixed_phy.c +@@ -23,7 +23,6 @@ + #include + #include + #include +-#include + #include + + #include "swphy.h" +@@ -36,7 +35,6 @@ struct fixed_mdio_bus { + struct fixed_phy { + int addr; + struct phy_device *phydev; +- seqcount_t seqcount; + struct fixed_phy_status status; + int (*link_update)(struct net_device *, struct fixed_phy_status *); + struct list_head node; +@@ -62,18 +60,15 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) + list_for_each_entry(fp, &fmb->phys, node) { + if (fp->addr == phy_addr) { + struct fixed_phy_status state; +- int s; +- +- do { +- s = read_seqcount_begin(&fp->seqcount); +- /* Issue callback if user registered it. */ +- if (fp->link_update) +- fp->link_update(fp->phydev->attached_dev, +- &fp->status); +- /* Check the GPIO for change in status */ +- fixed_phy_update(fp); +- state = fp->status; +- } while (read_seqcount_retry(&fp->seqcount, s)); ++ ++ /* Issue callback if user registered it. */ ++ if (fp->link_update) ++ fp->link_update(fp->phydev->attached_dev, ++ &fp->status); ++ ++ /* Check the GPIO for change in status */ ++ fixed_phy_update(fp); ++ state = fp->status; + + return swphy_read_reg(reg_num, &state); + } +@@ -131,8 +126,6 @@ int fixed_phy_add(unsigned int irq, int phy_addr, + if (!fp) + return -ENOMEM; + +- seqcount_init(&fp->seqcount); +- + if (irq != PHY_POLL) + fmb->mii_bus->irq[phy_addr] = irq; + +diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +index 94ad6fe29..52a49f0bb 100644 +--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c ++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +@@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, + while (!ctx->done.done && msecs--) + udelay(1000); + } else { +- wait_event_interruptible(ctx->done.wait, +- ctx->done.done); ++ swait_event_interruptible_exclusive(ctx->done.wait, ++ ctx->done.done); + } + break; + default: +diff --git a/drivers/of/base.c b/drivers/of/base.c +index f0dbb7ad8..c59b30bab 100644 +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -130,31 +130,34 @@ static u32 phandle_cache_mask; + /* + * Caller must hold devtree_lock. 
+ */ +-static void __of_free_phandle_cache(void) ++static struct device_node** __of_free_phandle_cache(void) + { + u32 cache_entries = phandle_cache_mask + 1; + u32 k; ++ struct device_node **shadow; + + if (!phandle_cache) +- return; ++ return NULL; + + for (k = 0; k < cache_entries; k++) + of_node_put(phandle_cache[k]); + +- kfree(phandle_cache); ++ shadow = phandle_cache; + phandle_cache = NULL; ++ return shadow; + } + + int of_free_phandle_cache(void) + { + unsigned long flags; ++ struct device_node **shadow; + + raw_spin_lock_irqsave(&devtree_lock, flags); + +- __of_free_phandle_cache(); ++ shadow = __of_free_phandle_cache(); + + raw_spin_unlock_irqrestore(&devtree_lock, flags); +- ++ kfree(shadow); + return 0; + } + #if !defined(CONFIG_MODULES) +@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void) + u32 cache_entries; + struct device_node *np; + u32 phandles = 0; ++ struct device_node **shadow; + + raw_spin_lock_irqsave(&devtree_lock, flags); + +- __of_free_phandle_cache(); ++ shadow = __of_free_phandle_cache(); + + for_each_of_allnodes(np) + if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) +@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void) + + if (!phandles) + goto out; ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + + cache_entries = roundup_pow_of_two(phandles); + phandle_cache_mask = cache_entries - 1; + + phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache), + GFP_ATOMIC); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + if (!phandle_cache) + goto out; + +@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void) + + out: + raw_spin_unlock_irqrestore(&devtree_lock, flags); ++ kfree(shadow); + } + + void __init of_core_init(void) +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c +index 5aaa4ce04..45cebf92a 100644 +--- a/drivers/pci/switch/switchtec.c ++++ b/drivers/pci/switch/switchtec.c +@@ -43,10 +43,11 @@ struct switchtec_user { + + enum mrpc_state state; + +- struct completion comp; ++ wait_queue_head_t cmd_comp; + struct kref kref; + struct list_head list; + ++ bool cmd_done; + u32 cmd; + u32 status; + u32 return_code; +@@ -68,7 +69,7 @@ static struct switchtec_user *stuser_create(struct switchtec_dev *stdev) + stuser->stdev = stdev; + kref_init(&stuser->kref); + INIT_LIST_HEAD(&stuser->list); +- init_completion(&stuser->comp); ++ init_waitqueue_head(&stuser->cmd_comp); + stuser->event_cnt = atomic_read(&stdev->event_cnt); + + dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); +@@ -151,7 +152,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) + kref_get(&stuser->kref); + stuser->read_len = sizeof(stuser->data); + stuser_set_state(stuser, MRPC_QUEUED); +- init_completion(&stuser->comp); ++ stuser->cmd_done = false; + list_add_tail(&stuser->list, &stdev->mrpc_queue); + + mrpc_cmd_submit(stdev); +@@ -188,7 +189,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev) + stuser->read_len); + + out: +- complete_all(&stuser->comp); ++ stuser->cmd_done = true; ++ wake_up_interruptible(&stuser->cmd_comp); + list_del_init(&stuser->list); + stuser_put(stuser); + stdev->mrpc_busy = 0; +@@ -358,7 +360,7 @@ static int switchtec_dev_open(struct inode *inode, struct file *filp) + return PTR_ERR(stuser); + + filp->private_data = stuser; +- nonseekable_open(inode, filp); ++ stream_open(inode, filp); + + dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); + +@@ -458,10 +460,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data, + mutex_unlock(&stdev->mrpc_mutex); + + if (filp->f_flags & 
O_NONBLOCK) { +- if (!try_wait_for_completion(&stuser->comp)) ++ if (!READ_ONCE(stuser->cmd_done)) + return -EAGAIN; + } else { +- rc = wait_for_completion_interruptible(&stuser->comp); ++ rc = wait_event_interruptible(stuser->cmd_comp, ++ stuser->cmd_done); + if (rc < 0) + return rc; + } +@@ -509,7 +512,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) + struct switchtec_dev *stdev = stuser->stdev; + __poll_t ret = 0; + +- poll_wait(filp, &stuser->comp.wait, wait); ++ poll_wait(filp, &stuser->cmd_comp, wait); + poll_wait(filp, &stdev->event_wq, wait); + + if (lock_mutex_and_test_alive(stdev)) +@@ -517,7 +520,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) + + mutex_unlock(&stdev->mrpc_mutex); + +- if (try_wait_for_completion(&stuser->comp)) ++ if (READ_ONCE(stuser->cmd_done)) + ret |= EPOLLIN | EPOLLRDNORM; + + if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) +@@ -1041,7 +1044,8 @@ static void stdev_kill(struct switchtec_dev *stdev) + + /* Wake up and kill any users waiting on an MRPC request */ + list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) { +- complete_all(&stuser->comp); ++ stuser->cmd_done = true; ++ wake_up_interruptible(&stuser->cmd_comp); + list_del_init(&stuser->list); + stuser_put(stuser); + } +diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c +index 6768b2e81..c20f51af6 100644 +--- a/drivers/scsi/fcoe/fcoe.c ++++ b/drivers/scsi/fcoe/fcoe.c +@@ -1459,11 +1459,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, + static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) + { + struct fcoe_percpu_s *fps; +- int rc; ++ int rc, cpu = get_cpu_light(); + +- fps = &get_cpu_var(fcoe_percpu); ++ fps = &per_cpu(fcoe_percpu, cpu); + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); +- put_cpu_var(fcoe_percpu); ++ put_cpu_light(); + + return rc; + } +@@ -1650,11 +1650,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, + return 0; + } + +- stats = per_cpu_ptr(lport->stats, get_cpu()); ++ stats = per_cpu_ptr(lport->stats, get_cpu_light()); + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); +- put_cpu(); ++ put_cpu_light(); + return -EINVAL; + } + +@@ -1697,7 +1697,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) + */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + +- stats = per_cpu_ptr(lport->stats, get_cpu()); ++ stats = per_cpu_ptr(lport->stats, get_cpu_light()); + if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { + if (stats->ErrorFrames < 5) + printk(KERN_WARNING "fcoe: FCoE version " +@@ -1729,13 +1729,13 @@ static void fcoe_recv_frame(struct sk_buff *skb) + goto drop; + + if (!fcoe_filter_frames(lport, fp)) { +- put_cpu(); ++ put_cpu_light(); + fc_exch_recv(lport, fp); + return; + } + drop: + stats->ErrorFrames++; +- put_cpu(); ++ put_cpu_light(); + kfree_skb(skb); + } + +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c +index 658c07265..bceab74ee 100644 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -838,7 +838,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) + + INIT_LIST_HEAD(&del_list); + +- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); ++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; +@@ -874,7 +874,7 @@ static unsigned long 
fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) + sel_time = fcf->time; + } + } +- put_cpu(); ++ put_cpu_light(); + + list_for_each_entry_safe(fcf, next, &del_list, list) { + /* Removes fcf from current list */ +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c +index 6ba257cbc..d2398a19f 100644 +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, + } + memset(ep, 0, sizeof(*ep)); + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); +- put_cpu(); ++ put_cpu_light(); + + /* peek cache of free slot */ + if (pool->left != FC_XID_UNKNOWN) { +diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c +index 22571abca..78a529d36 100644 +--- a/drivers/staging/android/vsoc.c ++++ b/drivers/staging/android/vsoc.c +@@ -437,12 +437,10 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) + return -EINVAL; + wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec); + +- hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC, +- HRTIMER_MODE_ABS); ++ hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC, ++ HRTIMER_MODE_ABS, current); + hrtimer_set_expires_range_ns(&to->timer, wake_time, + current->timer_slack_ns); +- +- hrtimer_init_sleeper(to, current); + } + + while (1) { +diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c +index 1ef937d79..540becb78 100644 +--- a/drivers/thermal/x86_pkg_temp_thermal.c ++++ b/drivers/thermal/x86_pkg_temp_thermal.c +@@ -75,7 +75,7 @@ static int max_packages __read_mostly; + /* Array of package pointers */ + static struct pkg_device **packages; + /* Serializes interrupt notification, work and hotplug */ +-static DEFINE_SPINLOCK(pkg_temp_lock); ++static DEFINE_RAW_SPINLOCK(pkg_temp_lock); + /* Protects zone operation in the work function against hotplug removal */ + static DEFINE_MUTEX(thermal_zone_mutex); + +@@ -291,12 +291,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) + u64 msr_val, wr_val; + + mutex_lock(&thermal_zone_mutex); +- spin_lock_irq(&pkg_temp_lock); ++ raw_spin_lock_irq(&pkg_temp_lock); + ++pkg_work_cnt; + + pkgdev = pkg_temp_thermal_get_dev(cpu); + if (!pkgdev) { +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + mutex_unlock(&thermal_zone_mutex); + return; + } +@@ -310,7 +310,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) + } + + enable_pkg_thres_interrupt(); +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + + /* + * If tzone is not NULL, then thermal_zone_mutex will prevent the +@@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_val) + struct pkg_device *pkgdev; + unsigned long flags; + +- spin_lock_irqsave(&pkg_temp_lock, flags); ++ raw_spin_lock_irqsave(&pkg_temp_lock, flags); + ++pkg_interrupt_cnt; + + disable_pkg_thres_interrupt(); +@@ -347,7 +347,7 @@ static int pkg_thermal_notify(u64 msr_val) + pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work); + } + +- spin_unlock_irqrestore(&pkg_temp_lock, flags); ++ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags); + return 0; + } + +@@ -393,9 +393,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) + pkgdev->msr_pkg_therm_high); + + cpumask_set_cpu(cpu, &pkgdev->cpumask); +- spin_lock_irq(&pkg_temp_lock); ++ raw_spin_lock_irq(&pkg_temp_lock); + packages[pkgid] = pkgdev; +- spin_unlock_irq(&pkg_temp_lock); 
++ raw_spin_unlock_irq(&pkg_temp_lock); + return 0; + } + +@@ -432,7 +432,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) + } + + /* Protect against work and interrupts */ +- spin_lock_irq(&pkg_temp_lock); ++ raw_spin_lock_irq(&pkg_temp_lock); + + /* + * Check whether this cpu was the current target and store the new +@@ -464,9 +464,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) + * To cancel the work we need to drop the lock, otherwise + * we might deadlock if the work needs to be flushed. + */ +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + cancel_delayed_work_sync(&pkgdev->work); +- spin_lock_irq(&pkg_temp_lock); ++ raw_spin_lock_irq(&pkg_temp_lock); + /* + * If this is not the last cpu in the package and the work + * did not run after we dropped the lock above, then we +@@ -477,7 +477,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) + pkg_thermal_schedule_work(target, &pkgdev->work); + } + +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + + /* Final cleanup if this is the last cpu */ + if (lastcpu) +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c +index 373f34bb4..7ce7ae3e9 100644 +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -54,7 +54,16 @@ static struct uart_driver serial8250_reg; + + static unsigned int skip_txen_test; /* force skip of txen test at init time */ + +-#define PASS_LIMIT 512 ++/* ++ * On -rt we can have a more delays, and legitimately ++ * so - so don't drop work spuriously and spam the ++ * syslog: ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define PASS_LIMIT 1000000 ++#else ++# define PASS_LIMIT 512 ++#endif + + #include + /* +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index 1867c2546..4ca464082 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -3262,9 +3263,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, + + serial8250_rpm_get(up); + +- if (port->sysrq) ++ if (port->sysrq || oops_in_progress) + locked = 0; +- else if (oops_in_progress) ++ else if (in_kdb_printk()) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index aae97acd1..9af2b415d 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2257,18 +2257,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) + { + struct uart_amba_port *uap = amba_ports[co->index]; + unsigned int old_cr = 0, new_cr; +- unsigned long flags; ++ unsigned long flags = 0; + int locked = 1; + + clk_enable(uap->clk); + +- local_irq_save(flags); ++ /* ++ * local_irq_save(flags); ++ * ++ * This local_irq_save() is nonsense. If we come in via sysrq ++ * handling then interrupts are already disabled. Aside of ++ * that the port.sysrq check is racy on SMP regardless. 
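The x86_pkg_temp_thermal and sysrq hunks above promote pkg_temp_lock and show_lock from spinlock_t to raw_spinlock_t. On PREEMPT_RT a spinlock_t turns into a sleeping lock, so it must not be taken on paths that remain genuinely atomic even on -rt, such as the thermal interrupt notifier or the IPI callback behind showacpu(); switching the type keeps a real spinning lock there. A generic sketch of the pattern, with a hypothetical lock and callback name:

#include <linux/spinlock.h>
#include <linux/smp.h>

static DEFINE_RAW_SPINLOCK(hypo_lock);	/* stays a spinning lock on -rt */
static unsigned long hypo_events;

/* Runs from the SMP call-function (IPI) path, which is atomic even on -rt,
 * so only a raw spinlock may be taken here. */
static void hypo_on_cpu(void *unused)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&hypo_lock, flags);
	hypo_events++;
	raw_spin_unlock_irqrestore(&hypo_lock, flags);
}

static void hypo_kick_all_cpus(void)
{
	/* Roughly the shape the sysrq code uses to run showacpu() everywhere. */
	on_each_cpu(hypo_on_cpu, NULL, 1);
}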
++ */ + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) +- locked = spin_trylock(&uap->port.lock); ++ locked = spin_trylock_irqsave(&uap->port.lock, flags); + else +- spin_lock(&uap->port.lock); ++ spin_lock_irqsave(&uap->port.lock, flags); + + /* + * First save the CR then disable the interrupts +@@ -2294,8 +2300,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) + pl011_write(old_cr, uap, REG_CR); + + if (locked) +- spin_unlock(&uap->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&uap->port.lock, flags); + + clk_disable(uap->clk); + } +diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c +index 6420ae581..0f4f41ed9 100644 +--- a/drivers/tty/serial/omap-serial.c ++++ b/drivers/tty/serial/omap-serial.c +@@ -1307,13 +1307,10 @@ serial_omap_console_write(struct console *co, const char *s, + + pm_runtime_get_sync(up->dev); + +- local_irq_save(flags); +- if (up->port.sysrq) +- locked = 0; +- else if (oops_in_progress) +- locked = spin_trylock(&up->port.lock); ++ if (up->port.sysrq || oops_in_progress) ++ locked = spin_trylock_irqsave(&up->port.lock, flags); + else +- spin_lock(&up->port.lock); ++ spin_lock_irqsave(&up->port.lock, flags); + + /* + * First save the IER then disable the interrupts +@@ -1342,8 +1339,7 @@ serial_omap_console_write(struct console *co, const char *s, + pm_runtime_mark_last_busy(up->dev); + pm_runtime_put_autosuspend(up->dev); + if (locked) +- spin_unlock(&up->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&up->port.lock, flags); + } + + static int __init +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c +index 72a8c7009..d0b366955 100644 +--- a/drivers/tty/sysrq.c ++++ b/drivers/tty/sysrq.c +@@ -215,7 +215,7 @@ static struct sysrq_key_op sysrq_showlocks_op = { + #endif + + #ifdef CONFIG_SMP +-static DEFINE_SPINLOCK(show_lock); ++static DEFINE_RAW_SPINLOCK(show_lock); + + static void showacpu(void *dummy) + { +@@ -225,10 +225,10 @@ static void showacpu(void *dummy) + if (idle_cpu(smp_processor_id())) + return; + +- spin_lock_irqsave(&show_lock, flags); ++ raw_spin_lock_irqsave(&show_lock, flags); + pr_info("CPU%d:\n", smp_processor_id()); + show_stack(NULL, NULL); +- spin_unlock_irqrestore(&show_lock, flags); ++ raw_spin_unlock_irqrestore(&show_lock, flags); + } + + static void sysrq_showregs_othercpus(struct work_struct *dummy) +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index b82a7d787..2f3015356 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -1738,7 +1738,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb) + struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); + struct usb_anchor *anchor = urb->anchor; + int status = urb->unlinked; +- unsigned long flags; + + urb->hcpriv = NULL; + if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && +@@ -1766,9 +1765,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb) + * and no one may trigger the above deadlock situation when + * running complete() in tasklet. 
+ */ +- local_irq_save(flags); + urb->complete(urb); +- local_irq_restore(flags); + + usb_anchor_resume_wakeups(anchor); + atomic_dec(&urb->use_count); +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index 2050993fb..e2ca75a6e 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -1626,7 +1626,7 @@ static void ffs_data_put(struct ffs_data *ffs) + pr_info("%s(): freeing\n", __func__); + ffs_data_clear(ffs); + BUG_ON(waitqueue_active(&ffs->ev.waitq) || +- waitqueue_active(&ffs->ep0req_completion.wait) || ++ swait_active(&ffs->ep0req_completion.wait) || + waitqueue_active(&ffs->wait)); + destroy_workqueue(ffs->io_completion_wq); + kfree(ffs->dev_name); +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c +index 6528df6f3..8ffc59405 100644 +--- a/drivers/usb/gadget/legacy/inode.c ++++ b/drivers/usb/gadget/legacy/inode.c +@@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) + spin_unlock_irq (&epdata->dev->lock); + + if (likely (value == 0)) { +- value = wait_event_interruptible (done.wait, done.done); ++ value = swait_event_interruptible_exclusive(done.wait, done.done); + if (value != 0) { + spin_lock_irq (&epdata->dev->lock); + if (likely (epdata->ep != NULL)) { +@@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) + usb_ep_dequeue (epdata->ep, epdata->req); + spin_unlock_irq (&epdata->dev->lock); + +- wait_event (done.wait, done.done); ++ swait_event_exclusive(done.wait, done.done); + if (epdata->status == -ECONNRESET) + epdata->status = -EINTR; + } else { +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c +index 2b652df44..3737ab178 100644 +--- a/drivers/watchdog/watchdog_dev.c ++++ b/drivers/watchdog/watchdog_dev.c +@@ -145,7 +145,7 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd) + ktime_t t = watchdog_next_keepalive(wdd); + + if (t > 0) +- hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL); ++ hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL_HARD); + } else { + hrtimer_cancel(&wd_data->timer); + } +@@ -164,7 +164,7 @@ static int __watchdog_ping(struct watchdog_device *wdd) + if (ktime_after(earliest_keepalive, now)) { + hrtimer_start(&wd_data->timer, + ktime_sub(earliest_keepalive, now), +- HRTIMER_MODE_REL); ++ HRTIMER_MODE_REL_HARD); + return 0; + } + +@@ -1022,7 +1022,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + + kthread_init_work(&wd_data->work, watchdog_ping_work); +- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + wd_data->timer.function = watchdog_timer_expired; + + if (wdd->id == 0) { +@@ -1070,7 +1070,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) + __module_get(wdd->ops->owner); + get_device(&wd_data->dev); + if (handle_boot_enabled) +- hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL); ++ hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL_HARD); + else + pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n", + wdd->id); +diff --git a/fs/aio.c b/fs/aio.c +index d221260b8..1b16d4e18 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -121,6 +121,7 @@ struct kioctx { + long nr_pages; + + struct rcu_work free_rwork; /* see free_ioctx() */ ++ struct work_struct free_work; /* see free_ioctx() */ + + /* + * signals when all in-flight requests are done +@@ -608,9 
+609,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) + * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - + * now it's safe to cancel any that need to be. + */ +-static void free_ioctx_users(struct percpu_ref *ref) ++static void free_ioctx_users_work(struct work_struct *work) + { +- struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ struct kioctx *ctx = container_of(work, struct kioctx, free_work); + struct aio_kiocb *req; + + spin_lock_irq(&ctx->ctx_lock); +@@ -628,6 +629,14 @@ static void free_ioctx_users(struct percpu_ref *ref) + percpu_ref_put(&ctx->reqs); + } + ++static void free_ioctx_users(struct percpu_ref *ref) ++{ ++ struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ ++ INIT_WORK(&ctx->free_work, free_ioctx_users_work); ++ schedule_work(&ctx->free_work); ++} ++ + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) + { + unsigned i, new_nr; +diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c +index 70e9afe58..1a6b88ad4 100644 +--- a/fs/autofs/expire.c ++++ b/fs/autofs/expire.c +@@ -8,6 +8,7 @@ + * option, any later version, incorporated herein by reference. + */ + ++#include + #include "autofs_i.h" + + /* Check if a dentry can be expired */ +@@ -153,7 +154,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev, + parent = p->d_parent; + if (!spin_trylock(&parent->d_lock)) { + spin_unlock(&p->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto relock; + } + spin_unlock(&p->d_lock); +diff --git a/fs/buffer.c b/fs/buffer.c +index 2a213e8bb..b6ac68803 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -274,8 +274,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) + * decide that the page is now completely done. + */ + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -288,8 +287,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + + /* + * If none of the buffers had errors and they are all +@@ -301,9 +299,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /* +@@ -330,8 +326,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) + } + + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + + clear_buffer_async_write(bh); + unlock_buffer(bh); +@@ -343,15 +338,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) + } + tmp = tmp->b_this_page; + } +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + end_page_writeback(page); + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + EXPORT_SYMBOL(end_buffer_async_write); + +@@ -3373,6 +3365,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); + if 
(ret) { + INIT_LIST_HEAD(&ret->b_assoc_buffers); ++ buffer_head_init_locks(ret); + preempt_disable(); + __this_cpu_inc(bh_accounting.nr); + recalc_bh_state(); +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c +index 3925a7bfc..33f7723fb 100644 +--- a/fs/cifs/readdir.c ++++ b/fs/cifs/readdir.c +@@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, + struct inode *inode; + struct super_block *sb = parent->d_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + + cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); + +diff --git a/fs/dcache.c b/fs/dcache.c +index ef1641398..a620f5553 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -2472,9 +2472,10 @@ EXPORT_SYMBOL(d_rehash); + static inline unsigned start_dir_add(struct inode *dir) + { + ++ preempt_disable_rt(); + for (;;) { +- unsigned n = dir->i_dir_seq; +- if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) ++ unsigned n = dir->__i_dir_seq; ++ if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n) + return n; + cpu_relax(); + } +@@ -2482,26 +2483,30 @@ static inline unsigned start_dir_add(struct inode *dir) + + static inline void end_dir_add(struct inode *dir, unsigned n) + { +- smp_store_release(&dir->i_dir_seq, n + 2); ++ smp_store_release(&dir->__i_dir_seq, n + 2); ++ preempt_enable_rt(); + } + + static void d_wait_lookup(struct dentry *dentry) + { +- if (d_in_lookup(dentry)) { +- DECLARE_WAITQUEUE(wait, current); +- add_wait_queue(dentry->d_wait, &wait); +- do { +- set_current_state(TASK_UNINTERRUPTIBLE); +- spin_unlock(&dentry->d_lock); +- schedule(); +- spin_lock(&dentry->d_lock); +- } while (d_in_lookup(dentry)); +- } ++ struct swait_queue __wait; ++ ++ if (!d_in_lookup(dentry)) ++ return; ++ ++ INIT_LIST_HEAD(&__wait.task_list); ++ do { ++ prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); ++ spin_unlock(&dentry->d_lock); ++ schedule(); ++ spin_lock(&dentry->d_lock); ++ } while (d_in_lookup(dentry)); ++ finish_swait(dentry->d_wait, &__wait); + } + + struct dentry *d_alloc_parallel(struct dentry *parent, + const struct qstr *name, +- wait_queue_head_t *wq) ++ struct swait_queue_head *wq) + { + unsigned int hash = name->hash; + struct hlist_bl_head *b = in_lookup_hash(parent, hash); +@@ -2515,7 +2520,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, + + retry: + rcu_read_lock(); +- seq = smp_load_acquire(&parent->d_inode->i_dir_seq); ++ seq = smp_load_acquire(&parent->d_inode->__i_dir_seq); + r_seq = read_seqbegin(&rename_lock); + dentry = __d_lookup_rcu(parent, name, &d_seq); + if (unlikely(dentry)) { +@@ -2543,7 +2548,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, + } + + hlist_bl_lock(b); +- if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { ++ if (unlikely(READ_ONCE(parent->d_inode->__i_dir_seq) != seq)) { + hlist_bl_unlock(b); + rcu_read_unlock(); + goto retry; +@@ -2616,7 +2621,7 @@ void __d_lookup_done(struct dentry *dentry) + hlist_bl_lock(b); + dentry->d_flags &= ~DCACHE_PAR_LOOKUP; + __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); +- wake_up_all(dentry->d_wait); ++ swake_up_all(dentry->d_wait); + dentry->d_wait = NULL; + hlist_bl_unlock(b); + INIT_HLIST_NODE(&dentry->d_u.d_alias); +@@ -3128,6 +3133,8 @@ __setup("dhash_entries=", set_dhash_entries); + + static void __init dcache_init_early(void) + { ++ unsigned int loop; ++ + /* If hashes are distributed across NUMA nodes, defer + * hash allocation until vmalloc space is available. 
+ */ +@@ -3144,11 +3151,16 @@ static void __init dcache_init_early(void) + NULL, + 0, + 0); ++ ++ for (loop = 0; loop < (1U << d_hash_shift); loop++) ++ INIT_HLIST_BL_HEAD(dentry_hashtable + loop); ++ + d_hash_shift = 32 - d_hash_shift; + } + + static void __init dcache_init(void) + { ++ unsigned int loop; + /* + * A constructor could be added for stable state like the lists, + * but it is probably not worth it because of the cache nature +@@ -3172,6 +3184,10 @@ static void __init dcache_init(void) + NULL, + 0, + 0); ++ ++ for (loop = 0; loop < (1U << d_hash_shift); loop++) ++ INIT_HLIST_BL_HEAD(dentry_hashtable + loop); ++ + d_hash_shift = 32 - d_hash_shift; + } + +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index d46007154..626baf547 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -563,12 +563,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests) + + static void ep_poll_safewake(wait_queue_head_t *wq) + { +- int this_cpu = get_cpu(); ++ int this_cpu = get_cpu_light(); + + ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); + +- put_cpu(); ++ put_cpu_light(); + } + + #else +diff --git a/fs/exec.c b/fs/exec.c +index 426842c38..e9e848b37 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1028,6 +1028,7 @@ static int exec_mmap(struct mm_struct *mm) + } + } + task_lock(tsk); ++ preempt_disable_rt(); + + local_irq_disable(); + active_mm = tsk->active_mm; +@@ -1048,6 +1049,7 @@ static int exec_mmap(struct mm_struct *mm) + local_irq_enable(); + tsk->mm->vmacache_seqnum = 0; + vmacache_flush(tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + if (old_mm) { + up_read(&old_mm->mmap_sem); +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c +index 9cc79b7b0..3f4ba2011 100644 +--- a/fs/ext4/page-io.c ++++ b/fs/ext4/page-io.c +@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio) + * We check all buffers in the page under BH_Uptodate_Lock + * to avoid races with other end io clearing async_write flags + */ +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &head->b_state); ++ flags = bh_uptodate_lock_irqsave(head); + do { + if (bh_offset(bh) < bio_start || + bh_offset(bh) + bh->b_size > bio_end) { +@@ -108,8 +107,7 @@ static void ext4_finish_bio(struct bio *bio) + if (bio->bi_status) + buffer_io_error(bh); + } while ((bh = bh->b_this_page) != head); +- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(head, flags); + if (!under_io) { + #ifdef CONFIG_EXT4_FS_ENCRYPTION + if (data_page) +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c +index c550512ce..fe69e46ba 100644 +--- a/fs/fscache/cookie.c ++++ b/fs/fscache/cookie.c +@@ -962,3 +962,11 @@ int __fscache_check_consistency(struct fscache_cookie *cookie, + return -ESTALE; + } + EXPORT_SYMBOL(__fscache_check_consistency); ++ ++void __init fscache_cookie_init(void) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(fscache_cookie_hash); i++) ++ INIT_HLIST_BL_HEAD(&fscache_cookie_hash[i]); ++} +diff --git a/fs/fscache/main.c b/fs/fscache/main.c +index 30ad89db1..1d5f1d679 100644 +--- a/fs/fscache/main.c ++++ b/fs/fscache/main.c +@@ -149,6 +149,7 @@ static int __init fscache_init(void) + ret = -ENOMEM; + goto error_cookie_jar; + } ++ fscache_cookie_init(); + + fscache_root = kobject_create_and_add("fscache", kernel_kobj); + if (!fscache_root) +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index b468ccb29..8dcacd08e 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -1234,7 +1234,7 @@ 
static int fuse_direntplus_link(struct file *file, + struct inode *dir = d_inode(parent); + struct fuse_conn *fc; + struct inode *inode; +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + + if (!o->nodeid) { + /* +diff --git a/fs/inode.c b/fs/inode.c +index c9eb5041f..e1fb0b743 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -156,7 +156,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) + inode->i_bdev = NULL; + inode->i_cdev = NULL; + inode->i_link = NULL; +- inode->i_dir_seq = 0; ++ inode->__i_dir_seq = 0; + inode->i_rdev = 0; + inode->dirtied_when = 0; + +diff --git a/fs/locks.c b/fs/locks.c +index 9dc72fda5..bfdb9d238 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -936,7 +936,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) + return -ENOMEM; + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + if (request->fl_flags & FL_ACCESS) + goto find_conflict; +@@ -977,7 +977,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) + + out: + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + if (new_fl) + locks_free_lock(new_fl); + locks_dispose_list(&dispose); +@@ -1015,7 +1015,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, + new_fl2 = locks_alloc_lock(); + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + /* + * New lock request. Walk all POSIX locks and look for conflicts. If +@@ -1187,7 +1187,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, + } + out: + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + /* + * Free any unused locks. 
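Several of the VFS hunks above (dcache, namei, proc, fuse, cifs, nfs) replace wait_queue_head_t with struct swait_queue_head for in-lookup dentries: DECLARE_WAIT_QUEUE_HEAD_ONSTACK becomes DECLARE_SWAIT_QUEUE_HEAD_ONSTACK, wake_up_all() becomes swake_up_all(), and d_wait_lookup() is rewritten around prepare_to_swait_exclusive()/finish_swait(). Simple wait queues keep a raw lock internally and a short wake-up path, which is what lets __d_lookup_done() wake waiters while holding the hashed bit lock on -rt. A stand-alone sketch of that wait/wake shape, with hypothetical names and none of the dcache specifics:

#include <linux/swait.h>
#include <linux/sched.h>

static DECLARE_SWAIT_QUEUE_HEAD(hypo_wq);
static bool hypo_done;

/* Waiter side: the same shape as the loop d_wait_lookup() now uses. */
static void hypo_wait(void)
{
	DECLARE_SWAITQUEUE(wait);

	while (!READ_ONCE(hypo_done)) {
		prepare_to_swait_exclusive(&hypo_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(hypo_done))
			break;
		schedule();
	}
	finish_swait(&hypo_wq, &wait);
}

/* Waker side: the counterpart of __d_lookup_done(). */
static void hypo_complete(void)
{
	WRITE_ONCE(hypo_done, true);
	swake_up_all(&hypo_wq);
}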
+ */ +@@ -1462,7 +1462,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) + goto free_lock; + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + + time_out_leases(inode, &dispose); +@@ -1514,13 +1514,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) + locks_insert_block(fl, new_fl); + trace_break_lease_block(inode, new_fl); + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + + locks_dispose_list(&dispose); + error = wait_event_interruptible_timeout(new_fl->fl_wait, + !new_fl->fl_next, break_time); + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + trace_break_lease_unblock(inode, new_fl); + locks_delete_block(new_fl); +@@ -1537,7 +1537,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) + } + out: + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + locks_dispose_list(&dispose); + free_lock: + locks_free_lock(new_fl); +@@ -1610,7 +1610,7 @@ int fcntl_getlease(struct file *filp) + + ctx = smp_load_acquire(&inode->i_flctx); + if (ctx && !list_empty_careful(&ctx->flc_lease)) { +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + time_out_leases(inode, &dispose); + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { +@@ -1620,7 +1620,7 @@ int fcntl_getlease(struct file *filp) + break; + } + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + + locks_dispose_list(&dispose); + } +@@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr + return -EINVAL; + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + time_out_leases(inode, &dispose); + error = check_conflicting_open(dentry, arg, lease->fl_flags); +@@ -1765,7 +1765,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr + lease->fl_lmops->lm_setup(lease, priv); + out: + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + locks_dispose_list(&dispose); + if (is_deleg) + inode_unlock(inode); +@@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct file *filp, void *owner) + return error; + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { + if (fl->fl_file == filp && +@@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct file *filp, void *owner) + if (victim) + error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + locks_dispose_list(&dispose); + return error; + } +@@ -2532,13 +2532,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx) + if (list_empty(&ctx->flc_lease)) + return; + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) + if (filp == fl->fl_file) + lease_modify(fl, F_UNLCK, &dispose); + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ 
percpu_up_read(&file_rwsem); + + locks_dispose_list(&dispose); + } +diff --git a/fs/namei.c b/fs/namei.c +index eeb2c064d..70d406ff7 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -1653,7 +1653,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, + { + struct dentry *dentry, *old; + struct inode *inode = dir->d_inode; +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + + /* Don't go there if it's already dead */ + if (unlikely(IS_DEADDIR(inode))) +@@ -3143,7 +3143,7 @@ static int lookup_open(struct nameidata *nd, struct path *path, + struct dentry *dentry; + int error, create_error = 0; + umode_t mode = op->mode; +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + + if (unlikely(IS_DEADDIR(dir_inode))) + return -ENOENT; +diff --git a/fs/namespace.c b/fs/namespace.c +index f47d4850b..ca764475c 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -328,8 +329,11 @@ int __mnt_want_write(struct vfsmount *m) + * incremented count after it has set MNT_WRITE_HOLD. + */ + smp_mb(); +- while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) +- cpu_relax(); ++ while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { ++ preempt_enable(); ++ cpu_chill(); ++ preempt_disable(); ++ } + /* + * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will + * be set to match its requirements. So we must not load that until +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c +index 98811a077..ec6ccb228 100644 +--- a/fs/nfs/delegation.c ++++ b/fs/nfs/delegation.c +@@ -162,11 +162,11 @@ static int nfs_delegation_claim_opens(struct inode *inode, + sp = state->owner; + /* Block nfs4_proc_unlck */ + mutex_lock(&sp->so_delegreturn_mutex); +- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); ++ seq = read_seqbegin(&sp->so_reclaim_seqlock); + err = nfs4_open_delegation_recall(ctx, state, stateid); + if (!err) + err = nfs_delegation_claim_locks(state, stateid); +- if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) ++ if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq)) + err = -EAGAIN; + mutex_unlock(&sp->so_delegreturn_mutex); + put_nfs_open_context(ctx); +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 67ece39c7..86e5c60b5 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -457,7 +457,7 @@ static + void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) + { + struct qstr filename = QSTR_INIT(entry->name, entry->len); +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + struct dentry *dentry; + struct dentry *alias; + struct inode *dir = d_inode(parent); +@@ -1530,7 +1530,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned open_flags, + umode_t mode) + { +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + struct nfs_open_context *ctx; + struct dentry *res; + struct iattr attr = { .ia_valid = ATTR_OPEN }; +@@ -1876,7 +1876,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) + + trace_nfs_rmdir_enter(dir, dentry); + if (d_really_is_positive(dentry)) { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ down(&NFS_I(d_inode(dentry))->rmdir_sem); ++#else + down_write(&NFS_I(d_inode(dentry))->rmdir_sem); ++#endif + error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); + /* Ensure the VFS deletes this inode */ + switch (error) { +@@ -1886,7 +1890,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) + case 
-ENOENT: + nfs_dentry_handle_enoent(dentry); + } ++#ifdef CONFIG_PREEMPT_RT_BASE ++ up(&NFS_I(d_inode(dentry))->rmdir_sem); ++#else + up_write(&NFS_I(d_inode(dentry))->rmdir_sem); ++#endif + } else + error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); + trace_nfs_rmdir_exit(dir, dentry, error); +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 6d5fedcd3..621e9bef1 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -2116,7 +2116,11 @@ static void init_once(void *foo) + atomic_long_set(&nfsi->nrequests, 0); + atomic_long_set(&nfsi->commit_info.ncommit, 0); + atomic_set(&nfsi->commit_info.rpcs_out, 0); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ sema_init(&nfsi->rmdir_sem, 1); ++#else + init_rwsem(&nfsi->rmdir_sem); ++#endif + mutex_init(&nfsi->commit_mutex); + nfs4_init_once(nfsi); + } +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h +index 4f3b5add0..96aea0e44 100644 +--- a/fs/nfs/nfs4_fs.h ++++ b/fs/nfs/nfs4_fs.h +@@ -114,7 +114,7 @@ struct nfs4_state_owner { + unsigned long so_flags; + struct list_head so_states; + struct nfs_seqid_counter so_seqid; +- seqcount_t so_reclaim_seqcount; ++ seqlock_t so_reclaim_seqlock; + struct mutex so_delegreturn_mutex; + }; + +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 24df1c068..2f09c476c 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -2918,7 +2918,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + unsigned int seq; + int ret; + +- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); ++ seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); + + ret = _nfs4_proc_open(opendata, ctx); + if (ret != 0) +@@ -2959,7 +2959,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + + if (d_inode(dentry) == state->inode) { + nfs_inode_attach_open_context(ctx); +- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) ++ if (read_seqretry(&sp->so_reclaim_seqlock, seq)) + nfs4_schedule_stateid_recovery(server, state); + } + +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 5ff516104..db44815d2 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -513,7 +513,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, + nfs4_init_seqid_counter(&sp->so_seqid); + atomic_set(&sp->so_count, 1); + INIT_LIST_HEAD(&sp->so_lru); +- seqcount_init(&sp->so_reclaim_seqcount); ++ seqlock_init(&sp->so_reclaim_seqlock); + mutex_init(&sp->so_delegreturn_mutex); + return sp; + } +@@ -1581,8 +1581,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs + * recovering after a network partition or a reboot from a + * server that doesn't support a grace period. 
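The nfs4 hunks convert so_reclaim_seqcount from a bare seqcount_t into a seqlock_t. Readers keep the begin/retry loop (read_seqbegin()/read_seqretry()), while the write side is wrapped either in write_seqlock() (PREEMPT_RT_FULL) or in write_seqcount_begin() on the embedded counter, as the #ifdef blocks above show; on -rt the seqlock's embedded lock is PI-aware, so a preempted writer can be boosted instead of leaving the sequence odd while higher-priority readers spin. The reader/writer shape, reduced to a hypothetical shared value:

#include <linux/types.h>
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(hypo_seqlock);
static u64 hypo_value;

static void hypo_update(u64 v)
{
	write_seqlock(&hypo_seqlock);	/* serializes writers via the embedded lock */
	hypo_value = v;
	write_sequnlock(&hypo_seqlock);
}

static u64 hypo_read(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqbegin(&hypo_seqlock);
		v = hypo_value;
	} while (read_seqretry(&hypo_seqlock, seq));

	return v;
}

The userfaultfd hunks further down make the same seqcount_t to seqlock_t switch for refile_seq.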
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ write_seqlock(&sp->so_reclaim_seqlock); ++#else ++ write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); ++#endif + spin_lock(&sp->so_lock); +- raw_write_seqcount_begin(&sp->so_reclaim_seqcount); + restart: + list_for_each_entry(state, &sp->so_states, open_states) { + if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) +@@ -1669,14 +1673,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs + spin_lock(&sp->so_lock); + goto restart; + } +- raw_write_seqcount_end(&sp->so_reclaim_seqcount); + spin_unlock(&sp->so_lock); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ write_sequnlock(&sp->so_reclaim_seqlock); ++#else ++ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); ++#endif + return 0; + out_err: + nfs4_put_open_state(state); +- spin_lock(&sp->so_lock); +- raw_write_seqcount_end(&sp->so_reclaim_seqcount); +- spin_unlock(&sp->so_lock); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ write_sequnlock(&sp->so_reclaim_seqlock); ++#else ++ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); ++#endif + return status; + } + +diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c +index fd61bf0fc..839bfa76f 100644 +--- a/fs/nfs/unlink.c ++++ b/fs/nfs/unlink.c +@@ -13,7 +13,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +@@ -52,6 +52,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) + rpc_restart_call_prepare(task); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void nfs_down_anon(struct semaphore *sema) ++{ ++ down(sema); ++} ++ ++static void nfs_up_anon(struct semaphore *sema) ++{ ++ up(sema); ++} ++ ++#else ++static void nfs_down_anon(struct rw_semaphore *rwsem) ++{ ++ down_read_non_owner(rwsem); ++} ++ ++static void nfs_up_anon(struct rw_semaphore *rwsem) ++{ ++ up_read_non_owner(rwsem); ++} ++#endif ++ + /** + * nfs_async_unlink_release - Release the sillydelete data. + * @task: rpc_task of the sillydelete +@@ -65,7 +88,7 @@ static void nfs_async_unlink_release(void *calldata) + struct dentry *dentry = data->dentry; + struct super_block *sb = dentry->d_sb; + +- up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); ++ nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); + d_lookup_done(dentry); + nfs_free_unlinkdata(data); + dput(dentry); +@@ -118,10 +141,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf + struct inode *dir = d_inode(dentry->d_parent); + struct dentry *alias; + +- down_read_non_owner(&NFS_I(dir)->rmdir_sem); ++ nfs_down_anon(&NFS_I(dir)->rmdir_sem); + alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq); + if (IS_ERR(alias)) { +- up_read_non_owner(&NFS_I(dir)->rmdir_sem); ++ nfs_up_anon(&NFS_I(dir)->rmdir_sem); + return 0; + } + if (!d_in_lookup(alias)) { +@@ -143,7 +166,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf + ret = 0; + spin_unlock(&alias->d_lock); + dput(alias); +- up_read_non_owner(&NFS_I(dir)->rmdir_sem); ++ nfs_up_anon(&NFS_I(dir)->rmdir_sem); + /* + * If we'd displaced old cached devname, free it. 
At that + * point dentry is definitely not a root, so we won't need +@@ -183,7 +206,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name) + goto out_free_name; + } + data->res.dir_attr = &data->dir_attr; +- init_waitqueue_head(&data->wq); ++ init_swait_queue_head(&data->wq); + + status = -EBUSY; + spin_lock(&dentry->d_lock); +diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c +index 8946130c8..71d0b3ba7 100644 +--- a/fs/ntfs/aops.c ++++ b/fs/ntfs/aops.c +@@ -106,8 +106,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) + "0x%llx.", (unsigned long long)bh->b_blocknr); + } + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -122,8 +121,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + /* + * If none of the buffers had errors then we can set the page uptodate, + * but we first have to perform the post read mst fixups, if the +@@ -156,9 +154,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) + unlock_page(page); + return; + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /** +diff --git a/fs/proc/array.c b/fs/proc/array.c +index 9eb99a43f..e4d0cfeba 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -381,9 +381,9 @@ static inline void task_context_switch_counts(struct seq_file *m, + static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) + { + seq_printf(m, "Cpus_allowed:\t%*pb\n", +- cpumask_pr_args(&task->cpus_allowed)); ++ cpumask_pr_args(task->cpus_ptr)); + seq_printf(m, "Cpus_allowed_list:\t%*pbl\n", +- cpumask_pr_args(&task->cpus_allowed)); ++ cpumask_pr_args(task->cpus_ptr)); + } + + static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) +diff --git a/fs/proc/base.c b/fs/proc/base.c +index d7e94f7b5..9cc73c7a7 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -96,6 +96,7 @@ + #include + #include + #include ++#include + #include "internal.h" + #include "fd.h" + +@@ -1980,7 +1981,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, + + child = d_hash_and_lookup(dir, &qname); + if (!child) { +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + child = d_alloc_parallel(dir, &qname, &wq); + if (IS_ERR(child)) + goto end_instantiate; +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index c95f32b83..75f500cb7 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -681,7 +681,7 @@ static bool proc_sys_fill_cache(struct file *file, + + child = d_lookup(dir, &qname); + if (!child) { +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + child = d_alloc_parallel(dir, &qname, &wq); + if (IS_ERR(child)) + return false; +diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c +index 23a9c28ad..6a73c4fa8 100644 +--- a/fs/squashfs/decompressor_multi_percpu.c ++++ b/fs/squashfs/decompressor_multi_percpu.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "squashfs_fs.h" + #include "squashfs_fs_sb.h" +@@ -25,6 +26,8 @@ 
struct squashfs_stream { + void *stream; + }; + ++static DEFINE_LOCAL_IRQ_LOCK(stream_lock); ++ + void *squashfs_decompressor_create(struct squashfs_sb_info *msblk, + void *comp_opts) + { +@@ -79,10 +82,15 @@ int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, + { + struct squashfs_stream __percpu *percpu = + (struct squashfs_stream __percpu *) msblk->stream; +- struct squashfs_stream *stream = get_cpu_ptr(percpu); +- int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, +- offset, length, output); +- put_cpu_ptr(stream); ++ struct squashfs_stream *stream; ++ int res; ++ ++ stream = get_locked_ptr(stream_lock, percpu); ++ ++ res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, ++ offset, length, output); ++ ++ put_locked_ptr(stream_lock, stream); + + if (res < 0) + ERROR("%s decompression failed, data probably corrupt\n", +diff --git a/fs/timerfd.c b/fs/timerfd.c +index d69ad801e..f84509346 100644 +--- a/fs/timerfd.c ++++ b/fs/timerfd.c +@@ -471,7 +471,11 @@ static int do_timerfd_settime(int ufd, int flags, + break; + } + spin_unlock_irq(&ctx->wqh.lock); +- cpu_relax(); ++ ++ if (isalarm(ctx)) ++ hrtimer_grab_expiry_lock(&ctx->t.alarm.timer); ++ else ++ hrtimer_grab_expiry_lock(&ctx->t.tmr); + } + + /* +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c +index 803c61f66..ee1191bfe 100644 +--- a/fs/userfaultfd.c ++++ b/fs/userfaultfd.c +@@ -59,7 +59,7 @@ struct userfaultfd_ctx { + /* waitqueue head for events */ + wait_queue_head_t event_wqh; + /* a refile sequence protected by fault_pending_wqh lock */ +- struct seqcount refile_seq; ++ seqlock_t refile_seq; + /* pseudo fd refcounting */ + atomic_t refcount; + /* userfaultfd syscall flags */ +@@ -1071,7 +1071,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, + * waitqueue could become empty if this is the + * only userfault. + */ +- write_seqcount_begin(&ctx->refile_seq); ++ write_seqlock(&ctx->refile_seq); + + /* + * The fault_pending_wqh.lock prevents the uwq +@@ -1097,7 +1097,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, + list_del(&uwq->wq.entry); + add_wait_queue(&ctx->fault_wqh, &uwq->wq); + +- write_seqcount_end(&ctx->refile_seq); ++ write_sequnlock(&ctx->refile_seq); + + /* careful to always initialize msg if ret == 0 */ + *msg = uwq->msg; +@@ -1270,11 +1270,11 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, + * sure we've userfaults to wake. 
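The squashfs hunk above swaps get_cpu_ptr()/put_cpu_ptr(), which disable preemption for the whole decompression, for a local lock: DEFINE_LOCAL_IRQ_LOCK() plus get_locked_ptr()/put_locked_ptr() take a per-CPU lock around the per-CPU stream instead, so the section stays preemptible on -rt while access to the stream remains exclusive. These helpers come from linux/locallock.h, which exists only in the -rt series; the sketch below restates the access pattern with hypothetical names:

#include <linux/locallock.h>
#include <linux/percpu.h>
#include <linux/errno.h>

struct hypo_stream {
	void *workspace;
};

static DEFINE_PER_CPU(struct hypo_stream, hypo_streams);
static DEFINE_LOCAL_IRQ_LOCK(hypo_stream_lock);

static int hypo_decompress(void)
{
	struct hypo_stream *stream;
	int ret;

	/* Takes this CPU's local lock and returns this CPU's slot;
	 * unlike get_cpu_ptr() the section remains preemptible on -rt. */
	stream = get_locked_ptr(hypo_stream_lock, &hypo_streams);

	ret = stream->workspace ? 0 : -ENOMEM;	/* stand-in for the real work */

	put_locked_ptr(hypo_stream_lock, stream);
	return ret;
}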
+ */ + do { +- seq = read_seqcount_begin(&ctx->refile_seq); ++ seq = read_seqbegin(&ctx->refile_seq); + need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || + waitqueue_active(&ctx->fault_wqh); + cond_resched(); +- } while (read_seqcount_retry(&ctx->refile_seq, seq)); ++ } while (read_seqretry(&ctx->refile_seq, seq)); + if (need_wakeup) + __wake_userfault(ctx, range); + } +@@ -1970,7 +1970,7 @@ static void init_once_userfaultfd_ctx(void *mem) + init_waitqueue_head(&ctx->fault_wqh); + init_waitqueue_head(&ctx->event_wqh); + init_waitqueue_head(&ctx->fd_wqh); +- seqcount_init(&ctx->refile_seq); ++ seqlock_init(&ctx->refile_seq); + } + + SYSCALL_DEFINE1(userfaultfd, int, flags) +diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h +index 1817a8415..942d64c04 100644 +--- a/include/asm-generic/percpu.h ++++ b/include/asm-generic/percpu.h +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_SMP + +diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h +index 5435f97bc..d425ebc34 100644 +--- a/include/linux/blk-cgroup.h ++++ b/include/linux/blk-cgroup.h +@@ -14,7 +14,7 @@ + * Nauman Rafique + */ + +-#include ++#include + #include + #include + #include +diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h +index 3695a43eb..c4dfc638a 100644 +--- a/include/linux/blk-mq.h ++++ b/include/linux/blk-mq.h +@@ -287,6 +287,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) + } + + ++void __blk_mq_complete_request_remote_work(struct work_struct *work); + int blk_mq_request_started(struct request *rq); + int blk_mq_request_completed(struct request *rq); + void blk_mq_start_request(struct request *rq); +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 1deaf36eb..850dab661 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -158,6 +159,9 @@ enum mq_rq_state { + */ + struct request { + struct request_queue *q; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct work_struct work; ++#endif + struct blk_mq_ctx *mq_ctx; + + int cpu; +@@ -682,6 +686,7 @@ struct request_queue { + #endif + struct rcu_head rcu_head; + wait_queue_head_t mq_freeze_wq; ++ struct work_struct mq_pcpu_wake; + struct percpu_ref q_usage_counter; + struct list_head all_q_node; + +diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h +index a19519f42..40dd5ef9c 100644 +--- a/include/linux/bottom_half.h ++++ b/include/linux/bottom_half.h +@@ -4,6 +4,39 @@ + + #include + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++extern void __local_bh_disable(void); ++extern void _local_bh_enable(void); ++extern void __local_bh_enable(void); ++ ++static inline void local_bh_disable(void) ++{ ++ __local_bh_disable(); ++} ++ ++static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) ++{ ++ __local_bh_disable(); ++} ++ ++static inline void local_bh_enable(void) ++{ ++ __local_bh_enable(); ++} ++ ++static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) ++{ ++ __local_bh_enable(); ++} ++ ++static inline void local_bh_enable_ip(unsigned long ip) ++{ ++ __local_bh_enable(); ++} ++ ++#else ++ + #ifdef CONFIG_TRACE_IRQFLAGS + extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); + #else +@@ -31,5 +64,6 @@ static inline void local_bh_enable(void) + { + __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); + } ++#endif + + #endif /* _LINUX_BH_H */ +diff --git a/include/linux/buffer_head.h 
b/include/linux/buffer_head.h +index 9168fc33a..703bf3335 100644 +--- a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h +@@ -76,8 +76,50 @@ struct buffer_head { + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t b_uptodate_lock; ++#if IS_ENABLED(CONFIG_JBD2) ++ spinlock_t b_state_lock; ++ spinlock_t b_journal_head_lock; ++#endif ++#endif + }; + ++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) ++{ ++ unsigned long flags; ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ local_irq_save(flags); ++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); ++#else ++ spin_lock_irqsave(&bh->b_uptodate_lock, flags); ++#endif ++ return flags; ++} ++ ++static inline void ++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) ++{ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); ++ local_irq_restore(flags); ++#else ++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); ++#endif ++} ++ ++static inline void buffer_head_init_locks(struct buffer_head *bh) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spin_lock_init(&bh->b_uptodate_lock); ++#if IS_ENABLED(CONFIG_JBD2) ++ spin_lock_init(&bh->b_state_lock); ++ spin_lock_init(&bh->b_journal_head_lock); ++#endif ++#endif ++} ++ + /* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h +index cad1a82ca..3febaa98d 100644 +--- a/include/linux/cgroup-defs.h ++++ b/include/linux/cgroup-defs.h +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_CGROUPS + +@@ -157,6 +158,7 @@ struct cgroup_subsys_state { + + /* percpu_ref killing and RCU release */ + struct work_struct destroy_work; ++ struct swork_event destroy_swork; + struct rcu_work destroy_rwork; + + /* +diff --git a/include/linux/completion.h b/include/linux/completion.h +index 519e94915..bf8e77001 100644 +--- a/include/linux/completion.h ++++ b/include/linux/completion.h +@@ -9,7 +9,7 @@ + * See kernel/sched/completion.c for details. 
+ */ + +-#include ++#include + + /* + * struct completion - structure used to maintain state for a "completion" +@@ -25,7 +25,7 @@ + */ + struct completion { + unsigned int done; +- wait_queue_head_t wait; ++ struct swait_queue_head wait; + }; + + #define init_completion_map(x, m) __init_completion(x) +@@ -34,7 +34,7 @@ static inline void complete_acquire(struct completion *x) {} + static inline void complete_release(struct completion *x) {} + + #define COMPLETION_INITIALIZER(work) \ +- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } ++ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + + #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ + (*({ init_completion_map(&(work), &(map)); &(work); })) +@@ -85,7 +85,7 @@ static inline void complete_release(struct completion *x) {} + static inline void __init_completion(struct completion *x) + { + x->done = 0; +- init_waitqueue_head(&x->wait); ++ init_swait_queue_head(&x->wait); + } + + /** +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index aab427381..87347ccbb 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -129,6 +129,7 @@ static inline int cpus_read_trylock(void) { return true; } + static inline void lockdep_assert_cpus_held(void) { } + static inline void cpu_hotplug_disable(void) { } + static inline void cpu_hotplug_enable(void) { } ++ + #endif /* !CONFIG_HOTPLUG_CPU */ + + /* Wrappers which go away once all code is converted */ +diff --git a/include/linux/dcache.h b/include/linux/dcache.h +index d4d030340..00afcb4b0 100644 +--- a/include/linux/dcache.h ++++ b/include/linux/dcache.h +@@ -107,7 +107,7 @@ struct dentry { + + union { + struct list_head d_lru; /* LRU list */ +- wait_queue_head_t *d_wait; /* in-lookup ones only */ ++ struct swait_queue_head *d_wait; /* in-lookup ones only */ + }; + struct list_head d_child; /* child of parent list */ + struct list_head d_subdirs; /* our children */ +@@ -249,7 +249,7 @@ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); + extern struct dentry * d_alloc_anon(struct super_block *); + extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); + extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, +- wait_queue_head_t *); ++ struct swait_queue_head *); + extern struct dentry * d_splice_alias(struct inode *, struct dentry *); + extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); + extern struct dentry * d_exact_alias(struct dentry *, struct inode *); +diff --git a/include/linux/delay.h b/include/linux/delay.h +index b78bab439..7c4bc414a 100644 +--- a/include/linux/delay.h ++++ b/include/linux/delay.h +@@ -64,4 +64,10 @@ static inline void ssleep(unsigned int seconds) + msleep(seconds * 1000); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void cpu_chill(void); ++#else ++# define cpu_chill() cpu_relax() ++#endif ++ + #endif /* defined(_LINUX_DELAY_H) */ +diff --git a/include/linux/fs.h b/include/linux/fs.h +index bcd2131ca..01414b979 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -705,7 +705,7 @@ struct inode { + struct block_device *i_bdev; + struct cdev *i_cdev; + char *i_link; +- unsigned i_dir_seq; ++ unsigned __i_dir_seq; + }; + + __u32 i_generation; +diff --git a/include/linux/fscache.h b/include/linux/fscache.h +index 84b90a79d..87a9330ea 100644 +--- a/include/linux/fscache.h ++++ b/include/linux/fscache.h +@@ -230,6 +230,7 @@ extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, + extern void __fscache_disable_cookie(struct 
fscache_cookie *, const void *, bool); + extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t, + bool (*)(void *), void *); ++extern void fscache_cookie_init(void); + + /** + * fscache_register_netfs - Register a filesystem as desiring caching services +diff --git a/include/linux/highmem.h b/include/linux/highmem.h +index 1fed918bb..c3d13515e 100644 +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + #include + +@@ -66,7 +67,7 @@ static inline void kunmap(struct page *page) + + static inline void *kmap_atomic(struct page *page) + { +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + return page_address(page); + } +@@ -75,7 +76,7 @@ static inline void *kmap_atomic(struct page *page) + static inline void __kunmap_atomic(void *addr) + { + pagefault_enable(); +- preempt_enable(); ++ preempt_enable_nort(); + } + + #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) +@@ -87,32 +88,51 @@ static inline void __kunmap_atomic(void *addr) + + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + ++#ifndef CONFIG_PREEMPT_RT_FULL + DECLARE_PER_CPU(int, __kmap_atomic_idx); ++#endif + + static inline int kmap_atomic_idx_push(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +-#ifdef CONFIG_DEBUG_HIGHMEM ++# ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx >= KM_TYPE_NR); +-#endif ++# endif + return idx; ++#else ++ current->kmap_idx++; ++ BUG_ON(current->kmap_idx > KM_TYPE_NR); ++ return current->kmap_idx - 1; ++#endif + } + + static inline int kmap_atomic_idx(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + return __this_cpu_read(__kmap_atomic_idx) - 1; ++#else ++ return current->kmap_idx - 1; ++#endif + } + + static inline void kmap_atomic_idx_pop(void) + { +-#ifdef CONFIG_DEBUG_HIGHMEM ++#ifndef CONFIG_PREEMPT_RT_FULL ++# ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +-#else ++# else + __this_cpu_dec(__kmap_atomic_idx); ++# endif ++#else ++ current->kmap_idx--; ++# ifdef CONFIG_DEBUG_HIGHMEM ++ BUG_ON(current->kmap_idx < 0); ++# endif + #endif + } + +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h +index 542b4fa2c..aee31b1f0 100644 +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -41,6 +41,7 @@ enum hrtimer_mode { + HRTIMER_MODE_REL = 0x01, + HRTIMER_MODE_PINNED = 0x02, + HRTIMER_MODE_SOFT = 0x04, ++ HRTIMER_MODE_HARD = 0x08, + + HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, + HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, +@@ -51,6 +52,11 @@ enum hrtimer_mode { + HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, + HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, + ++ HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD, ++ HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD, ++ ++ HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD, ++ HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD, + }; + + /* +@@ -186,6 +192,8 @@ enum hrtimer_base_type { + * @nr_retries: Total number of hrtimer interrupt retries + * @nr_hangs: Total number of hrtimer interrupt hangs + * @max_hang_time: Maximum time spent in hrtimer_interrupt ++ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are ++ * expired + * @expires_next: absolute time of the 
next event, is required for remote + * hrtimer enqueue; it is the total first expiry time (hard + * and soft hrtimer are taken into account) +@@ -213,6 +221,7 @@ struct hrtimer_cpu_base { + unsigned short nr_hangs; + unsigned int max_hang_time; + #endif ++ spinlock_t softirq_expiry_lock; + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; +@@ -364,10 +373,17 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device); + /* Initialize timers: */ + extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, + enum hrtimer_mode mode); ++extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, ++ enum hrtimer_mode mode, ++ struct task_struct *task); + + #ifdef CONFIG_DEBUG_OBJECTS_TIMERS + extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, + enum hrtimer_mode mode); ++extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, ++ clockid_t clock_id, ++ enum hrtimer_mode mode, ++ struct task_struct *task); + + extern void destroy_hrtimer_on_stack(struct hrtimer *timer); + #else +@@ -377,6 +393,15 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer, + { + hrtimer_init(timer, which_clock, mode); + } ++ ++static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, ++ clockid_t clock_id, ++ enum hrtimer_mode mode, ++ struct task_struct *task) ++{ ++ hrtimer_init_sleeper(sl, clock_id, mode, task); ++} ++ + static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } + #endif + +@@ -400,6 +425,7 @@ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, + + extern int hrtimer_cancel(struct hrtimer *timer); + extern int hrtimer_try_to_cancel(struct hrtimer *timer); ++extern void hrtimer_grab_expiry_lock(const struct hrtimer *timer); + + static inline void hrtimer_start_expires(struct hrtimer *timer, + enum hrtimer_mode mode) +@@ -486,9 +512,6 @@ extern long hrtimer_nanosleep(const struct timespec64 *rqtp, + const enum hrtimer_mode mode, + const clockid_t clockid); + +-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, +- struct task_struct *tsk); +- + extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, + const enum hrtimer_mode mode); + extern int schedule_hrtimeout_range_clock(ktime_t *expires, +diff --git a/include/linux/idr.h b/include/linux/idr.h +index b6c6151c7..81c9df5c0 100644 +--- a/include/linux/idr.h ++++ b/include/linux/idr.h +@@ -169,10 +169,7 @@ static inline bool idr_is_empty(const struct idr *idr) + * Each idr_preload() should be matched with an invocation of this + * function. See idr_preload() for details. + */ +-static inline void idr_preload_end(void) +-{ +- preempt_enable(); +-} ++void idr_preload_end(void); + + /** + * idr_for_each_entry() - Iterate over an IDR's elements of a given type. +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index 97de36a38..d05ef847f 100644 +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -61,6 +61,7 @@ + * interrupt handler after suspending interrupts. For system + * wakeup devices users need to implement wakeup detection in + * their interrupt handlers. 
++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) + */ + #define IRQF_SHARED 0x00000080 + #define IRQF_PROBE_SHARED 0x00000100 +@@ -74,6 +75,7 @@ + #define IRQF_NO_THREAD 0x00010000 + #define IRQF_EARLY_RESUME 0x00020000 + #define IRQF_COND_SUSPEND 0x00040000 ++#define IRQF_NO_SOFTIRQ_CALL 0x00080000 + + #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) + +@@ -447,7 +449,11 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, + bool state); + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifdef CONFIG_PREEMPT_RT_BASE ++# define force_irqthreads (true) ++# else + extern bool force_irqthreads; ++# endif + #else + #define force_irqthreads (0) + #endif +@@ -513,9 +519,10 @@ struct softirq_action + void (*action)(struct softirq_action *); + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +- ++static inline void thread_do_softirq(void) { do_softirq(); } + #ifdef __ARCH_HAS_DO_SOFTIRQ + void do_softirq_own_stack(void); + #else +@@ -524,13 +531,25 @@ static inline void do_softirq_own_stack(void) + __do_softirq(); + } + #endif ++#else ++extern void thread_do_softirq(void); ++#endif + + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); + extern void softirq_init(void); + extern void __raise_softirq_irqoff(unsigned int nr); ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void __raise_softirq_irqoff_ksoft(unsigned int nr); ++#else ++static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++} ++#endif + + extern void raise_softirq_irqoff(unsigned int nr); + extern void raise_softirq(unsigned int nr); ++extern void softirq_check_pending_idle(void); + + DECLARE_PER_CPU(struct task_struct *, ksoftirqd); + +@@ -552,8 +571,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void) + to be executed on some cpu at least once after this. + * If the tasklet is already scheduled, but its execution is still not + started, it will be executed only once. +- * If this tasklet is already running on another CPU (or schedule is called +- from tasklet itself), it is rescheduled for later. ++ * If this tasklet is already running on another CPU, it is rescheduled ++ for later. ++ * Schedule must not be called from the tasklet itself (a lockup occurs) + * Tasklet is strictly serialized wrt itself, but not + wrt another tasklets. If client needs some intertask synchronization, + he makes it with spinlocks. 
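A minimal usage sketch (illustrative, not part of the patch) that respects the rule spelled out in the comment above: the bottom half never calls tasklet_schedule() on itself; only the interrupt handler re-arms it. Names prefixed my_ are hypothetical:

#include <linux/interrupt.h>

static void my_bh_work(unsigned long data)
{
        /* deferred work; must NOT call tasklet_schedule(&my_tasklet) from here */
}

static DECLARE_TASKLET(my_tasklet, my_bh_work, 0);

static irqreturn_t my_isr(int irq, void *dev_id)
{
        tasklet_schedule(&my_tasklet);  /* re-arming from the IRQ handler is fine */
        return IRQ_HANDLED;
}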
+@@ -578,27 +598,39 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } + enum + { + TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ +- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_PENDING, /* Tasklet is pending */ ++ TASKLET_STATE_CHAINED /* Tasklet is chained */ + }; + +-#ifdef CONFIG_SMP ++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) ++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) ++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) ++#define TASKLET_STATEF_CHAINED (1 << TASKLET_STATE_CHAINED) ++#define TASKLET_STATEF_RC (TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED) ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + static inline int tasklet_trylock(struct tasklet_struct *t) + { + return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); + } + ++static inline int tasklet_tryunlock(struct tasklet_struct *t) ++{ ++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; ++} ++ + static inline void tasklet_unlock(struct tasklet_struct *t) + { + smp_mb__before_atomic(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); + } + +-static inline void tasklet_unlock_wait(struct tasklet_struct *t) +-{ +- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +-} ++extern void tasklet_unlock_wait(struct tasklet_struct *t); ++ + #else + #define tasklet_trylock(t) 1 ++#define tasklet_tryunlock(t) 1 + #define tasklet_unlock_wait(t) do { } while (0) + #define tasklet_unlock(t) do { } while (0) + #endif +@@ -632,17 +664,18 @@ static inline void tasklet_disable(struct tasklet_struct *t) + smp_mb(); + } + +-static inline void tasklet_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic(); +- atomic_dec(&t->count); +-} +- ++extern void tasklet_enable(struct tasklet_struct *t); + extern void tasklet_kill(struct tasklet_struct *t); + extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); + extern void tasklet_init(struct tasklet_struct *t, + void (*func)(unsigned long), unsigned long data); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void softirq_early_init(void); ++#else ++static inline void softirq_early_init(void) { } ++#endif ++ + struct tasklet_hrtimer { + struct hrtimer timer; + struct tasklet_struct tasklet; +diff --git a/include/linux/irq.h b/include/linux/irq.h +index 536f1abc9..32f9bd593 100644 +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -69,6 +69,7 @@ enum irqchip_irq_state; + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude + * it from the spurious interrupt detection + * mechanism and from core side polling. 
++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) + * IRQ_DISABLE_UNLAZY - Disable lazy irq disable + */ + enum { +@@ -96,13 +97,14 @@ enum { + IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_IS_POLLED = (1 << 18), + IRQ_DISABLE_UNLAZY = (1 << 19), ++ IRQ_NO_SOFTIRQ_CALL = (1 << 20), + }; + + #define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ +- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) ++ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL) + + #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h +index b11fcdfd0..0c5055998 100644 +--- a/include/linux/irq_work.h ++++ b/include/linux/irq_work.h +@@ -18,6 +18,8 @@ + + /* Doesn't want IPI, wait for tick: */ + #define IRQ_WORK_LAZY BIT(2) ++/* Run hard IRQ context, even on RT */ ++#define IRQ_WORK_HARD_IRQ BIT(3) + + #define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) + +@@ -52,4 +54,10 @@ static inline bool irq_work_needs_cpu(void) { return false; } + static inline void irq_work_run(void) { } + #endif + ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++void irq_work_tick_soft(void); ++#else ++static inline void irq_work_tick_soft(void) { } ++#endif ++ + #endif /* _LINUX_IRQ_WORK_H */ +diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h +index 8140d8ca5..cbb4fff3a 100644 +--- a/include/linux/irqdesc.h ++++ b/include/linux/irqdesc.h +@@ -71,6 +71,7 @@ struct irq_desc { + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; ++ u64 random_ip; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; +diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h +index 21619c92c..b20eeb25e 100644 +--- a/include/linux/irqflags.h ++++ b/include/linux/irqflags.h +@@ -43,14 +43,6 @@ do { \ + do { \ + current->hardirq_context--; \ + } while (0) +-# define lockdep_softirq_enter() \ +-do { \ +- current->softirq_context++; \ +-} while (0) +-# define lockdep_softirq_exit() \ +-do { \ +- current->softirq_context--; \ +-} while (0) + #else + # define trace_hardirqs_on() do { } while (0) + # define trace_hardirqs_off() do { } while (0) +@@ -64,6 +56,21 @@ do { \ + # define lockdep_softirq_exit() do { } while (0) + #endif + ++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) ++# define lockdep_softirq_enter() \ ++do { \ ++ current->softirq_context++; \ ++} while (0) ++# define lockdep_softirq_exit() \ ++do { \ ++ current->softirq_context--; \ ++} while (0) ++ ++#else ++# define lockdep_softirq_enter() do { } while (0) ++# define lockdep_softirq_exit() do { } while (0) ++#endif ++ + #if defined(CONFIG_IRQSOFF_TRACER) || \ + defined(CONFIG_PREEMPT_TRACER) + extern void stop_critical_timings(void); +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h +index 2192a10e9..a10b1f090 100644 +--- a/include/linux/jbd2.h ++++ b/include/linux/jbd2.h +@@ -349,32 +349,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) + + static inline void jbd_lock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_State, &bh->b_state); ++#else ++ spin_lock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_trylock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_trylock(BH_State, 
&bh->b_state); ++#else ++ return spin_trylock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_is_locked_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_is_locked(BH_State, &bh->b_state); ++#else ++ return spin_is_locked(&bh->b_state_lock); ++#endif + } + + static inline void jbd_unlock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_State, &bh->b_state); ++#else ++ spin_unlock(&bh->b_state_lock); ++#endif + } + + static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_JournalHead, &bh->b_state); ++#else ++ spin_lock(&bh->b_journal_head_lock); ++#endif + } + + static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_JournalHead, &bh->b_state); ++#else ++ spin_unlock(&bh->b_journal_head_lock); ++#endif + } + + #define J_ASSERT(assert) BUG_ON(!(assert)) +diff --git a/include/linux/kdb.h b/include/linux/kdb.h +index 68bd88223..e033b25b0 100644 +--- a/include/linux/kdb.h ++++ b/include/linux/kdb.h +@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, + extern __printf(1, 2) int kdb_printf(const char *, ...); + typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); + ++#define in_kdb_printk() (kdb_trap_printk) + extern void kdb_init(int level); + + /* Access to kdb specific polling devices */ +@@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *, + extern int kdb_unregister(char *); + #else /* ! CONFIG_KGDB_KDB */ + static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } ++#define in_kdb_printk() (0) + static inline void kdb_init(int level) {} + static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +diff --git a/include/linux/kernel.h b/include/linux/kernel.h +index 06c738d39..f36ee1f1a 100644 +--- a/include/linux/kernel.h ++++ b/include/linux/kernel.h +@@ -260,6 +260,9 @@ extern int _cond_resched(void); + */ + # define might_sleep() \ + do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) ++ ++# define might_sleep_no_state_check() \ ++ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) + # define sched_annotate_sleep() (current->task_state_change = 0) + #else + static inline void ___might_sleep(const char *file, int line, +@@ -267,6 +270,7 @@ extern int _cond_resched(void); + static inline void __might_sleep(const char *file, int line, + int preempt_offset) { } + # define might_sleep() do { might_resched(); } while (0) ++# define might_sleep_no_state_check() do { might_resched(); } while (0) + # define sched_annotate_sleep() do { } while (0) + #endif + +diff --git a/include/linux/kthread.h b/include/linux/kthread.h +index 8613e4981..a1b0b661e 100644 +--- a/include/linux/kthread.h ++++ b/include/linux/kthread.h +@@ -89,7 +89,7 @@ enum { + + struct kthread_worker { + unsigned int flags; +- spinlock_t lock; ++ raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; +@@ -110,7 +110,7 @@ struct kthread_delayed_work { + }; + + #define KTHREAD_WORKER_INIT(worker) { \ +- .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ ++ .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ + .work_list = LIST_HEAD_INIT((worker).work_list), \ + .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ + } +diff --git 
a/include/linux/list_bl.h b/include/linux/list_bl.h +index 3fc2cc57b..0b5de7d9f 100644 +--- a/include/linux/list_bl.h ++++ b/include/linux/list_bl.h +@@ -3,6 +3,7 @@ + #define _LINUX_LIST_BL_H + + #include ++#include + #include + + /* +@@ -33,13 +34,24 @@ + + struct hlist_bl_head { + struct hlist_bl_node *first; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spinlock_t lock; ++#endif + }; + + struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; + }; +-#define INIT_HLIST_BL_HEAD(ptr) \ +- ((ptr)->first = NULL) ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++#define INIT_HLIST_BL_HEAD(h) \ ++do { \ ++ (h)->first = NULL; \ ++ raw_spin_lock_init(&(h)->lock); \ ++} while (0) ++#else ++#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL ++#endif + + static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) + { +@@ -119,12 +131,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n) + + static inline void hlist_bl_lock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(0, (unsigned long *)b); ++#else ++ raw_spin_lock(&b->lock); ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __set_bit(0, (unsigned long *)b); ++#endif ++#endif + } + + static inline void hlist_bl_unlock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + __bit_spin_unlock(0, (unsigned long *)b); ++#else ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __clear_bit(0, (unsigned long *)b); ++#endif ++ raw_spin_unlock(&b->lock); ++#endif + } + + static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) +diff --git a/include/linux/locallock.h b/include/linux/locallock.h +new file mode 100644 +index 000000000..81c89d877 +--- /dev/null ++++ b/include/linux/locallock.h +@@ -0,0 +1,282 @@ ++#ifndef _LINUX_LOCALLOCK_H ++#define _LINUX_LOCALLOCK_H ++ ++#include ++#include ++#include ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define LL_WARN(cond) WARN_ON(cond) ++#else ++# define LL_WARN(cond) do { } while (0) ++#endif ++ ++/* ++ * per cpu lock based substitute for local_irq_*() ++ */ ++struct local_irq_lock { ++ spinlock_t lock; ++ struct task_struct *owner; ++ int nestcnt; ++ unsigned long flags; ++}; ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ ++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ ++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } ++ ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ ++ DECLARE_PER_CPU(struct local_irq_lock, lvar) ++ ++#define local_irq_lock_init(lvar) \ ++ do { \ ++ int __cpu; \ ++ for_each_possible_cpu(__cpu) \ ++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ ++ } while (0) ++ ++static inline void __local_lock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ spin_lock(&lv->lock); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ } ++ lv->nestcnt++; ++} ++ ++#define local_lock(lvar) \ ++ do { __local_lock(&get_local_var(lvar)); } while (0) ++ ++#define local_lock_on(lvar, cpu) \ ++ do { __local_lock(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline int __local_trylock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current && spin_trylock(&lv->lock)) { ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++ return 1; ++ } else if (lv->owner == current) { ++ lv->nestcnt++; ++ return 1; ++ } ++ return 0; ++} ++ ++#define local_trylock(lvar) \ ++ ({ \ ++ int __locked; \ ++ __locked = __local_trylock(&get_local_var(lvar)); \ ++ if (!__locked) \ ++ put_local_var(lvar); \ ++ __locked; \ ++ }) ++ ++static inline void __local_unlock(struct 
local_irq_lock *lv) ++{ ++ LL_WARN(lv->nestcnt == 0); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return; ++ ++ lv->owner = NULL; ++ spin_unlock(&lv->lock); ++} ++ ++#define local_unlock(lvar) \ ++ do { \ ++ __local_unlock(this_cpu_ptr(&lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_on(lvar, cpu) \ ++ do { __local_unlock(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline void __local_lock_irq(struct local_irq_lock *lv) ++{ ++ spin_lock_irqsave(&lv->lock, lv->flags); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++} ++ ++#define local_lock_irq(lvar) \ ++ do { __local_lock_irq(&get_local_var(lvar)); } while (0) ++ ++#define local_lock_irq_on(lvar, cpu) \ ++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline void __local_unlock_irq(struct local_irq_lock *lv) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ lv->owner = NULL; ++ lv->nestcnt = 0; ++ spin_unlock_irq(&lv->lock); ++} ++ ++#define local_unlock_irq(lvar) \ ++ do { \ ++ __local_unlock_irq(this_cpu_ptr(&lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irq_on(lvar, cpu) \ ++ do { \ ++ __local_unlock_irq(&per_cpu(lvar, cpu)); \ ++ } while (0) ++ ++static inline int __local_lock_irqsave(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ __local_lock_irq(lv); ++ return 0; ++ } else { ++ lv->nestcnt++; ++ return 1; ++ } ++} ++ ++#define local_lock_irqsave(lvar, _flags) \ ++ do { \ ++ if (__local_lock_irqsave(&get_local_var(lvar))) \ ++ put_local_var(lvar); \ ++ _flags = __this_cpu_read(lvar.flags); \ ++ } while (0) ++ ++#define local_lock_irqsave_on(lvar, _flags, cpu) \ ++ do { \ ++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ ++ _flags = per_cpu(lvar, cpu).flags; \ ++ } while (0) ++ ++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, ++ unsigned long flags) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return 0; ++ ++ lv->owner = NULL; ++ spin_unlock_irqrestore(&lv->lock, lv->flags); ++ return 1; ++} ++ ++#define local_unlock_irqrestore(lvar, flags) \ ++ do { \ ++ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irqrestore_on(lvar, flags, cpu) \ ++ do { \ ++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ ++ } while (0) ++ ++#define local_spin_trylock_irq(lvar, lock) \ ++ ({ \ ++ int __locked; \ ++ local_lock_irq(lvar); \ ++ __locked = spin_trylock(lock); \ ++ if (!__locked) \ ++ local_unlock_irq(lvar); \ ++ __locked; \ ++ }) ++ ++#define local_spin_lock_irq(lvar, lock) \ ++ do { \ ++ local_lock_irq(lvar); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irq(lvar, lock) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irq(lvar); \ ++ } while (0) ++ ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ do { \ ++ local_lock_irqsave(lvar, flags); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irqrestore(lvar, flags); \ ++ } while (0) ++ ++#define get_locked_var(lvar, var) \ ++ (*({ \ ++ local_lock(lvar); \ ++ this_cpu_ptr(&var); \ ++ })) ++ ++#define put_locked_var(lvar, var) local_unlock(lvar); ++ ++#define get_locked_ptr(lvar, var) \ ++ ({ \ ++ local_lock(lvar); \ ++ this_cpu_ptr(var); \ ++ }) ++ ++#define put_locked_ptr(lvar, var) local_unlock(lvar); ++ ++#define local_lock_cpu(lvar) \ 
++ ({ \ ++ local_lock(lvar); \ ++ smp_processor_id(); \ ++ }) ++ ++#define local_unlock_cpu(lvar) local_unlock(lvar) ++ ++#else /* PREEMPT_RT_BASE */ ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar ++ ++static inline void local_irq_lock_init(int lvar) { } ++ ++#define local_trylock(lvar) \ ++ ({ \ ++ preempt_disable(); \ ++ 1; \ ++ }) ++ ++#define local_lock(lvar) preempt_disable() ++#define local_unlock(lvar) preempt_enable() ++#define local_lock_irq(lvar) local_irq_disable() ++#define local_lock_irq_on(lvar, cpu) local_irq_disable() ++#define local_unlock_irq(lvar) local_irq_enable() ++#define local_unlock_irq_on(lvar, cpu) local_irq_enable() ++#define local_lock_irqsave(lvar, flags) local_irq_save(flags) ++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) ++ ++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) ++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) ++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ spin_lock_irqsave(lock, flags) ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ spin_unlock_irqrestore(lock, flags) ++ ++#define get_locked_var(lvar, var) get_cpu_var(var) ++#define put_locked_var(lvar, var) put_cpu_var(var) ++#define get_locked_ptr(lvar, var) get_cpu_ptr(var) ++#define put_locked_ptr(lvar, var) put_cpu_ptr(var) ++ ++#define local_lock_cpu(lvar) get_cpu() ++#define local_unlock_cpu(lvar) put_cpu() ++ ++#endif ++ ++#endif +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index f2142a5ea..d0ff067ec 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -503,6 +504,9 @@ struct mm_struct { + bool tlb_flush_batched; + #endif + struct uprobes_state uprobes_state; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head delayed_drop; ++#endif + #ifdef CONFIG_HUGETLB_PAGE + atomic_long_t hugetlb_usage; + #endif +diff --git a/include/linux/mutex.h b/include/linux/mutex.h +index 8f7cdf83f..6aa217c6e 100644 +--- a/include/linux/mutex.h ++++ b/include/linux/mutex.h +@@ -22,6 +22,17 @@ + + struct ww_acquire_ctx; + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ ++ , .dep_map = { .name = #lockname } ++#else ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) ++#endif ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++ + /* + * Simple, straightforward mutexes with strict semantics: + * +@@ -118,13 +129,6 @@ do { \ + __mutex_init((mutex), #mutex, &__key); \ + } while (0) + +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ +- , .dep_map = { .name = #lockname } +-#else +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +-#endif +- + #define __MUTEX_INITIALIZER(lockname) \ + { .owner = ATOMIC_LONG_INIT(0) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ +@@ -229,4 +233,6 @@ mutex_trylock_recursive(struct mutex *lock) + return mutex_trylock(lock); + } + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* __LINUX_MUTEX_H */ +diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h +new file mode 100644 +index 000000000..3fcb5edb1 +--- /dev/null ++++ b/include/linux/mutex_rt.h +@@ -0,0 +1,130 @@ ++#ifndef __LINUX_MUTEX_RT_H ++#define __LINUX_MUTEX_RT_H ++ ++#ifndef __LINUX_MUTEX_H ++#error "Please include mutex.h" ++#endif ++ ++#include ++ ++/* FIXME: Just for 
__lockfunc */ ++#include ++ ++struct mutex { ++ struct rt_mutex lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __MUTEX_INITIALIZER(mutexname) \ ++ { \ ++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ ++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ ++ } ++ ++#define DEFINE_MUTEX(mutexname) \ ++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) ++ ++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); ++extern void __lockfunc _mutex_lock(struct mutex *lock); ++extern void __lockfunc _mutex_lock_io(struct mutex *lock); ++extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); ++extern int __lockfunc _mutex_lock_killable(struct mutex *lock); ++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); ++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); ++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_trylock(struct mutex *lock); ++extern void __lockfunc _mutex_unlock(struct mutex *lock); ++ ++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) ++#define mutex_lock(l) _mutex_lock(l) ++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) ++#define mutex_lock_killable(l) _mutex_lock_killable(l) ++#define mutex_trylock(l) _mutex_trylock(l) ++#define mutex_unlock(l) _mutex_unlock(l) ++#define mutex_lock_io(l) _mutex_lock_io(l); ++ ++#define __mutex_owner(l) ((l)->lock.owner) ++ ++#ifdef CONFIG_DEBUG_MUTEXES ++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) ++#else ++static inline void mutex_destroy(struct mutex *lock) {} ++#endif ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible_nested(l, s) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable_nested(l, s) ++# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s) ++ ++# define mutex_lock_nest_lock(lock, nest_lock) \ ++do { \ ++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ ++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ ++} while (0) ++ ++#else ++# define mutex_lock_nested(l, s) _mutex_lock(l) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible(l) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable(l) ++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) ++# define mutex_lock_io_nested(l, s) _mutex_lock_io(l) ++#endif ++ ++# define mutex_init(mutex) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), #mutex, &__key); \ ++} while (0) ++ ++# define __mutex_init(mutex, name, key) \ ++do { \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), name, key); \ ++} while (0) ++ ++/** ++ * These values are chosen such that FAIL and SUCCESS match the ++ * values of the regular mutex_trylock(). ++ */ ++enum mutex_trylock_recursive_enum { ++ MUTEX_TRYLOCK_FAILED = 0, ++ MUTEX_TRYLOCK_SUCCESS = 1, ++ MUTEX_TRYLOCK_RECURSIVE, ++}; ++/** ++ * mutex_trylock_recursive - trylock variant that allows recursive locking ++ * @lock: mutex to be locked ++ * ++ * This function should not be used, _ever_. 
It is purely for hysterical GEM ++ * raisins, and once those are gone this will be removed. ++ * ++ * Returns: ++ * MUTEX_TRYLOCK_FAILED - trylock failed, ++ * MUTEX_TRYLOCK_SUCCESS - lock acquired, ++ * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. ++ */ ++int __rt_mutex_owner_current(struct rt_mutex *lock); ++ ++static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum ++mutex_trylock_recursive(struct mutex *lock) ++{ ++ if (unlikely(__rt_mutex_owner_current(&lock->lock))) ++ return MUTEX_TRYLOCK_RECURSIVE; ++ ++ return mutex_trylock(lock); ++} ++ ++extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); ++ ++#endif +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 9a7d7e630..1600f4dc5 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -435,7 +435,19 @@ typedef enum rx_handler_result rx_handler_result_t; + typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); + + void __napi_schedule(struct napi_struct *n); ++ ++/* ++ * When PREEMPT_RT_FULL is defined, all device interrupt handlers ++ * run as threads, and they can also be preempted (without PREEMPT_RT ++ * interrupt threads can not be preempted). Which means that calling ++ * __napi_schedule_irqoff() from an interrupt handler can be preempted ++ * and can corrupt the napi->poll_list. ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define __napi_schedule_irqoff(n) __napi_schedule(n) ++#else + void __napi_schedule_irqoff(struct napi_struct *n); ++#endif + + static inline bool napi_disable_pending(struct napi_struct *n) + { +@@ -600,7 +612,11 @@ struct netdev_queue { + * write-mostly part + */ + spinlock_t _xmit_lock ____cacheline_aligned_in_smp; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct task_struct *xmit_lock_owner; ++#else + int xmit_lock_owner; ++#endif + /* + * Time (in jiffies) of last Tx + */ +@@ -3110,6 +3126,7 @@ struct softnet_data { + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; ++ struct sk_buff_head tofree_queue; + + }; + +@@ -3128,14 +3145,38 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd, + #endif + } + ++#define XMIT_RECURSION_LIMIT 8 + DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int dev_recursion_level(void) ++{ ++ return current->xmit_recursion; ++} ++ ++static inline bool dev_xmit_recursion(void) ++{ ++ return unlikely(current->xmit_recursion > ++ XMIT_RECURSION_LIMIT); ++} ++ ++static inline void dev_xmit_recursion_inc(void) ++{ ++ current->xmit_recursion++; ++} ++ ++static inline void dev_xmit_recursion_dec(void) ++{ ++ current->xmit_recursion--; ++} ++ ++#else ++ + static inline int dev_recursion_level(void) + { + return this_cpu_read(softnet_data.xmit.recursion); + } + +-#define XMIT_RECURSION_LIMIT 8 + static inline bool dev_xmit_recursion(void) + { + return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > +@@ -3151,6 +3192,7 @@ static inline void dev_xmit_recursion_dec(void) + { + __this_cpu_dec(softnet_data.xmit.recursion); + } ++#endif + + void __netif_schedule(struct Qdisc *q); + void netif_schedule_queue(struct netdev_queue *txq); +@@ -3954,13 +3996,52 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) + return (1U << debug_value) - 1; + } + +-static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) ++{ ++ 
WRITE_ONCE(txq->xmit_lock_owner, current); ++} ++ ++static inline void netdev_queue_clear_owner(struct netdev_queue *txq) ++{ ++ WRITE_ONCE(txq->xmit_lock_owner, NULL); ++} ++ ++static inline bool netdev_queue_has_owner(struct netdev_queue *txq) ++{ ++ if (READ_ONCE(txq->xmit_lock_owner) != NULL) ++ return true; ++ return false; ++} ++ ++#else ++ ++static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) + { +- spin_lock(&txq->_xmit_lock); + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, cpu); + } + ++static inline void netdev_queue_clear_owner(struct netdev_queue *txq) ++{ ++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */ ++ WRITE_ONCE(txq->xmit_lock_owner, -1); ++} ++ ++static inline bool netdev_queue_has_owner(struct netdev_queue *txq) ++{ ++ if (READ_ONCE(txq->xmit_lock_owner != -1)) ++ return true; ++ return false; ++} ++#endif ++ ++static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) ++{ ++ spin_lock(&txq->_xmit_lock); ++ netdev_queue_set_owner(txq, cpu); ++} ++ + static inline bool __netif_tx_acquire(struct netdev_queue *txq) + { + __acquire(&txq->_xmit_lock); +@@ -3975,8 +4056,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq) + static inline void __netif_tx_lock_bh(struct netdev_queue *txq) + { + spin_lock_bh(&txq->_xmit_lock); +- /* Pairs with READ_ONCE() in __dev_queue_xmit() */ +- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); ++ netdev_queue_set_owner(txq, smp_processor_id()); + } + + static inline bool __netif_tx_trylock(struct netdev_queue *txq) +@@ -3984,29 +4064,26 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq) + bool ok = spin_trylock(&txq->_xmit_lock); + + if (likely(ok)) { +- /* Pairs with READ_ONCE() in __dev_queue_xmit() */ +- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); ++ netdev_queue_set_owner(txq, smp_processor_id()); + } + return ok; + } + + static inline void __netif_tx_unlock(struct netdev_queue *txq) + { +- /* Pairs with READ_ONCE() in __dev_queue_xmit() */ +- WRITE_ONCE(txq->xmit_lock_owner, -1); ++ netdev_queue_clear_owner(txq); + spin_unlock(&txq->_xmit_lock); + } + + static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) + { +- /* Pairs with READ_ONCE() in __dev_queue_xmit() */ +- WRITE_ONCE(txq->xmit_lock_owner, -1); ++ netdev_queue_clear_owner(txq); + spin_unlock_bh(&txq->_xmit_lock); + } + + static inline void txq_trans_update(struct netdev_queue *txq) + { +- if (txq->xmit_lock_owner != -1) ++ if (netdev_queue_has_owner(txq)) + txq->trans_start = jiffies; + } + +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h +index 0ade4d1e4..3e21ce64c 100644 +--- a/include/linux/netfilter/x_tables.h ++++ b/include/linux/netfilter/x_tables.h +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + #include + + /* Test a struct->invflags and a boolean for inequality */ +@@ -345,6 +346,8 @@ void xt_free_table_info(struct xt_table_info *info); + */ + DECLARE_PER_CPU(seqcount_t, xt_recseq); + ++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); ++ + /* xt_tee_enabled - true if x_tables needs to handle reentrancy + * + * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. +@@ -365,6 +368,9 @@ static inline unsigned int xt_write_recseq_begin(void) + { + unsigned int addend; + ++ /* RT protection */ ++ local_lock(xt_write_lock); ++ + /* + * Low order bit of sequence is set if we already + * called xt_write_recseq_begin(). 
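For illustration (not part of the patch): the local lock taken in xt_write_recseq_begin() above is dropped again in xt_write_recseq_end() in the next hunk, so callers keep the usual strictly nested pairing on the packet path. A hedged sketch of that calling pattern, with my_do_table() and the elided rule walk standing in for the real traversal:

#include <linux/bottom_half.h>
#include <linux/netfilter/x_tables.h>

static unsigned int my_do_table(void)           /* hypothetical caller */
{
        unsigned int addend;

        local_bh_disable();
        addend = xt_write_recseq_begin();       /* takes xt_write_lock on RT */

        /* ... evaluate rules, bump per-CPU counters ... */

        xt_write_recseq_end(addend);            /* drops xt_write_lock on RT */
        local_bh_enable();
        return 0;                               /* verdict handling elided */
}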
+@@ -395,6 +401,7 @@ static inline void xt_write_recseq_end(unsigned int addend) + /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ + smp_wmb(); + __this_cpu_add(xt_recseq.sequence, addend); ++ local_unlock(xt_write_lock); + } + + /* +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h +index 798dee258..5a337a14b 100644 +--- a/include/linux/nfs_fs.h ++++ b/include/linux/nfs_fs.h +@@ -164,7 +164,11 @@ struct nfs_inode { + + /* Readers: in-flight sillydelete RPC calls */ + /* Writers: rmdir */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct semaphore rmdir_sem; ++#else + struct rw_semaphore rmdir_sem; ++#endif + struct mutex commit_mutex; + + #if IS_ENABLED(CONFIG_NFS_V4) +diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h +index 62cf39e74..db4c05957 100644 +--- a/include/linux/nfs_xdr.h ++++ b/include/linux/nfs_xdr.h +@@ -1551,7 +1551,7 @@ struct nfs_unlinkdata { + struct nfs_removeargs args; + struct nfs_removeres res; + struct dentry *dentry; +- wait_queue_head_t wq; ++ struct swait_queue_head wq; + struct rpc_cred *cred; + struct nfs_fattr dir_attr; + long timeout; +diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h +index 78abe15f7..9d48079be 100644 +--- a/include/linux/percpu-rwsem.h ++++ b/include/linux/percpu-rwsem.h +@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \ + extern int __percpu_down_read(struct percpu_rw_semaphore *, int); + extern void __percpu_up_read(struct percpu_rw_semaphore *); + +-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) ++static inline void percpu_down_read(struct percpu_rw_semaphore *sem) + { + might_sleep(); + +@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * + this_cpu_inc(*sem->read_count); + if (unlikely(!rcu_sync_is_idle(&sem->rss))) + __percpu_down_read(sem, false); /* Unconditional memory barrier */ +- barrier(); + /* +- * The barrier() prevents the compiler from ++ * The preempt_enable() prevents the compiler from + * bleeding the critical section out. + */ +-} +- +-static inline void percpu_down_read(struct percpu_rw_semaphore *sem) +-{ +- percpu_down_read_preempt_disable(sem); + preempt_enable(); + } + +@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) + return ret; + } + +-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) ++static inline void percpu_up_read(struct percpu_rw_semaphore *sem) + { +- /* +- * The barrier() prevents the compiler from +- * bleeding the critical section out. +- */ +- barrier(); ++ preempt_disable(); + /* + * Same as in percpu_down_read(). 
+ */ +@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem + rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); + } + +-static inline void percpu_up_read(struct percpu_rw_semaphore *sem) +-{ +- preempt_disable(); +- percpu_up_read_preempt_enable(sem); +-} +- + extern void percpu_down_write(struct percpu_rw_semaphore *); + extern void percpu_up_write(struct percpu_rw_semaphore *); + +diff --git a/include/linux/percpu.h b/include/linux/percpu.h +index 70b7123f3..24421bf8c 100644 +--- a/include/linux/percpu.h ++++ b/include/linux/percpu.h +@@ -19,6 +19,35 @@ + #define PERCPU_MODULE_RESERVE 0 + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++#define get_local_var(var) (*({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(&var); })) ++ ++#define put_local_var(var) do { \ ++ (void)&(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++# define get_local_ptr(var) ({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(var); }) ++ ++# define put_local_ptr(var) do { \ ++ (void)(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++#else ++ ++#define get_local_var(var) get_cpu_var(var) ++#define put_local_var(var) put_cpu_var(var) ++#define get_local_ptr(var) get_cpu_ptr(var) ++#define put_local_ptr(var) put_cpu_ptr(var) ++ ++#endif ++ + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + +diff --git a/include/linux/pid.h b/include/linux/pid.h +index 14a9a39da..a9026a5da 100644 +--- a/include/linux/pid.h ++++ b/include/linux/pid.h +@@ -3,6 +3,7 @@ + #define _LINUX_PID_H + + #include ++#include + + enum pid_type + { +diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h +index ee7e987ea..3e6c91bdf 100644 +--- a/include/linux/posix-timers.h ++++ b/include/linux/posix-timers.h +@@ -15,6 +15,7 @@ struct cpu_timer_list { + u64 expires, incr; + struct task_struct *task; + int firing; ++ int firing_cpu; + }; + + /* +@@ -114,8 +115,8 @@ struct k_itimer { + struct { + struct alarm alarmtimer; + } alarm; +- struct rcu_head rcu; + } it; ++ struct rcu_head rcu; + }; + + void run_posix_cpu_timers(struct task_struct *task); +diff --git a/include/linux/preempt.h b/include/linux/preempt.h +index f10333a2b..d71f2111b 100644 +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -51,7 +51,11 @@ + #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) + #define NMI_OFFSET (1UL << NMI_SHIFT) + +-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#else ++# define SOFTIRQ_DISABLE_OFFSET (0) ++#endif + + /* We use the MSB mostly because its available */ + #define PREEMPT_NEED_RESCHED 0x80000000 +@@ -81,9 +85,15 @@ + #include + + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) +-#define softirq_count() (preempt_count() & SOFTIRQ_MASK) + #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define softirq_count() (preempt_count() & SOFTIRQ_MASK) ++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) ++#else ++# define softirq_count() ((unsigned long)current->softirq_nestcnt) ++extern int in_serving_softirq(void); ++#endif + + /* + * Are we doing bottom half or hardware interrupt processing? 
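A small sketch (illustrative, not part of the patch): even though softirq_count() is derived from the task on PREEMPT_RT_FULL, the context-check macros keep their usual meaning for callers, so idioms like the one below stay unchanged. The helper name my_alloc_ctx() is hypothetical:

#include <linux/preempt.h>
#include <linux/slab.h>

static void *my_alloc_ctx(size_t len)
{
        /* pick a non-sleeping allocation when called from any interrupt context */
        gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

        return kmalloc(len, gfp);
}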
+@@ -101,7 +111,6 @@ + #define in_irq() (hardirq_count()) + #define in_softirq() (softirq_count()) + #define in_interrupt() (irq_count()) +-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + #define in_nmi() (preempt_count() & NMI_MASK) + #define in_task() (!(preempt_count() & \ + (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) +@@ -118,7 +127,11 @@ + /* + * The preempt_count offset after spin_lock() + */ ++#if !defined(CONFIG_PREEMPT_RT_FULL) + #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET ++#else ++#define PREEMPT_LOCK_OFFSET 0 ++#endif + + /* + * The preempt_count offset needed for things like: +@@ -167,6 +180,20 @@ extern void preempt_count_sub(int val); + #define preempt_count_inc() preempt_count_add(1) + #define preempt_count_dec() preempt_count_sub(1) + ++#ifdef CONFIG_PREEMPT_LAZY ++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) ++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) ++#define inc_preempt_lazy_count() add_preempt_lazy_count(1) ++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) ++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) ++#else ++#define add_preempt_lazy_count(val) do { } while (0) ++#define sub_preempt_lazy_count(val) do { } while (0) ++#define inc_preempt_lazy_count() do { } while (0) ++#define dec_preempt_lazy_count() do { } while (0) ++#define preempt_lazy_count() (0) ++#endif ++ + #ifdef CONFIG_PREEMPT_COUNT + + #define preempt_disable() \ +@@ -175,16 +202,53 @@ do { \ + barrier(); \ + } while (0) + ++#define preempt_lazy_disable() \ ++do { \ ++ inc_preempt_lazy_count(); \ ++ barrier(); \ ++} while (0) ++ + #define sched_preempt_enable_no_resched() \ + do { \ + barrier(); \ + preempt_count_dec(); \ + } while (0) + +-#define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++# define preempt_check_resched_rt() preempt_check_resched() ++#else ++# define preempt_enable_no_resched() preempt_enable() ++# define preempt_check_resched_rt() barrier(); ++#endif + + #define preemptible() (preempt_count() == 0 && !irqs_disabled()) + ++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++ ++extern void migrate_disable(void); ++extern void migrate_enable(void); ++ ++int __migrate_disabled(struct task_struct *p); ++ ++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++ ++extern void migrate_disable(void); ++extern void migrate_enable(void); ++static inline int __migrate_disabled(struct task_struct *p) ++{ ++ return 0; ++} ++ ++#else ++#define migrate_disable() preempt_disable() ++#define migrate_enable() preempt_enable() ++static inline int __migrate_disabled(struct task_struct *p) ++{ ++ return 0; ++} ++#endif ++ + #ifdef CONFIG_PREEMPT + #define preempt_enable() \ + do { \ +@@ -206,6 +270,13 @@ do { \ + __preempt_schedule(); \ + } while (0) + ++#define preempt_lazy_enable() \ ++do { \ ++ dec_preempt_lazy_count(); \ ++ barrier(); \ ++ preempt_check_resched(); \ ++} while (0) ++ + #else /* !CONFIG_PREEMPT */ + #define preempt_enable() \ + do { \ +@@ -213,6 +284,12 @@ do { \ + preempt_count_dec(); \ + } while (0) + ++#define preempt_lazy_enable() \ ++do { \ ++ dec_preempt_lazy_count(); \ ++ barrier(); \ ++} while (0) ++ + #define preempt_enable_notrace() \ + do { \ + barrier(); \ +@@ -251,8 +328,16 @@ do { \ + #define preempt_disable_notrace() barrier() + #define preempt_enable_no_resched_notrace() barrier() + 
#define preempt_enable_notrace() barrier() ++#define preempt_check_resched_rt() barrier() + #define preemptible() 0 + ++#define migrate_disable() barrier() ++#define migrate_enable() barrier() ++ ++static inline int __migrate_disabled(struct task_struct *p) ++{ ++ return 0; ++} + #endif /* CONFIG_PREEMPT_COUNT */ + + #ifdef MODULE +@@ -271,10 +356,22 @@ do { \ + } while (0) + #define preempt_fold_need_resched() \ + do { \ +- if (tif_need_resched()) \ ++ if (tif_need_resched_now()) \ + set_preempt_need_resched(); \ + } while (0) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define preempt_disable_rt() preempt_disable() ++# define preempt_enable_rt() preempt_enable() ++# define preempt_disable_nort() barrier() ++# define preempt_enable_nort() barrier() ++#else ++# define preempt_disable_rt() barrier() ++# define preempt_enable_rt() barrier() ++# define preempt_disable_nort() preempt_disable() ++# define preempt_enable_nort() preempt_enable() ++#endif ++ + #ifdef CONFIG_PREEMPT_NOTIFIERS + + struct preempt_notifier; +diff --git a/include/linux/printk.h b/include/linux/printk.h +index 895a46da8..6541a4909 100644 +--- a/include/linux/printk.h ++++ b/include/linux/printk.h +@@ -140,9 +140,11 @@ struct va_format { + #ifdef CONFIG_EARLY_PRINTK + extern asmlinkage __printf(1, 2) + void early_printk(const char *fmt, ...); ++extern void printk_kill(void); + #else + static inline __printf(1, 2) __cold + void early_printk(const char *s, ...) { } ++static inline void printk_kill(void) { } + #endif + + #ifdef CONFIG_PRINTK_NMI +diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h +index 34149e8b5..affb0fc4c 100644 +--- a/include/linux/radix-tree.h ++++ b/include/linux/radix-tree.h +@@ -330,6 +330,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, + int radix_tree_preload(gfp_t gfp_mask); + int radix_tree_maybe_preload(gfp_t gfp_mask); + int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); ++void radix_tree_preload_end(void); ++ + void radix_tree_init(void); + void *radix_tree_tag_set(struct radix_tree_root *, + unsigned long index, unsigned int tag); +@@ -349,11 +351,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, + unsigned int max_items, unsigned int tag); + int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); + +-static inline void radix_tree_preload_end(void) +-{ +- preempt_enable(); +-} +- + int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); + int radix_tree_split(struct radix_tree_root *, unsigned long index, + unsigned new_order); +diff --git a/include/linux/random.h b/include/linux/random.h +index 37209b3b2..c4342f765 100644 +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -32,7 +32,7 @@ static inline void add_latent_entropy(void) {} + + extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) __latent_entropy; +-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; ++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy; + + extern void get_random_bytes(void *buf, int nbytes); + extern int wait_for_random_bytes(void); +diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h +index fcbeed405..2aa2aec35 100644 +--- a/include/linux/rbtree.h ++++ b/include/linux/rbtree.h +@@ -31,7 +31,7 @@ + + #include + #include +-#include ++#include + + struct rb_node { + unsigned long __rb_parent_color; +diff --git a/include/linux/rcu_assign_pointer.h 
b/include/linux/rcu_assign_pointer.h +new file mode 100644 +index 000000000..7066962a4 +--- /dev/null ++++ b/include/linux/rcu_assign_pointer.h +@@ -0,0 +1,54 @@ ++#ifndef __LINUX_RCU_ASSIGN_POINTER_H__ ++#define __LINUX_RCU_ASSIGN_POINTER_H__ ++#include ++#include ++ ++/** ++ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable ++ * @v: The value to statically initialize with. ++ */ ++#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) ++ ++/** ++ * rcu_assign_pointer() - assign to RCU-protected pointer ++ * @p: pointer to assign to ++ * @v: value to assign (publish) ++ * ++ * Assigns the specified value to the specified RCU-protected ++ * pointer, ensuring that any concurrent RCU readers will see ++ * any prior initialization. ++ * ++ * Inserts memory barriers on architectures that require them ++ * (which is most of them), and also prevents the compiler from ++ * reordering the code that initializes the structure after the pointer ++ * assignment. More importantly, this call documents which pointers ++ * will be dereferenced by RCU read-side code. ++ * ++ * In some special cases, you may use RCU_INIT_POINTER() instead ++ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due ++ * to the fact that it does not constrain either the CPU or the compiler. ++ * That said, using RCU_INIT_POINTER() when you should have used ++ * rcu_assign_pointer() is a very bad thing that results in ++ * impossible-to-diagnose memory corruption. So please be careful. ++ * See the RCU_INIT_POINTER() comment header for details. ++ * ++ * Note that rcu_assign_pointer() evaluates each of its arguments only ++ * once, appearances notwithstanding. One of the "extra" evaluations ++ * is in typeof() and the other visible only to sparse (__CHECKER__), ++ * neither of which actually execute the argument. As with most cpp ++ * macros, this execute-arguments-only-once property is important, so ++ * please be careful when making changes to rcu_assign_pointer() and the ++ * other macros that it invokes. ++ */ ++#define rcu_assign_pointer(p, v) \ ++({ \ ++ uintptr_t _r_a_p__v = (uintptr_t)(v); \ ++ \ ++ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ ++ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ ++ else \ ++ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ ++ _r_a_p__v; \ ++}) ++ ++#endif +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h +index 68cbe1114..08d64e571 100644 +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + + #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) + #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) +@@ -55,7 +56,11 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); + #define call_rcu call_rcu_sched + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define call_rcu_bh call_rcu ++#else + void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); ++#endif + void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); + void synchronize_sched(void); + void rcu_barrier_tasks(void); +@@ -73,6 +78,11 @@ void synchronize_rcu(void); + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
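Editorial note: the patch moves RCU_INITIALIZER()/rcu_assign_pointer() into the new rcu_assign_pointer.h header so other RT headers can use pointer publication without pulling in all of rcupdate.h. As a reminder of the pattern the kernel-doc above describes, here is a minimal publish/subscribe sketch; it is not from the patch, the names (demo_cfg, demo_update_cfg, demo_read_threshold) are invented, and reclamation of the old pointer (normally kfree_rcu()) is omitted for brevity.

/* Illustrative sketch only -- not from the patch. */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_cfg {
	int threshold;
};

static struct demo_cfg __rcu *demo_cfg;

static int demo_update_cfg(int threshold)
{
	struct demo_cfg *new = kmalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	new->threshold = threshold;
	/* Publish: the release ordering guarantees readers see the init above. */
	rcu_assign_pointer(demo_cfg, new);
	/* Old config intentionally not reclaimed here; real code would kfree_rcu() it. */
	return 0;
}

static int demo_read_threshold(void)
{
	struct demo_cfg *cfg;
	int val = 0;

	rcu_read_lock();
	cfg = rcu_dereference(demo_cfg);
	if (cfg)
		val = cfg->threshold;
	rcu_read_unlock();
	return val;
}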
+ */ + #define rcu_preempt_depth() (current->rcu_read_lock_nesting) ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++#else ++static inline int sched_rcu_preempt_depth(void) { return 0; } ++#endif + + #else /* #ifdef CONFIG_PREEMPT_RCU */ + +@@ -96,6 +106,8 @@ static inline int rcu_preempt_depth(void) + return 0; + } + ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++ + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + + /* Internal to kernel */ +@@ -253,7 +265,14 @@ extern struct lockdep_map rcu_sched_lock_map; + extern struct lockdep_map rcu_callback_map; + int debug_lockdep_rcu_enabled(void); + int rcu_read_lock_held(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int rcu_read_lock_bh_held(void) ++{ ++ return rcu_read_lock_held(); ++} ++#else + int rcu_read_lock_bh_held(void); ++#endif + int rcu_read_lock_sched_held(void); + + #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +@@ -362,54 +381,6 @@ static inline void rcu_preempt_sleep_check(void) { } + ((typeof(*p) __force __kernel *)(________p1)); \ + }) + +-/** +- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable +- * @v: The value to statically initialize with. +- */ +-#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) +- +-/** +- * rcu_assign_pointer() - assign to RCU-protected pointer +- * @p: pointer to assign to +- * @v: value to assign (publish) +- * +- * Assigns the specified value to the specified RCU-protected +- * pointer, ensuring that any concurrent RCU readers will see +- * any prior initialization. +- * +- * Inserts memory barriers on architectures that require them +- * (which is most of them), and also prevents the compiler from +- * reordering the code that initializes the structure after the pointer +- * assignment. More importantly, this call documents which pointers +- * will be dereferenced by RCU read-side code. +- * +- * In some special cases, you may use RCU_INIT_POINTER() instead +- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due +- * to the fact that it does not constrain either the CPU or the compiler. +- * That said, using RCU_INIT_POINTER() when you should have used +- * rcu_assign_pointer() is a very bad thing that results in +- * impossible-to-diagnose memory corruption. So please be careful. +- * See the RCU_INIT_POINTER() comment header for details. +- * +- * Note that rcu_assign_pointer() evaluates each of its arguments only +- * once, appearances notwithstanding. One of the "extra" evaluations +- * is in typeof() and the other visible only to sparse (__CHECKER__), +- * neither of which actually execute the argument. As with most cpp +- * macros, this execute-arguments-only-once property is important, so +- * please be careful when making changes to rcu_assign_pointer() and the +- * other macros that it invokes. 
+- */ +-#define rcu_assign_pointer(p, v) \ +-({ \ +- uintptr_t _r_a_p__v = (uintptr_t)(v); \ +- \ +- if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ +- WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ +- else \ +- smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ +- _r_a_p__v; \ +-}) +- + /** + * rcu_swap_protected() - swap an RCU and a regular pointer + * @rcu_ptr: RCU pointer +@@ -701,10 +672,14 @@ static inline void rcu_read_unlock(void) + static inline void rcu_read_lock_bh(void) + { + local_bh_disable(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_lock(); ++#else + __acquire(RCU_BH); + rcu_lock_acquire(&rcu_bh_lock_map); + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_lock_bh() used illegally while idle"); ++#endif + } + + /* +@@ -714,10 +689,14 @@ static inline void rcu_read_lock_bh(void) + */ + static inline void rcu_read_unlock_bh(void) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_unlock(); ++#else + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_unlock_bh() used illegally while idle"); + rcu_lock_release(&rcu_bh_lock_map); + __release(RCU_BH); ++#endif + local_bh_enable(); + } + +diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h +index 914655848..462ce061b 100644 +--- a/include/linux/rcutree.h ++++ b/include/linux/rcutree.h +@@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu) + rcu_note_context_switch(false); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define synchronize_rcu_bh synchronize_rcu ++#else + void synchronize_rcu_bh(void); ++#endif + void synchronize_sched_expedited(void); + void synchronize_rcu_expedited(void); + +@@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void) + } + + void rcu_barrier(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define rcu_barrier_bh rcu_barrier ++#else + void rcu_barrier_bh(void); ++#endif + void rcu_barrier_sched(void); + bool rcu_eqs_special_set(int cpu); + unsigned long get_state_synchronize_rcu(void); +diff --git a/include/linux/reservation.h b/include/linux/reservation.h +index 02166e815..0b31df1af 100644 +--- a/include/linux/reservation.h ++++ b/include/linux/reservation.h +@@ -72,7 +72,7 @@ struct reservation_object_list { + */ + struct reservation_object { + struct ww_mutex lock; +- seqcount_t seq; ++ seqlock_t seq; + + struct dma_fence __rcu *fence_excl; + struct reservation_object_list __rcu *fence; +@@ -92,7 +92,7 @@ reservation_object_init(struct reservation_object *obj) + { + ww_mutex_init(&obj->lock, &reservation_ww_class); + +- __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); ++ seqlock_init(&obj->seq); + RCU_INIT_POINTER(obj->fence, NULL); + RCU_INIT_POINTER(obj->fence_excl, NULL); + obj->staged = NULL; +diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h +index 6fd615a0e..138bd1e18 100644 +--- a/include/linux/rtmutex.h ++++ b/include/linux/rtmutex.h +@@ -14,11 +14,15 @@ + #define __LINUX_RT_MUTEX_H + + #include ++#include + #include +-#include + + extern int max_lock_depth; /* for sysctl */ + ++#ifdef CONFIG_DEBUG_MUTEXES ++#include ++#endif ++ + /** + * The rt_mutex structure + * +@@ -31,8 +35,8 @@ struct rt_mutex { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +-#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; ++#ifdef CONFIG_DEBUG_RT_MUTEXES + const char *name, *file; + int line; + void *magic; +@@ -82,16 +86,23 @@ do { \ + #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) + #endif + +-#define __RT_MUTEX_INITIALIZER(mutexname) \ +- { .wait_lock = 
__RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ ++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .waiters = RB_ROOT_CACHED \ + , .owner = NULL \ + __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ +- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} ++ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) ++ ++#define __RT_MUTEX_INITIALIZER(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } + + #define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) + ++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ , .save_state = 1 } ++ + /** + * rt_mutex_is_locked - is the mutex locked + * @lock: the mutex to be queried +@@ -115,6 +126,7 @@ extern void rt_mutex_lock(struct rt_mutex *lock); + #endif + + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); ++extern int rt_mutex_lock_killable(struct rt_mutex *lock); + extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout); + +diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h +new file mode 100644 +index 000000000..a9c4c2ac4 +--- /dev/null ++++ b/include/linux/rwlock_rt.h +@@ -0,0 +1,119 @@ ++#ifndef __LINUX_RWLOCK_RT_H ++#define __LINUX_RWLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. Use spinlock.h ++#endif ++ ++extern void __lockfunc rt_write_lock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_lock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); ++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); ++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); ++extern int __lockfunc rt_read_can_lock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_can_lock(rwlock_t *rwlock); ++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); ++ ++#define read_can_lock(rwlock) rt_read_can_lock(rwlock) ++#define write_can_lock(rwlock) rt_write_can_lock(rwlock) ++ ++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) ++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) ++ ++static inline int __write_trylock_rt_irqsave(rwlock_t *lock, unsigned long *flags) ++{ ++ /* XXX ARCH_IRQ_ENABLED */ ++ *flags = 0; ++ return rt_write_trylock(lock); ++} ++ ++#define write_trylock_irqsave(lock, flags) \ ++ __cond_lock(lock, __write_trylock_rt_irqsave(lock, &(flags))) ++ ++#define read_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ rt_read_lock(lock); \ ++ flags = 0; \ ++ } while (0) ++ ++#define write_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ rt_write_lock(lock); \ ++ flags = 0; \ ++ } while (0) ++ ++#define read_lock(lock) rt_read_lock(lock) ++ ++#define read_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_irq(lock) read_lock(lock) ++ ++#define write_lock(lock) rt_write_lock(lock) ++ ++#define write_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_irq(lock) write_lock(lock) ++ ++#define read_unlock(lock) rt_read_unlock(lock) ++ ++#define read_unlock_bh(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define read_unlock_irq(lock) read_unlock(lock) ++ ++#define write_unlock(lock) rt_write_unlock(lock) ++ ++#define write_unlock_bh(lock) \ 
++ do { \ ++ rt_write_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define write_unlock_irq(lock) write_unlock(lock) ++ ++#define read_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_read_unlock(lock); \ ++ } while (0) ++ ++#define write_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_write_unlock(lock); \ ++ } while (0) ++ ++#define rwlock_init(rwl) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __rt_rwlock_init(rwl, #rwl, &__key); \ ++} while (0) ++ ++/* ++ * Internal functions made global for CPU pinning ++ */ ++void __read_rt_lock(struct rt_rw_lock *lock); ++int __read_rt_trylock(struct rt_rw_lock *lock); ++void __write_rt_lock(struct rt_rw_lock *lock); ++int __write_rt_trylock(struct rt_rw_lock *lock); ++void __read_rt_unlock(struct rt_rw_lock *lock); ++void __write_rt_unlock(struct rt_rw_lock *lock); ++ ++#endif +diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h +index 857a72ceb..c21683f3e 100644 +--- a/include/linux/rwlock_types.h ++++ b/include/linux/rwlock_types.h +@@ -1,6 +1,10 @@ + #ifndef __LINUX_RWLOCK_TYPES_H + #define __LINUX_RWLOCK_TYPES_H + ++#if !defined(__LINUX_SPINLOCK_TYPES_H) ++# error "Do not include directly, include spinlock_types.h" ++#endif ++ + /* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers +diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h +new file mode 100644 +index 000000000..546a1f8f1 +--- /dev/null ++++ b/include/linux/rwlock_types_rt.h +@@ -0,0 +1,55 @@ ++#ifndef __LINUX_RWLOCK_TYPES_RT_H ++#define __LINUX_RWLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define RW_DEP_MAP_INIT(lockname) ++#endif ++ ++typedef struct rt_rw_lock rwlock_t; ++ ++#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name) ++ ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name = __RW_LOCK_UNLOCKED(name) ++ ++/* ++ * A reader biased implementation primarily for CPU pinning. 
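Editorial note on the new rwlock_rt.h above: on PREEMPT_RT an rwlock_t becomes a sleeping, rtmutex-based lock, so the *_irqsave()/*_irqrestore() variants no longer disable interrupts; the flags argument is only type-checked and forced to 0. Callers keep the usual API unchanged, as in this sketch (not part of the patch; demo_* names are invented):

/* Illustrative sketch only -- not from the patch. */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);
static int demo_value;

static int demo_read(void)
{
	unsigned long flags;
	int v;

	read_lock_irqsave(&demo_lock, flags);	/* on RT: rt_read_lock(), flags becomes 0 */
	v = demo_value;
	read_unlock_irqrestore(&demo_lock, flags);
	return v;
}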
++ * ++ * Can be selected as general replacement for the single reader RT rwlock ++ * variant ++ */ ++struct rt_rw_lock { ++ struct rt_mutex rtmutex; ++ atomic_t readers; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define READER_BIAS (1U << 31) ++#define WRITER_BIAS (1U << 30) ++ ++#define __RWLOCK_RT_INITIALIZER(name) \ ++{ \ ++ .readers = ATOMIC_INIT(READER_BIAS), \ ++ .rtmutex = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.rtmutex), \ ++ RW_DEP_MAP_INIT(name) \ ++} ++ ++void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name, ++ struct lock_class_key *key); ++ ++#define rwlock_biased_rt_init(rwlock) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __rwlock_biased_rt_init((rwlock), #rwlock, &__key); \ ++ } while (0) ++ ++#endif +diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h +index ab93b6eae..b1e32373f 100644 +--- a/include/linux/rwsem.h ++++ b/include/linux/rwsem.h +@@ -20,6 +20,10 @@ + #include + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#include ++#else /* PREEMPT_RT_FULL */ ++ + struct rw_semaphore; + + #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +@@ -114,6 +118,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) + return !list_empty(&sem->wait_list); + } + ++#endif /* !PREEMPT_RT_FULL */ ++ ++/* ++ * The functions below are the same for all rwsem implementations including ++ * the RT specific variant. ++ */ ++ + /* + * lock for reading + */ +diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h +new file mode 100644 +index 000000000..3fb092b7b +--- /dev/null ++++ b/include/linux/rwsem_rt.h +@@ -0,0 +1,69 @@ ++#ifndef _LINUX_RWSEM_RT_H ++#define _LINUX_RWSEM_RT_H ++ ++#ifndef _LINUX_RWSEM_H ++#error "Include rwsem.h" ++#endif ++ ++#include ++#include ++ ++#define READER_BIAS (1U << 31) ++#define WRITER_BIAS (1U << 30) ++ ++struct rw_semaphore { ++ atomic_t readers; ++ struct rt_mutex rtmutex; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __RWSEM_INITIALIZER(name) \ ++{ \ ++ .readers = ATOMIC_INIT(READER_BIAS), \ ++ .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \ ++ RW_DEP_MAP_INIT(name) \ ++} ++ ++#define DECLARE_RWSEM(lockname) \ ++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) ++ ++extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key); ++ ++#define __init_rwsem(sem, name, key) \ ++do { \ ++ rt_mutex_init(&(sem)->rtmutex); \ ++ __rwsem_init((sem), (name), (key)); \ ++} while (0) ++ ++#define init_rwsem(sem) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __init_rwsem((sem), #sem, &__key); \ ++} while (0) ++ ++static inline int rwsem_is_locked(struct rw_semaphore *sem) ++{ ++ return atomic_read(&sem->readers) != READER_BIAS; ++} ++ ++static inline int rwsem_is_contended(struct rw_semaphore *sem) ++{ ++ return atomic_read(&sem->readers) > 0; ++} ++ ++extern void __down_read(struct rw_semaphore *sem); ++extern int __down_read_interruptible(struct rw_semaphore *sem); ++extern int __down_read_killable(struct rw_semaphore *sem); ++extern int __down_read_trylock(struct rw_semaphore *sem); ++extern void __down_write(struct rw_semaphore *sem); ++extern int __must_check __down_write_killable(struct rw_semaphore *sem); ++extern int __down_write_trylock(struct rw_semaphore *sem); ++extern void __up_read(struct rw_semaphore *sem); ++extern void __up_write(struct rw_semaphore *sem); ++extern void __downgrade_write(struct rw_semaphore *sem); ++ ++#endif +diff --git 
a/include/linux/sched.h b/include/linux/sched.h +index 0202fe007..b11ac5d80 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + + /* task_struct member predeclarations (sorted alphabetically): */ + struct audit_context; +@@ -102,12 +103,8 @@ struct task_group; + __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ + TASK_PARKED) + +-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) +- + #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) + +-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) +- + #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ + (task->state & TASK_NOLOAD) == 0) +@@ -135,6 +132,9 @@ struct task_group; + smp_store_mb(current->state, (state_value)); \ + } while (0) + ++#define __set_current_state_no_track(state_value) \ ++ current->state = (state_value); ++ + #define set_special_state(state_value) \ + do { \ + unsigned long flags; /* may shadow */ \ +@@ -144,6 +144,7 @@ struct task_group; + current->state = (state_value); \ + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ + } while (0) ++ + #else + /* + * set_current_state() includes a barrier so that the write of current->state +@@ -188,6 +189,9 @@ struct task_group; + #define set_current_state(state_value) \ + smp_store_mb(current->state, (state_value)) + ++#define __set_current_state_no_track(state_value) \ ++ __set_current_state(state_value) ++ + /* + * set_special_state() should be used for those states when the blocking task + * can not use the regular condition based wait-loop. In that case we must +@@ -224,6 +228,8 @@ extern void io_schedule_finish(int token); + extern long io_schedule_timeout(long timeout); + extern void io_schedule(void); + ++int cpu_nr_pinned(int cpu); ++ + /** + * struct prev_cputime - snapshot of system and user cputime + * @utime: time spent in user mode +@@ -613,6 +619,8 @@ struct task_struct { + #endif + /* -1 unrunnable, 0 runnable, >0 stopped: */ + volatile long state; ++ /* saved state for "spinlock sleepers" */ ++ volatile long saved_state; + + /* + * This begins the randomizable portion of task_struct. Only +@@ -673,7 +681,22 @@ struct task_struct { + + unsigned int policy; + int nr_cpus_allowed; +- cpumask_t cpus_allowed; ++ const cpumask_t *cpus_ptr; ++ cpumask_t cpus_mask; ++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++ int migrate_disable; ++ bool migrate_disable_scheduled; ++# ifdef CONFIG_SCHED_DEBUG ++ int pinned_on_cpu; ++# endif ++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++# ifdef CONFIG_SCHED_DEBUG ++ int migrate_disable; ++# endif ++#endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int sleeping_lock; ++#endif + + #ifdef CONFIG_PREEMPT_RCU + int rcu_read_lock_nesting; +@@ -837,6 +860,9 @@ struct task_struct { + #ifdef CONFIG_POSIX_TIMERS + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *posix_timer_list; ++#endif + #endif + + /* Process credentials: */ +@@ -881,11 +907,17 @@ struct task_struct { + /* Signal handlers: */ + struct signal_struct *signal; + struct sighand_struct *sighand; ++ struct sigqueue *sigqueue_cache; ++ + sigset_t blocked; + sigset_t real_blocked; + /* Restored if set_restore_sigmask() was used: */ + sigset_t saved_sigmask; + struct sigpending pending; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* TODO: move me into ->restart_block ? 
*/ ++ struct siginfo forced_info; ++#endif + unsigned long sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; +@@ -910,6 +942,7 @@ struct task_struct { + raw_spinlock_t pi_lock; + + struct wake_q_node wake_q; ++ struct wake_q_node wake_q_sleeper; + + #ifdef CONFIG_RT_MUTEXES + /* PI waiters blocked on a rt_mutex held by this task: */ +@@ -1199,8 +1232,22 @@ struct task_struct { + unsigned int sequential_io; + unsigned int sequential_io_avg; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head put_rcu; ++ int softirq_nestcnt; ++ unsigned int softirqs_raised; ++#endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 ++ int kmap_idx; ++ pte_t kmap_pte[KM_TYPE_NR]; ++# endif ++#endif + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + unsigned long task_state_change; ++#endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int xmit_recursion; + #endif + int pagefault_disabled; + #ifdef CONFIG_MMU +@@ -1419,6 +1466,7 @@ extern struct pid *cad_pid; + /* + * Per process flags + */ ++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ + #define PF_IDLE 0x00000002 /* I am an IDLE thread */ + #define PF_EXITING 0x00000004 /* Getting shut down */ + #define PF_RELIABLE 0x00000008 /* Allocate from reliable memory */ +@@ -1443,7 +1491,7 @@ extern struct pid *cad_pid; + #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ + #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ + #define PF_UCE_KERNEL_RECOVERY 0x02000000 /* Task in uce kernel recovery state */ +-#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ ++#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ + #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ + #define PF_IO_WORKER 0x20000000 /* Task is an IO worker */ + #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +@@ -1648,6 +1696,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); + + extern int wake_up_state(struct task_struct *tsk, unsigned int state); + extern int wake_up_process(struct task_struct *tsk); ++extern int wake_up_lock_sleeper(struct task_struct *tsk); + extern void wake_up_new_task(struct task_struct *tsk); + + #ifdef CONFIG_SMP +@@ -1730,6 +1779,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); ++} ++ ++static inline int need_resched_lazy(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++#else ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } ++static inline int need_resched_lazy(void) { return 0; } ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++#endif ++ ++ ++static inline bool __task_is_stopped_or_traced(struct task_struct *task) ++{ ++ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#ifdef 
CONFIG_PREEMPT_RT_FULL ++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#endif ++ return false; ++} ++ ++static inline bool task_is_stopped_or_traced(struct task_struct *task) ++{ ++ bool traced_stopped; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ traced_stopped = __task_is_stopped_or_traced(task); ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++#else ++ traced_stopped = __task_is_stopped_or_traced(task); ++#endif ++ return traced_stopped; ++} ++ ++static inline bool task_is_traced(struct task_struct *task) ++{ ++ bool traced = false; ++ ++ if (task->state & __TASK_TRACED) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* in case the task is sleeping on tasklist_lock */ ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->state & __TASK_TRACED) ++ traced = true; ++ else if (task->saved_state & __TASK_TRACED) ++ traced = true; ++ raw_spin_unlock_irq(&task->pi_lock); ++#endif ++ return traced; ++} ++ + /* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. The return +@@ -1782,6 +1914,23 @@ static __always_inline bool need_resched(void) + return unlikely(tif_need_resched()); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline void sleeping_lock_inc(void) ++{ ++ current->sleeping_lock++; ++} ++ ++static inline void sleeping_lock_dec(void) ++{ ++ current->sleeping_lock--; ++} ++ ++#else ++ ++static inline void sleeping_lock_inc(void) { } ++static inline void sleeping_lock_dec(void) { } ++#endif ++ + /* + * Wrappers for p->thread_info->cpu access. No-op on UP. + */ +@@ -1953,6 +2102,8 @@ static inline void rseq_syscall(struct pt_regs *regs) + + #endif + ++extern struct task_struct *takedown_cpu_task; ++ + #ifdef CONFIG_QOS_SCHED + void sched_move_offline_task(struct task_struct *p); + void sched_qos_offline_wait(void); +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h +index bd762e7a2..c671d19ff 100644 +--- a/include/linux/sched/mm.h ++++ b/include/linux/sched/mm.h +@@ -49,6 +49,17 @@ static inline void mmdrop(struct mm_struct *mm) + __mmdrop(mm); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __mmdrop_delayed(struct rcu_head *rhp); ++static inline void mmdrop_delayed(struct mm_struct *mm) ++{ ++ if (atomic_dec_and_test(&mm->mm_count)) ++ call_rcu(&mm->delayed_drop, __mmdrop_delayed); ++} ++#else ++# define mmdrop_delayed(mm) mmdrop(mm) ++#endif ++ + void mmdrop(struct mm_struct *mm); + + /* +diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h +index c8b52b3ec..9c9832404 100644 +--- a/include/linux/sched/task.h ++++ b/include/linux/sched/task.h +@@ -91,6 +91,21 @@ extern void sched_exec(void); + + #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __put_task_struct_cb(struct rcu_head *rhp); ++ ++static inline void put_task_struct(struct task_struct *t) ++{ ++ if (atomic_dec_and_test(&t->usage)) ++ call_rcu(&t->put_rcu, __put_task_struct_cb); ++} ++static inline void put_task_struct_many(struct task_struct *t, int nr) ++{ ++ if (atomic_sub_and_test(nr, &t->usage)) ++ call_rcu(&t->put_rcu, __put_task_struct_cb); ++} ++ ++#else + extern void __put_task_struct(struct task_struct *t); + + static inline void put_task_struct(struct task_struct *t) +@@ -104,6 +119,7 @@ static inline void put_task_struct_many(struct task_struct *t, int nr) + if (atomic_sub_and_test(nr, &t->usage)) + __put_task_struct(t); + } ++#endif + + 
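Editorial note on the sched/task.h hunk above: with PREEMPT_RT_BASE the final put_task_struct() is deferred through call_rcu() (__put_task_struct_cb) rather than freeing immediately; the usual rationale in the RT tree is that the last reference can be dropped from atomic context, where the sleeping locks taken on the free path would be illegal. The same deferred-free pattern, applied to a made-up object, looks like this (illustrative sketch, not from the patch):

/* Illustrative sketch only -- not from the patch; names are made up. */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	atomic_t usage;
	struct rcu_head rcu;
	/* ... payload ... */
};

static void demo_free_rcu(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct demo_obj, rcu));
}

static void demo_put(struct demo_obj *obj)
{
	/* Safe from atomic context: the actual free runs after a grace period. */
	if (atomic_dec_and_test(&obj->usage))
		call_rcu(&obj->rcu, demo_free_rcu);
}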
void put_task_struct_rcu_user(struct task_struct *task); + +diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h +index 545f37138..e33edee35 100644 +--- a/include/linux/sched/wake_q.h ++++ b/include/linux/sched/wake_q.h +@@ -51,8 +51,29 @@ static inline void wake_q_init(struct wake_q_head *head) + head->lastp = &head->first; + } + +-extern void wake_q_add(struct wake_q_head *head, +- struct task_struct *task); +-extern void wake_up_q(struct wake_q_head *head); ++extern void __wake_q_add(struct wake_q_head *head, ++ struct task_struct *task, bool sleeper); ++static inline void wake_q_add(struct wake_q_head *head, ++ struct task_struct *task) ++{ ++ __wake_q_add(head, task, false); ++} ++ ++static inline void wake_q_add_sleeper(struct wake_q_head *head, ++ struct task_struct *task) ++{ ++ __wake_q_add(head, task, true); ++} ++ ++extern void __wake_up_q(struct wake_q_head *head, bool sleeper); ++static inline void wake_up_q(struct wake_q_head *head) ++{ ++ __wake_up_q(head, false); ++} ++ ++static inline void wake_up_q_sleeper(struct wake_q_head *head) ++{ ++ __wake_up_q(head, true); ++} + + #endif /* _LINUX_SCHED_WAKE_Q_H */ +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h +index bcf4cf26b..58f9909d6 100644 +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h +@@ -221,20 +221,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) + return __read_seqcount_retry(s, start); + } + +- +- +-static inline void raw_write_seqcount_begin(seqcount_t *s) ++static inline void __raw_write_seqcount_begin(seqcount_t *s) + { + s->sequence++; + smp_wmb(); + } + +-static inline void raw_write_seqcount_end(seqcount_t *s) ++static inline void raw_write_seqcount_begin(seqcount_t *s) ++{ ++ preempt_disable_rt(); ++ __raw_write_seqcount_begin(s); ++} ++ ++static inline void __raw_write_seqcount_end(seqcount_t *s) + { + smp_wmb(); + s->sequence++; + } + ++static inline void raw_write_seqcount_end(seqcount_t *s) ++{ ++ __raw_write_seqcount_end(s); ++ preempt_enable_rt(); ++} ++ + /** + * raw_write_seqcount_barrier - do a seq write barrier + * @s: pointer to seqcount_t +@@ -428,10 +438,33 @@ typedef struct { + /* + * Read side functions for starting and finalizing a read side section. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline unsigned read_seqbegin(const seqlock_t *sl) + { + return read_seqcount_begin(&sl->seqcount); + } ++#else ++/* ++ * Starvation safe read side for RT ++ */ ++static inline unsigned read_seqbegin(seqlock_t *sl) ++{ ++ unsigned ret; ++ ++repeat: ++ ret = READ_ONCE(sl->seqcount.sequence); ++ if (unlikely(ret & 1)) { ++ /* ++ * Take the lock and let the writer proceed (i.e. evtl ++ * boost it), otherwise we could loop here forever. 
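Editorial note on the seqlock.h hunk above: because seqlock writers are preemptible on RT, the RT read_seqbegin() waits on the writer's lock instead of spinning on an odd sequence count, so the reader ends up boosting the writer rather than starving. For reference, a typical reader/writer pair using this API looks like the sketch below (not part of the patch; demo_* names are invented):

/* Illustrative sketch only -- not from the patch. */
#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(demo_seq);
static u64 demo_a, demo_b;

static void demo_write_pair(u64 a, u64 b)
{
	write_seqlock(&demo_seq);	/* takes the spinlock, so an RT writer can be boosted */
	demo_a = a;
	demo_b = b;
	write_sequnlock(&demo_seq);
}

static u64 demo_read_pair(void)
{
	unsigned int seq;
	u64 a, b;

	do {
		seq = read_seqbegin(&demo_seq);	/* on RT: waits for the writer instead of spinning */
		a = demo_a;
		b = demo_b;
	} while (read_seqretry(&demo_seq, seq));

	return a + b;
}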
++ */ ++ spin_unlock_wait(&sl->lock); ++ goto repeat; ++ } ++ smp_rmb(); ++ return ret; ++} ++#endif + + static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) + { +@@ -446,36 +479,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) + static inline void write_seqlock(seqlock_t *sl) + { + spin_lock(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); ++} ++ ++static inline int try_write_seqlock(seqlock_t *sl) ++{ ++ if (spin_trylock(&sl->lock)) { ++ __raw_write_seqcount_begin(&sl->seqcount); ++ return 1; ++ } ++ return 0; + } + + static inline void write_sequnlock(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock(&sl->lock); + } + + static inline void write_seqlock_bh(seqlock_t *sl) + { + spin_lock_bh(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_bh(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_bh(&sl->lock); + } + + static inline void write_seqlock_irq(seqlock_t *sl) + { + spin_lock_irq(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_irq(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irq(&sl->lock); + } + +@@ -484,7 +526,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + return flags; + } + +@@ -494,7 +536,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) + static inline void + write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irqrestore(&sl->lock, flags); + } + +diff --git a/include/linux/signal.h b/include/linux/signal.h +index 0be5ce237..6495fda18 100644 +--- a/include/linux/signal.h ++++ b/include/linux/signal.h +@@ -245,6 +245,7 @@ static inline void init_sigpending(struct sigpending *sig) + } + + extern void flush_sigqueue(struct sigpending *queue); ++extern void flush_task_sigqueue(struct task_struct *tsk); + + /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ + static inline int valid_signal(unsigned long sig) +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index e9f100bd7..1423c8487 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -287,6 +287,7 @@ struct sk_buff_head { + + __u32 qlen; + spinlock_t lock; ++ raw_spinlock_t raw_lock; + }; + + struct sk_buff; +@@ -1735,6 +1736,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) + __skb_queue_head_init(list); + } + ++static inline void skb_queue_head_init_raw(struct sk_buff_head *list) ++{ ++ raw_spin_lock_init(&list->raw_lock); ++ __skb_queue_head_init(list); ++} ++ + static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) + { +diff --git a/include/linux/smp.h b/include/linux/smp.h +index 9fb239e12..5801e516b 100644 +--- a/include/linux/smp.h ++++ b/include/linux/smp.h +@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void) + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) + #define put_cpu() preempt_enable() + ++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) ++#define put_cpu_light() migrate_enable() ++ + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: +diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h +index e089157dc..5f5ad0630 100644 +--- a/include/linux/spinlock.h ++++ b/include/linux/spinlock.h +@@ -298,7 +298,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) + }) + + /* Include rwlock functions */ +-#include ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++# include ++#endif + + /* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: +@@ -309,6 +313,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) + # include + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else /* PREEMPT_RT_FULL */ ++ + /* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ +@@ -429,6 +437,8 @@ static __always_inline int spin_is_contended(spinlock_t *lock) + + #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) + ++#endif /* !PREEMPT_RT_FULL */ ++ + /* + * Pull the atomic_t declaration: + * (asm-mips/atomic.h needs above definitions) +diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h +index 42dfab89e..29d99ae5a 100644 +--- a/include/linux/spinlock_api_smp.h ++++ b/include/linux/spinlock_api_smp.h +@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) + return 0; + } + +-#include ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++#endif + + #endif /* __LINUX_SPINLOCK_API_SMP_H */ +diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h +new file mode 100644 +index 000000000..3696a77fa +--- /dev/null ++++ b/include/linux/spinlock_rt.h +@@ -0,0 +1,156 @@ ++#ifndef __LINUX_SPINLOCK_RT_H ++#define __LINUX_SPINLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. 
Use spinlock.h ++#endif ++ ++#include ++ ++extern void ++__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key); ++ ++#define spin_lock_init(slock) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(slock)->lock); \ ++ __rt_spin_lock_init(slock, #slock, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_spin_lock(spinlock_t *lock); ++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); ++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); ++extern void __lockfunc rt_spin_unlock(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); ++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock(spinlock_t *lock); ++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); ++ ++/* ++ * lockdep-less calls, for derived types like rwlock: ++ * (for trylock they can use rt_mutex_trylock() directly. ++ * Migrate disable handling must be done at the call site. ++ */ ++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_trylock(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); ++ ++#define spin_lock(lock) rt_spin_lock(lock) ++ ++#define spin_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_irq(lock) spin_lock(lock) ++ ++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++ ++#define spin_trylock(lock) \ ++({ \ ++ int __locked; \ ++ __locked = spin_do_trylock(lock); \ ++ __locked; \ ++}) ++ ++#ifdef CONFIG_LOCKDEP ++# define spin_lock_nested(lock, subclass) \ ++ do { \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++#define spin_lock_bh_nested(lock, subclass) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++#else ++# define spin_lock_nested(lock, subclass) spin_lock(lock) ++# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++#endif ++ ++#define spin_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++ ++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) ++{ ++ unsigned long flags = 0; ++#ifdef CONFIG_TRACE_IRQFLAGS ++ flags = rt_spin_lock_trace_flags(lock); ++#else ++ spin_lock(lock); /* lock_local */ ++#endif ++ return flags; ++} ++ ++/* FIXME: we need rt_spin_lock_nest_lock */ ++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) ++ ++#define spin_unlock(lock) rt_spin_unlock(lock) ++ ++#define spin_unlock_bh(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define spin_unlock_irq(lock) spin_unlock(lock) ++ ++#define spin_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ spin_unlock(lock); \ ++ } while (0) ++ ++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) ++#define spin_trylock_irq(lock) spin_trylock(lock) ++ 
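Editorial note on spinlock_rt.h above: spin_lock() maps to rt_spin_lock(), a sleeping lock, and spin_lock_irqsave() neither disables interrupts nor saves any state; it only zeroes the flags. Code that genuinely needs interrupts masked must therefore use raw_spinlock_t, which keeps the original spinning, IRQ-off semantics. A minimal sketch of the two (not from the patch; names invented):

/* Illustrative sketch only -- not from the patch. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);		/* rtmutex-backed on RT */
static DEFINE_RAW_SPINLOCK(demo_hw_lock);	/* still a spinning, IRQ-off lock */

static void demo_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);		/* on RT: may sleep, flags == 0 */
	/* ... preemptible critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);

	raw_spin_lock_irqsave(&demo_hw_lock, flags);	/* genuinely disables interrupts */
	/* ... short, non-sleeping hardware access ... */
	raw_spin_unlock_irqrestore(&demo_hw_lock, flags);
}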
++#define spin_trylock_irqsave(lock, flags) \ ++ rt_spin_trylock_irqsave(lock, &(flags)) ++ ++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) ++ ++#ifdef CONFIG_GENERIC_LOCKBREAK ++# define spin_is_contended(lock) ((lock)->break_lock) ++#else ++# define spin_is_contended(lock) (((void)(lock), 0)) ++#endif ++ ++static inline int spin_can_lock(spinlock_t *lock) ++{ ++ return !rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline int spin_is_locked(spinlock_t *lock) ++{ ++ return rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline void assert_spin_locked(spinlock_t *lock) ++{ ++ BUG_ON(!spin_is_locked(lock)); ++} ++ ++#endif +diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h +index 24b4e6f2c..10bac715e 100644 +--- a/include/linux/spinlock_types.h ++++ b/include/linux/spinlock_types.h +@@ -9,77 +9,15 @@ + * Released under the General Public License (GPL). + */ + +-#if defined(CONFIG_SMP) +-# include +-#else +-# include +-#endif +- +-#include +- +-typedef struct raw_spinlock { +- arch_spinlock_t raw_lock; +-#ifdef CONFIG_DEBUG_SPINLOCK +- unsigned int magic, owner_cpu; +- void *owner; +-#endif +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +- struct lockdep_map dep_map; +-#endif +-} raw_spinlock_t; +- +-#define SPINLOCK_MAGIC 0xdead4ead +- +-#define SPINLOCK_OWNER_INIT ((void *)-1L) +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +-#else +-# define SPIN_DEP_MAP_INIT(lockname) +-#endif ++#include + +-#ifdef CONFIG_DEBUG_SPINLOCK +-# define SPIN_DEBUG_INIT(lockname) \ +- .magic = SPINLOCK_MAGIC, \ +- .owner_cpu = -1, \ +- .owner = SPINLOCK_OWNER_INIT, ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++# include + #else +-# define SPIN_DEBUG_INIT(lockname) ++# include ++# include ++# include + #endif + +-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +- { \ +- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +- SPIN_DEBUG_INIT(lockname) \ +- SPIN_DEP_MAP_INIT(lockname) } +- +-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ +- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +- +-typedef struct spinlock { +- union { +- struct raw_spinlock rlock; +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) +- struct { +- u8 __padding[LOCK_PADSIZE]; +- struct lockdep_map dep_map; +- }; +-#endif +- }; +-} spinlock_t; +- +-#define __SPIN_LOCK_INITIALIZER(lockname) \ +- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } +- +-#define __SPIN_LOCK_UNLOCKED(lockname) \ +- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +- +-#include +- + #endif /* __LINUX_SPINLOCK_TYPES_H */ +diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h +new file mode 100644 +index 000000000..f1dac1fb1 +--- /dev/null ++++ b/include/linux/spinlock_types_nort.h +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H ++#define __LINUX_SPINLOCK_TYPES_NORT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++/* ++ * The non RT version maps spinlocks to raw_spinlocks ++ */ ++typedef struct spinlock { ++ union { ++ struct raw_spinlock rlock; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) ++ struct { ++ u8 __padding[LOCK_PADSIZE]; ++ struct lockdep_map dep_map; ++ }; ++#endif ++ }; ++} spinlock_t; ++ ++#define __SPIN_LOCK_INITIALIZER(lockname) \ ++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } ++ ++#define __SPIN_LOCK_UNLOCKED(lockname) \ ++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++ ++#endif +diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h +new file mode 100644 +index 000000000..822bf64a6 +--- /dev/null ++++ b/include/linux/spinlock_types_raw.h +@@ -0,0 +1,55 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H ++#define __LINUX_SPINLOCK_TYPES_RAW_H ++ ++#include ++ ++#if defined(CONFIG_SMP) ++# include ++#else ++# include ++#endif ++ ++#include ++ ++typedef struct raw_spinlock { ++ arch_spinlock_t raw_lock; ++#ifdef CONFIG_DEBUG_SPINLOCK ++ unsigned int magic, owner_cpu; ++ void *owner; ++#endif ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} raw_spinlock_t; ++ ++#define SPINLOCK_MAGIC 0xdead4ead ++ ++#define SPINLOCK_OWNER_INIT ((void *)-1L) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define SPIN_DEP_MAP_INIT(lockname) ++#endif ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define SPIN_DEBUG_INIT(lockname) \ ++ .magic = SPINLOCK_MAGIC, \ ++ .owner_cpu = -1, \ ++ .owner = SPINLOCK_OWNER_INIT, ++#else ++# define SPIN_DEBUG_INIT(lockname) ++#endif ++ ++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ ++ { \ ++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ ++ SPIN_DEBUG_INIT(lockname) \ ++ SPIN_DEP_MAP_INIT(lockname) } ++ ++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ ++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) ++ ++#endif +diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h +new file mode 100644 +index 000000000..3e3d8c5f7 +--- /dev/null ++++ b/include/linux/spinlock_types_rt.h +@@ -0,0 +1,48 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RT_H ++#define __LINUX_SPINLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++#include ++ ++/* ++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: ++ */ ++typedef struct spinlock { ++ struct rt_mutex lock; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} spinlock_t; ++ ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ .file = __FILE__, \ ++ .line = __LINE__ , \ ++ } ++#else ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ } ++#endif ++ ++/* ++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) ++*/ ++ ++#define __SPIN_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ ++ SPIN_DEP_MAP_INIT(name) } ++ ++#define DEFINE_SPINLOCK(name) \ ++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) ++ ++#endif +diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h +index c09b6407a..b0243ba07 100644 +--- a/include/linux/spinlock_types_up.h ++++ b/include/linux/spinlock_types_up.h +@@ -1,10 +1,6 @@ + #ifndef __LINUX_SPINLOCK_TYPES_UP_H + #define __LINUX_SPINLOCK_TYPES_UP_H + +-#ifndef __LINUX_SPINLOCK_TYPES_H +-# error "please don't include this file directly" +-#endif +- + /* + * include/linux/spinlock_types_up.h - spinlock type definitions for UP + * +diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h +index 6d3635c86..82fc686dd 100644 +--- a/include/linux/stop_machine.h ++++ b/include/linux/stop_machine.h +@@ -26,6 +26,8 @@ struct cpu_stop_work { + cpu_stop_fn_t fn; + void *arg; + struct cpu_stop_done *done; ++ /* Did not run due to disabled stopper; for nowait debug checks */ ++ bool disabled; + }; + + int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); +diff --git a/include/linux/suspend.h b/include/linux/suspend.h +index aff248925..87c21a106 100644 +--- a/include/linux/suspend.h ++++ b/include/linux/suspend.h +@@ -196,6 +196,12 @@ struct platform_s2idle_ops { + void (*end)(void); + }; + ++#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) ++extern bool pm_in_action; ++#else ++# define pm_in_action false ++#endif ++ + #ifdef CONFIG_SUSPEND + extern suspend_state_t mem_sleep_current; + extern suspend_state_t mem_sleep_default; +diff --git a/include/linux/swait.h b/include/linux/swait.h +index 73e06e998..21ae66cd4 100644 +--- a/include/linux/swait.h ++++ b/include/linux/swait.h +@@ -160,7 +160,9 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq) + extern void swake_up_one(struct swait_queue_head *q); + extern void swake_up_all(struct swait_queue_head *q); + extern void swake_up_locked(struct swait_queue_head *q); ++extern void swake_up_all_locked(struct swait_queue_head *q); + ++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); + extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); + extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); + +@@ -297,4 +299,18 @@ do { \ + __ret; \ + }) + ++#define __swait_event_lock_irq(wq, condition, lock, cmd) \ ++ ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ ++ raw_spin_unlock_irq(&lock); \ ++ cmd; \ ++ schedule(); \ ++ raw_spin_lock_irq(&lock)) ++ ++#define swait_event_lock_irq(wq_head, condition, lock) \ ++ do { \ ++ if (condition) \ ++ break; \ ++ 
__swait_event_lock_irq(wq_head, condition, lock, ); \ ++ } while (0) ++ + #endif /* _LINUX_SWAIT_H */ +diff --git a/include/linux/swap.h b/include/linux/swap.h +index 959a2e381..d625689b1 100644 +--- a/include/linux/swap.h ++++ b/include/linux/swap.h +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + + struct notifier_block; +@@ -354,6 +355,7 @@ extern unsigned long nr_free_pagecache_pages(void); + + + /* linux/mm/swap.c */ ++DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); + extern void lru_cache_add(struct page *); + extern void lru_cache_add_anon(struct page *page); + extern void lru_cache_add_file(struct page *page); +diff --git a/include/linux/swork.h b/include/linux/swork.h +new file mode 100644 +index 000000000..f175fa9a6 +--- /dev/null ++++ b/include/linux/swork.h +@@ -0,0 +1,24 @@ ++#ifndef _LINUX_SWORK_H ++#define _LINUX_SWORK_H ++ ++#include ++ ++struct swork_event { ++ struct list_head item; ++ unsigned long flags; ++ void (*func)(struct swork_event *); ++}; ++ ++static inline void INIT_SWORK(struct swork_event *event, ++ void (*func)(struct swork_event *)) ++{ ++ event->flags = 0; ++ event->func = func; ++} ++ ++bool swork_queue(struct swork_event *sev); ++ ++int swork_get(void); ++void swork_put(void); ++ ++#endif /* _LINUX_SWORK_H */ +diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h +index e22fdce95..ad373939e 100644 +--- a/include/linux/thread_info.h ++++ b/include/linux/thread_info.h +@@ -37,7 +37,17 @@ static inline long set_restart_fn(struct restart_block *restart, + + #define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) + +-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#ifdef CONFIG_PREEMPT_LAZY ++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) ++#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++ ++#else ++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_lazy() 0 ++#endif + + #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES + static inline int arch_within_stack_frames(const void * const stack, +diff --git a/include/linux/timer.h b/include/linux/timer.h +index 4759f2d94..d350bac62 100644 +--- a/include/linux/timer.h ++++ b/include/linux/timer.h +@@ -178,7 +178,7 @@ extern void add_timer(struct timer_list *timer); + + extern int try_to_del_timer_sync(struct timer_list *timer); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + extern int del_timer_sync(struct timer_list *timer); + #else + # define del_timer_sync(t) del_timer(t) +diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h +index 133cfea84..49f04d7a9 100644 +--- a/include/linux/trace_events.h ++++ b/include/linux/trace_events.h +@@ -62,6 +62,8 @@ struct trace_entry { + unsigned char flags; + unsigned char preempt_count; + int pid; ++ unsigned char migrate_disable; ++ unsigned char preempt_lazy_count; + }; + + #define TRACE_EVENT_TYPE_MAX \ +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h +index a0061e018..77d48dd75 100644 +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h +@@ -182,6 +182,7 @@ static __always_inline void pagefault_disabled_dec(void) + */ + static inline void pagefault_disable(void) + { ++ migrate_disable(); + pagefault_disabled_inc(); + /* + * make sure to have issued the store before a pagefault +@@ 
-198,6 +199,7 @@ static inline void pagefault_enable(void) + */ + barrier(); + pagefault_disabled_dec(); ++ migrate_enable(); + } + + /* +diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h +index 0beaea0e5..d521f42bf 100644 +--- a/include/linux/vmstat.h ++++ b/include/linux/vmstat.h +@@ -55,7 +55,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); + */ + static inline void __count_vm_event(enum vm_event_item item) + { ++ preempt_disable_rt(); + raw_cpu_inc(vm_event_states.event[item]); ++ preempt_enable_rt(); + } + + static inline void count_vm_event(enum vm_event_item item) +@@ -65,7 +67,9 @@ static inline void count_vm_event(enum vm_event_item item) + + static inline void __count_vm_events(enum vm_event_item item, long delta) + { ++ preempt_disable_rt(); + raw_cpu_add(vm_event_states.event[item], delta); ++ preempt_enable_rt(); + } + + static inline void count_vm_events(enum vm_event_item item, long delta) +diff --git a/include/linux/wait.h b/include/linux/wait.h +index 60a62d3ad..6f33ccf6b 100644 +--- a/include/linux/wait.h ++++ b/include/linux/wait.h +@@ -10,6 +10,7 @@ + + #include + #include ++#include + + typedef struct wait_queue_entry wait_queue_entry_t; + +@@ -527,8 +528,8 @@ do { \ + int __ret = 0; \ + struct hrtimer_sleeper __t; \ + \ +- hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \ +- hrtimer_init_sleeper(&__t, current); \ ++ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, HRTIMER_MODE_REL, \ ++ current); \ + if ((timeout) != KTIME_MAX) \ + hrtimer_start_range_ns(&__t.timer, timeout, \ + current->timer_slack_ns, \ +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h +index 6f2b042fc..f0e786dc7 100644 +--- a/include/linux/workqueue.h ++++ b/include/linux/workqueue.h +@@ -473,10 +473,6 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, + + extern void destroy_workqueue(struct workqueue_struct *wq); + +-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); +-void free_workqueue_attrs(struct workqueue_attrs *attrs); +-int apply_workqueue_attrs(struct workqueue_struct *wq, +- const struct workqueue_attrs *attrs); + int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); + + extern bool queue_work_on(int cpu, struct workqueue_struct *wq, +diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h +index 883bb9085..3b593cdeb 100644 +--- a/include/net/gen_stats.h ++++ b/include/net/gen_stats.h +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + + struct gnet_stats_basic_cpu { + struct gnet_stats_basic_packed bstats; +@@ -36,11 +37,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, + spinlock_t *lock, struct gnet_dump *d, + int padattr); + +-int gnet_stats_copy_basic(const seqcount_t *running, ++int gnet_stats_copy_basic(net_seqlock_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b); +-void __gnet_stats_copy_basic(const seqcount_t *running, ++void __gnet_stats_copy_basic(net_seqlock_t *running, + struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b); +@@ -60,13 +61,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, + struct net_rate_estimator __rcu **rate_est, + spinlock_t *lock, +- seqcount_t *running, struct nlattr *opt); ++ net_seqlock_t *running, struct nlattr *opt); + void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); + int 
gen_replace_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, + struct net_rate_estimator __rcu **ptr, + spinlock_t *lock, +- seqcount_t *running, struct nlattr *opt); ++ net_seqlock_t *running, struct nlattr *opt); + bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); + bool gen_estimator_read(struct net_rate_estimator __rcu **ptr, + struct gnet_stats_rate_est64 *sample); +diff --git a/include/net/neighbour.h b/include/net/neighbour.h +index bec7e96a3..18f351426 100644 +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -455,7 +455,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) + } + #endif + +-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) ++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) + { + unsigned int hh_alen = 0; + unsigned int seq; +@@ -497,7 +497,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb + + static inline int neigh_output(struct neighbour *n, struct sk_buff *skb) + { +- const struct hh_cache *hh = &n->hh; ++ struct hh_cache *hh = &n->hh; + + if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) + return neigh_hh_output(hh, skb); +@@ -538,7 +538,7 @@ struct neighbour_cb { + + #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) + +-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, ++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, + const struct net_device *dev) + { + unsigned int seq; +diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h +new file mode 100644 +index 000000000..a7034298a +--- /dev/null ++++ b/include/net/net_seq_lock.h +@@ -0,0 +1,15 @@ ++#ifndef __NET_NET_SEQ_LOCK_H__ ++#define __NET_NET_SEQ_LOCK_H__ ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define net_seqlock_t seqlock_t ++# define net_seq_begin(__r) read_seqbegin(__r) ++# define net_seq_retry(__r, __s) read_seqretry(__r, __s) ++ ++#else ++# define net_seqlock_t seqcount_t ++# define net_seq_begin(__r) read_seqcount_begin(__r) ++# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s) ++#endif ++ ++#endif +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index c9cd5086b..b6328680d 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -100,7 +101,7 @@ struct Qdisc { + struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; + struct qdisc_skb_head q; + struct gnet_stats_basic_packed bstats; +- seqcount_t running; ++ net_seqlock_t running; + struct gnet_stats_queue qstats; + unsigned long state; + struct Qdisc *next_sched; +@@ -121,7 +122,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc) + { + if (qdisc->flags & TCQ_F_NOLOCK) + return spin_is_locked(&qdisc->seqlock); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ return spin_is_locked(&qdisc->running.lock) ? true : false; ++#else + return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; ++#endif + } + + static inline bool qdisc_run_begin(struct Qdisc *qdisc) +@@ -132,17 +137,27 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) + } else if (qdisc_is_running(qdisc)) { + return false; + } ++#ifdef CONFIG_PREEMPT_RT_BASE ++ if (try_write_seqlock(&qdisc->running)) ++ return true; ++ return false; ++#else + /* Variant of write_seqcount_begin() telling lockdep a trylock + * was attempted. 
+ */ + raw_write_seqcount_begin(&qdisc->running); + seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_); + return true; ++#endif + } + + static inline void qdisc_run_end(struct Qdisc *qdisc) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ write_sequnlock(&qdisc->running); ++#else + write_seqcount_end(&qdisc->running); ++#endif + if (qdisc->flags & TCQ_F_NOLOCK) + spin_unlock(&qdisc->seqlock); + } +@@ -458,7 +473,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) + return qdisc_lock(root); + } + +-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) ++static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) + { + struct Qdisc *root = qdisc_root_sleeping(qdisc); + +diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h +new file mode 100644 +index 000000000..657e234b1 +--- /dev/null ++++ b/include/soc/at91/atmel_tcb.h +@@ -0,0 +1,183 @@ ++//SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2018 Microchip */ ++ ++#ifndef __SOC_ATMEL_TCB_H ++#define __SOC_ATMEL_TCB_H ++ ++/* Channel registers */ ++#define ATMEL_TC_COFFS(c) ((c) * 0x40) ++#define ATMEL_TC_CCR(c) ATMEL_TC_COFFS(c) ++#define ATMEL_TC_CMR(c) (ATMEL_TC_COFFS(c) + 0x4) ++#define ATMEL_TC_SMMR(c) (ATMEL_TC_COFFS(c) + 0x8) ++#define ATMEL_TC_RAB(c) (ATMEL_TC_COFFS(c) + 0xc) ++#define ATMEL_TC_CV(c) (ATMEL_TC_COFFS(c) + 0x10) ++#define ATMEL_TC_RA(c) (ATMEL_TC_COFFS(c) + 0x14) ++#define ATMEL_TC_RB(c) (ATMEL_TC_COFFS(c) + 0x18) ++#define ATMEL_TC_RC(c) (ATMEL_TC_COFFS(c) + 0x1c) ++#define ATMEL_TC_SR(c) (ATMEL_TC_COFFS(c) + 0x20) ++#define ATMEL_TC_IER(c) (ATMEL_TC_COFFS(c) + 0x24) ++#define ATMEL_TC_IDR(c) (ATMEL_TC_COFFS(c) + 0x28) ++#define ATMEL_TC_IMR(c) (ATMEL_TC_COFFS(c) + 0x2c) ++#define ATMEL_TC_EMR(c) (ATMEL_TC_COFFS(c) + 0x30) ++ ++/* Block registers */ ++#define ATMEL_TC_BCR 0xc0 ++#define ATMEL_TC_BMR 0xc4 ++#define ATMEL_TC_QIER 0xc8 ++#define ATMEL_TC_QIDR 0xcc ++#define ATMEL_TC_QIMR 0xd0 ++#define ATMEL_TC_QISR 0xd4 ++#define ATMEL_TC_FMR 0xd8 ++#define ATMEL_TC_WPMR 0xe4 ++ ++/* CCR fields */ ++#define ATMEL_TC_CCR_CLKEN BIT(0) ++#define ATMEL_TC_CCR_CLKDIS BIT(1) ++#define ATMEL_TC_CCR_SWTRG BIT(2) ++ ++/* Common CMR fields */ ++#define ATMEL_TC_CMR_TCLKS_MSK GENMASK(2, 0) ++#define ATMEL_TC_CMR_TCLK(x) (x) ++#define ATMEL_TC_CMR_XC(x) ((x) + 5) ++#define ATMEL_TC_CMR_CLKI BIT(3) ++#define ATMEL_TC_CMR_BURST_MSK GENMASK(5, 4) ++#define ATMEL_TC_CMR_BURST_XC(x) (((x) + 1) << 4) ++#define ATMEL_TC_CMR_WAVE BIT(15) ++ ++/* Capture mode CMR fields */ ++#define ATMEL_TC_CMR_LDBSTOP BIT(6) ++#define ATMEL_TC_CMR_LDBDIS BIT(7) ++#define ATMEL_TC_CMR_ETRGEDG_MSK GENMASK(9, 8) ++#define ATMEL_TC_CMR_ETRGEDG_NONE (0 << 8) ++#define ATMEL_TC_CMR_ETRGEDG_RISING (1 << 8) ++#define ATMEL_TC_CMR_ETRGEDG_FALLING (2 << 8) ++#define ATMEL_TC_CMR_ETRGEDG_BOTH (3 << 8) ++#define ATMEL_TC_CMR_ABETRG BIT(10) ++#define ATMEL_TC_CMR_CPCTRG BIT(14) ++#define ATMEL_TC_CMR_LDRA_MSK GENMASK(17, 16) ++#define ATMEL_TC_CMR_LDRA_NONE (0 << 16) ++#define ATMEL_TC_CMR_LDRA_RISING (1 << 16) ++#define ATMEL_TC_CMR_LDRA_FALLING (2 << 16) ++#define ATMEL_TC_CMR_LDRA_BOTH (3 << 16) ++#define ATMEL_TC_CMR_LDRB_MSK GENMASK(19, 18) ++#define ATMEL_TC_CMR_LDRB_NONE (0 << 18) ++#define ATMEL_TC_CMR_LDRB_RISING (1 << 18) ++#define ATMEL_TC_CMR_LDRB_FALLING (2 << 18) ++#define ATMEL_TC_CMR_LDRB_BOTH (3 << 18) ++#define ATMEL_TC_CMR_SBSMPLR_MSK GENMASK(22, 20) ++#define ATMEL_TC_CMR_SBSMPLR(x) ((x) << 20) ++ ++/* Waveform mode CMR fields */ ++#define 
ATMEL_TC_CMR_CPCSTOP BIT(6) ++#define ATMEL_TC_CMR_CPCDIS BIT(7) ++#define ATMEL_TC_CMR_EEVTEDG_MSK GENMASK(9, 8) ++#define ATMEL_TC_CMR_EEVTEDG_NONE (0 << 8) ++#define ATMEL_TC_CMR_EEVTEDG_RISING (1 << 8) ++#define ATMEL_TC_CMR_EEVTEDG_FALLING (2 << 8) ++#define ATMEL_TC_CMR_EEVTEDG_BOTH (3 << 8) ++#define ATMEL_TC_CMR_EEVT_MSK GENMASK(11, 10) ++#define ATMEL_TC_CMR_EEVT_XC(x) (((x) + 1) << 10) ++#define ATMEL_TC_CMR_ENETRG BIT(12) ++#define ATMEL_TC_CMR_WAVESEL_MSK GENMASK(14, 13) ++#define ATMEL_TC_CMR_WAVESEL_UP (0 << 13) ++#define ATMEL_TC_CMR_WAVESEL_UPDOWN (1 << 13) ++#define ATMEL_TC_CMR_WAVESEL_UPRC (2 << 13) ++#define ATMEL_TC_CMR_WAVESEL_UPDOWNRC (3 << 13) ++#define ATMEL_TC_CMR_ACPA_MSK GENMASK(17, 16) ++#define ATMEL_TC_CMR_ACPA(a) (ATMEL_TC_CMR_ACTION_##a << 16) ++#define ATMEL_TC_CMR_ACPC_MSK GENMASK(19, 18) ++#define ATMEL_TC_CMR_ACPC(a) (ATMEL_TC_CMR_ACTION_##a << 18) ++#define ATMEL_TC_CMR_AEEVT_MSK GENMASK(21, 20) ++#define ATMEL_TC_CMR_AEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 20) ++#define ATMEL_TC_CMR_ASWTRG_MSK GENMASK(23, 22) ++#define ATMEL_TC_CMR_ASWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 22) ++#define ATMEL_TC_CMR_BCPB_MSK GENMASK(25, 24) ++#define ATMEL_TC_CMR_BCPB(a) (ATMEL_TC_CMR_ACTION_##a << 24) ++#define ATMEL_TC_CMR_BCPC_MSK GENMASK(27, 26) ++#define ATMEL_TC_CMR_BCPC(a) (ATMEL_TC_CMR_ACTION_##a << 26) ++#define ATMEL_TC_CMR_BEEVT_MSK GENMASK(29, 28) ++#define ATMEL_TC_CMR_BEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 28) ++#define ATMEL_TC_CMR_BSWTRG_MSK GENMASK(31, 30) ++#define ATMEL_TC_CMR_BSWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 30) ++#define ATMEL_TC_CMR_ACTION_NONE 0 ++#define ATMEL_TC_CMR_ACTION_SET 1 ++#define ATMEL_TC_CMR_ACTION_CLEAR 2 ++#define ATMEL_TC_CMR_ACTION_TOGGLE 3 ++ ++/* SMMR fields */ ++#define ATMEL_TC_SMMR_GCEN BIT(0) ++#define ATMEL_TC_SMMR_DOWN BIT(1) ++ ++/* SR/IER/IDR/IMR fields */ ++#define ATMEL_TC_COVFS BIT(0) ++#define ATMEL_TC_LOVRS BIT(1) ++#define ATMEL_TC_CPAS BIT(2) ++#define ATMEL_TC_CPBS BIT(3) ++#define ATMEL_TC_CPCS BIT(4) ++#define ATMEL_TC_LDRAS BIT(5) ++#define ATMEL_TC_LDRBS BIT(6) ++#define ATMEL_TC_ETRGS BIT(7) ++#define ATMEL_TC_CLKSTA BIT(16) ++#define ATMEL_TC_MTIOA BIT(17) ++#define ATMEL_TC_MTIOB BIT(18) ++ ++/* EMR fields */ ++#define ATMEL_TC_EMR_TRIGSRCA_MSK GENMASK(1, 0) ++#define ATMEL_TC_EMR_TRIGSRCA_TIOA 0 ++#define ATMEL_TC_EMR_TRIGSRCA_PWMX 1 ++#define ATMEL_TC_EMR_TRIGSRCB_MSK GENMASK(5, 4) ++#define ATMEL_TC_EMR_TRIGSRCB_TIOB (0 << 4) ++#define ATMEL_TC_EMR_TRIGSRCB_PWM (1 << 4) ++#define ATMEL_TC_EMR_NOCLKDIV BIT(8) ++ ++/* BCR fields */ ++#define ATMEL_TC_BCR_SYNC BIT(0) ++ ++/* BMR fields */ ++#define ATMEL_TC_BMR_TCXC_MSK(c) GENMASK(((c) * 2) + 1, (c) * 2) ++#define ATMEL_TC_BMR_TCXC(x, c) ((x) << (2 * (c))) ++#define ATMEL_TC_BMR_QDEN BIT(8) ++#define ATMEL_TC_BMR_POSEN BIT(9) ++#define ATMEL_TC_BMR_SPEEDEN BIT(10) ++#define ATMEL_TC_BMR_QDTRANS BIT(11) ++#define ATMEL_TC_BMR_EDGPHA BIT(12) ++#define ATMEL_TC_BMR_INVA BIT(13) ++#define ATMEL_TC_BMR_INVB BIT(14) ++#define ATMEL_TC_BMR_INVIDX BIT(15) ++#define ATMEL_TC_BMR_SWAP BIT(16) ++#define ATMEL_TC_BMR_IDXPHB BIT(17) ++#define ATMEL_TC_BMR_AUTOC BIT(18) ++#define ATMEL_TC_MAXFILT_MSK GENMASK(25, 20) ++#define ATMEL_TC_MAXFILT(x) (((x) - 1) << 20) ++#define ATMEL_TC_MAXCMP_MSK GENMASK(29, 26) ++#define ATMEL_TC_MAXCMP(x) ((x) << 26) ++ ++/* QEDC fields */ ++#define ATMEL_TC_QEDC_IDX BIT(0) ++#define ATMEL_TC_QEDC_DIRCHG BIT(1) ++#define ATMEL_TC_QEDC_QERR BIT(2) ++#define ATMEL_TC_QEDC_MPE BIT(3) ++#define ATMEL_TC_QEDC_DIR BIT(8) ++ ++/* FMR fields */ 
++#define ATMEL_TC_FMR_ENCF(x) BIT(x) ++ ++/* WPMR fields */ ++#define ATMEL_TC_WPMR_WPKEY (0x54494d << 8) ++#define ATMEL_TC_WPMR_WPEN BIT(0) ++ ++static const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, }; ++ ++static const struct of_device_id atmel_tcb_dt_ids[] = { ++ { ++ .compatible = "atmel,at91rm9200-tcb", ++ .data = (void *)16, ++ }, { ++ .compatible = "atmel,at91sam9x5-tcb", ++ .data = (void *)32, ++ }, { ++ /* sentinel */ ++ } ++}; ++ ++#endif /* __SOC_ATMEL_TCB_H */ +diff --git a/init/Kconfig b/init/Kconfig +index 1a0b15c5a..abac5e8b0 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -814,6 +814,7 @@ config CFS_BANDWIDTH + config RT_GROUP_SCHED + bool "Group scheduling for SCHED_RR/FIFO" + depends on CGROUP_SCHED ++ depends on !PREEMPT_RT_FULL + default n + help + This feature lets you explicitly allocate real CPU bandwidth +@@ -1702,6 +1703,7 @@ choice + + config SLAB + bool "SLAB" ++ depends on !PREEMPT_RT_FULL + select HAVE_HARDENED_USERCOPY_ALLOCATOR + help + The regular slab allocator that is established and known to work +@@ -1722,6 +1724,7 @@ config SLUB + config SLOB + depends on EXPERT + bool "SLOB (Simple Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLOB replaces the stock allocator with a drastically simpler + allocator. SLOB is generally more space efficient but +@@ -1763,7 +1766,7 @@ config SLAB_FREELIST_HARDENED + + config SLUB_CPU_PARTIAL + default y +- depends on SLUB && SMP ++ depends on SLUB && SMP && !PREEMPT_RT_FULL + bool "SLUB per cpu partial cache" + help + Per cpu partial caches accellerate objects allocation and freeing +diff --git a/init/Makefile b/init/Makefile +index a3e5ce2bc..777923256 100644 +--- a/init/Makefile ++++ b/init/Makefile +@@ -34,4 +34,4 @@ silent_chk_compile.h = : + include/generated/compile.h: FORCE + @$($(quiet)chk_compile.h) + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ +- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" ++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" +diff --git a/init/init_task.c b/init/init_task.c +index 57ff82ab9..593bbeff8 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -50,6 +50,12 @@ static struct sighand_struct init_sighand = { + .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh), + }; + ++#if defined(CONFIG_POSIX_TIMERS) && defined(CONFIG_PREEMPT_RT_BASE) ++# define INIT_TIMER_LIST .posix_timer_list = NULL, ++#else ++# define INIT_TIMER_LIST ++#endif ++ + /* + * Set up the first task table, touch at your own risk!. 
Base=0, + * limit=0x1fffff (=2MB) +@@ -71,8 +77,13 @@ struct task_struct init_task + .static_prio = MAX_PRIO - 20, + .normal_prio = MAX_PRIO - 20, + .policy = SCHED_NORMAL, +- .cpus_allowed = CPU_MASK_ALL, ++ .cpus_ptr = &init_task.cpus_mask, ++ .cpus_mask = CPU_MASK_ALL, + .nr_cpus_allowed= NR_CPUS, ++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) && \ ++ defined(CONFIG_SCHED_DEBUG) ++ .pinned_on_cpu = -1, ++#endif + .mm = NULL, + .active_mm = &init_mm, + .restart_block = { +@@ -118,6 +129,7 @@ struct task_struct init_task + INIT_CPU_TIMERS(init_task) + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock), + .timer_slack_ns = 50000, /* 50 usec default slack */ ++ INIT_TIMER_LIST + .thread_pid = &init_struct_pid, + .thread_group = LIST_HEAD_INIT(init_task.thread_group), + .thread_node = LIST_HEAD_INIT(init_signals.thread_head), +diff --git a/init/main.c b/init/main.c +index 4e041fc2a..a705b0e41 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -560,6 +560,7 @@ asmlinkage __visible void __init start_kernel(void) + setup_command_line(command_line); + setup_nr_cpu_ids(); + setup_per_cpu_areas(); ++ softirq_early_init(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + boot_cpu_hotplug_init(); + +diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks +index 84d882f3e..af27c4000 100644 +--- a/kernel/Kconfig.locks ++++ b/kernel/Kconfig.locks +@@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW + + config MUTEX_SPIN_ON_OWNER + def_bool y +- depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW ++ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL + + config RWSEM_SPIN_ON_OWNER + def_bool y +- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW ++ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL + + config LOCK_SPIN_ON_OWNER + def_bool y +diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt +index cd1655122..fd924c0bc 100644 +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt +@@ -1,3 +1,19 @@ ++config PREEMPT ++ bool ++ select PREEMPT_COUNT ++ ++config PREEMPT_RT_BASE ++ bool ++ select PREEMPT ++ ++config PREEMPT_RT ++ bool ++ ++config HAVE_PREEMPT_LAZY ++ bool ++ ++config PREEMPT_LAZY ++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL + + choice + prompt "Preemption Model" +@@ -34,10 +50,10 @@ config PREEMPT_VOLUNTARY + + Select this if you are building a kernel for a desktop system. + +-config PREEMPT ++config PREEMPT__LL + bool "Preemptible Kernel (Low-Latency Desktop)" + depends on !ARCH_NO_PREEMPT +- select PREEMPT_COUNT ++ select PREEMPT + select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK + help + This option reduces the latency of the kernel by making +@@ -54,7 +70,24 @@ config PREEMPT + embedded system with latency requirements in the milliseconds + range. + ++config PREEMPT_RTB ++ bool "Preemptible Kernel (Basic RT)" ++ select PREEMPT_RT_BASE ++ help ++ This option is basically the same as (Low-Latency Desktop) but ++ enables changes which are preliminary for the full preemptible ++ RT kernel. 
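A minimal sketch (not from the patch) of the two gating styles this series relies on once the symbols added by this Kconfig.preempt hunk exist: a preprocessor #ifdef removes RT-only code entirely on non-RT configurations, while IS_ENABLED() keeps both branches visible to the compiler, as the kernel/irq_work.c hunk further down does. Only CONFIG_PREEMPT_RT_BASE and CONFIG_PREEMPT_RT_FULL are assumed; the function name is invented.

#include <linux/kconfig.h>

static void rt_gating_sketch(void)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	/* compiled only when the RT base support is selected */
#endif

	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
		/*
		 * Always compile-tested, compiled out as dead code when
		 * PREEMPT_RT_FULL is disabled.
		 */
	}
}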
++ ++config PREEMPT_RT_FULL ++ bool "Fully Preemptible Kernel (RT)" ++ depends on IRQ_FORCED_THREADING ++ select PREEMPT_RT_BASE ++ select PREEMPT_RCU ++ select PREEMPT_RT ++ help ++ All and everything ++ + endchoice + + config PREEMPT_COUNT +- bool +\ No newline at end of file ++ bool +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index 7456882e1..1c13e5d37 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -4766,10 +4766,10 @@ static void css_free_rwork_fn(struct work_struct *work) + } + } + +-static void css_release_work_fn(struct work_struct *work) ++static void css_release_work_fn(struct swork_event *sev) + { + struct cgroup_subsys_state *css = +- container_of(work, struct cgroup_subsys_state, destroy_work); ++ container_of(sev, struct cgroup_subsys_state, destroy_swork); + struct cgroup_subsys *ss = css->ss; + struct cgroup *cgrp = css->cgroup; + +@@ -4831,8 +4831,8 @@ static void css_release(struct percpu_ref *ref) + struct cgroup_subsys_state *css = + container_of(ref, struct cgroup_subsys_state, refcnt); + +- INIT_WORK(&css->destroy_work, css_release_work_fn); +- queue_work(cgroup_destroy_wq, &css->destroy_work); ++ INIT_SWORK(&css->destroy_swork, css_release_work_fn); ++ swork_queue(&css->destroy_swork); + } + + static void init_and_link_css(struct cgroup_subsys_state *css, +@@ -5552,6 +5552,7 @@ static int __init cgroup_wq_init(void) + */ + cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); + BUG_ON(!cgroup_destroy_wq); ++ BUG_ON(swork_get()); + return 0; + } + core_initcall(cgroup_wq_init); +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index feb911772..b6f84c024 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -288,7 +288,7 @@ static struct cpuset top_cpuset = { + */ + + static DEFINE_MUTEX(cpuset_mutex); +-static DEFINE_SPINLOCK(callback_lock); ++static DEFINE_RAW_SPINLOCK(callback_lock); + + static struct workqueue_struct *cpuset_migrate_mm_wq; + +@@ -924,9 +924,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) + continue; + rcu_read_unlock(); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cpumask_copy(cp->effective_cpus, new_cpus); +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + WARN_ON(!is_in_v2_mode() && + !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); +@@ -991,9 +991,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, + if (retval < 0) + return retval; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + /* use trialcs->cpus_allowed as a temp variable */ + update_cpumasks_hier(cs, trialcs->cpus_allowed); +@@ -1177,9 +1177,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) + continue; + rcu_read_unlock(); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cp->effective_mems = *new_mems; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + WARN_ON(!is_in_v2_mode() && + !nodes_equal(cp->mems_allowed, cp->effective_mems)); +@@ -1247,9 +1247,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, + if (retval < 0) + goto done; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cs->mems_allowed = trialcs->mems_allowed; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + 
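A hedged usage sketch for the swork_event ("simple work") API declared in include/linux/swork.h earlier in this patch and used by the cgroup css_release path above; my_event, my_handler, my_setup and my_trigger are invented names, and error handling is kept to the minimum.

#include <linux/init.h>
#include <linux/swork.h>

static struct swork_event my_event;

static void my_handler(struct swork_event *sev)
{
	/* runs in the sworker kernel thread and may therefore sleep */
}

static int __init my_setup(void)
{
	int ret = swork_get();	/* create or reference the worker thread */

	if (ret)
		return ret;
	INIT_SWORK(&my_event, my_handler);
	return 0;
}

static void my_trigger(void)
{
	/* queueing is safe from contexts that must not sleep on RT */
	swork_queue(&my_event);
}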
+ /* use trialcs->mems_allowed as a temp variable */ + update_nodemasks_hier(cs, &trialcs->mems_allowed); +@@ -1340,9 +1340,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, + spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) + || (is_spread_page(cs) != is_spread_page(trialcs))); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cs->flags = trialcs->flags; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) + rebuild_sched_domains_locked(); +@@ -1757,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) + cpuset_filetype_t type = seq_cft(sf)->private; + int ret = 0; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + + switch (type) { + case FILE_CPULIST: +@@ -1776,7 +1776,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) + ret = -EINVAL; + } + +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + return ret; + } + +@@ -1991,12 +1991,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) + + cpuset_inc(); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + if (is_in_v2_mode()) { + cpumask_copy(cs->effective_cpus, parent->effective_cpus); + cs->effective_mems = parent->effective_mems; + } +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) + goto out_unlock; +@@ -2023,12 +2023,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) + } + rcu_read_unlock(); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cs->mems_allowed = parent->mems_allowed; + cs->effective_mems = parent->mems_allowed; + cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); + cpumask_copy(cs->effective_cpus, parent->cpus_allowed); +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + out_unlock: + mutex_unlock(&cpuset_mutex); + return 0; +@@ -2067,7 +2067,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) + static void cpuset_bind(struct cgroup_subsys_state *root_css) + { + mutex_lock(&cpuset_mutex); +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + + if (is_in_v2_mode()) { + cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); +@@ -2078,7 +2078,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) + top_cpuset.mems_allowed = top_cpuset.effective_mems; + } + +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + mutex_unlock(&cpuset_mutex); + } + +@@ -2092,7 +2092,7 @@ static void cpuset_fork(struct task_struct *task) + if (task_css_is_root(task, cpuset_cgrp_id)) + return; + +- set_cpus_allowed_ptr(task, ¤t->cpus_allowed); ++ set_cpus_allowed_ptr(task, current->cpus_ptr); + task->mems_allowed = current->mems_allowed; + } + +@@ -2176,12 +2176,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, + { + bool is_empty; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cpumask_copy(cs->cpus_allowed, new_cpus); + cpumask_copy(cs->effective_cpus, new_cpus); + cs->mems_allowed = *new_mems; + cs->effective_mems = *new_mems; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + /* + * Don't call update_tasks_cpumask() if the cpuset becomes empty, +@@ -2218,10 +2218,10 @@ hotplug_update_tasks(struct cpuset *cs, + if (nodes_empty(*new_mems)) + *new_mems = 
parent_cs(cs)->effective_mems; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cpumask_copy(cs->effective_cpus, new_cpus); + cs->effective_mems = *new_mems; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + if (cpus_updated) + update_tasks_cpumask(cs); +@@ -2314,21 +2314,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work) + + /* synchronize cpus_allowed to cpu_active_mask */ + if (cpus_updated) { +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + if (!on_dfl) + cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); + cpumask_copy(top_cpuset.effective_cpus, &new_cpus); +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + /* we don't mess with cpumasks of tasks in top_cpuset */ + } + + /* synchronize mems_allowed to N_MEMORY */ + if (mems_updated) { +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + if (!on_dfl) + top_cpuset.mems_allowed = new_mems; + top_cpuset.effective_mems = new_mems; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + update_tasks_nodemask(&top_cpuset); + } + +@@ -2427,11 +2427,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) + { + unsigned long flags; + +- spin_lock_irqsave(&callback_lock, flags); ++ raw_spin_lock_irqsave(&callback_lock, flags); + rcu_read_lock(); + guarantee_online_cpus(task_cs(tsk), pmask); + rcu_read_unlock(); +- spin_unlock_irqrestore(&callback_lock, flags); ++ raw_spin_unlock_irqrestore(&callback_lock, flags); + } + + /** +@@ -2492,11 +2492,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) + nodemask_t mask; + unsigned long flags; + +- spin_lock_irqsave(&callback_lock, flags); ++ raw_spin_lock_irqsave(&callback_lock, flags); + rcu_read_lock(); + guarantee_online_mems(task_cs(tsk), &mask); + rcu_read_unlock(); +- spin_unlock_irqrestore(&callback_lock, flags); ++ raw_spin_unlock_irqrestore(&callback_lock, flags); + + return mask; + } +@@ -2588,14 +2588,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) + return true; + + /* Not hardwall and node outside mems_allowed: scan up cpusets */ +- spin_lock_irqsave(&callback_lock, flags); ++ raw_spin_lock_irqsave(&callback_lock, flags); + + rcu_read_lock(); + cs = nearest_hardwall_ancestor(task_cs(current)); + allowed = node_isset(node, cs->mems_allowed); + rcu_read_unlock(); + +- spin_unlock_irqrestore(&callback_lock, flags); ++ raw_spin_unlock_irqrestore(&callback_lock, flags); + return allowed; + } + +diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c +index d0ed410b4..3c949c46c 100644 +--- a/kernel/cgroup/rstat.c ++++ b/kernel/cgroup/rstat.c +@@ -149,8 +149,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep) + raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, + cpu); + struct cgroup *pos = NULL; ++ unsigned long flags; + +- raw_spin_lock(cpu_lock); ++ raw_spin_lock_irqsave(cpu_lock, flags); + while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) { + struct cgroup_subsys_state *css; + +@@ -162,7 +163,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep) + css->ss->css_rstat_flush(css, cpu); + rcu_read_unlock(); + } +- raw_spin_unlock(cpu_lock); ++ raw_spin_unlock_irqrestore(cpu_lock, flags); + + /* if @may_sleep, play nice and yield if necessary */ + if (may_sleep && (need_resched() || +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 80c85406e..3a11a7fea 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -825,6 
+825,15 @@ static int take_cpu_down(void *_param) + int err, cpu = smp_processor_id(); + int ret; + ++#ifdef CONFIG_PREEMPT_RT_BASE ++ /* ++ * If any tasks disabled migration before we got here, ++ * go back and sleep again. ++ */ ++ if (cpu_nr_pinned(cpu)) ++ return -EAGAIN; ++#endif ++ + /* Ensure this CPU doesn't handle any more interrupts. */ + err = __cpu_disable(); + if (err < 0) +@@ -854,6 +863,10 @@ static int take_cpu_down(void *_param) + return 0; + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++struct task_struct *takedown_cpu_task; ++#endif ++ + static int takedown_cpu(unsigned int cpu) + { + struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); +@@ -868,11 +881,39 @@ static int takedown_cpu(unsigned int cpu) + */ + irq_lock_sparse(); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++ WARN_ON_ONCE(takedown_cpu_task); ++ takedown_cpu_task = current; ++ ++again: ++ /* ++ * If a task pins this CPU after we pass this check, take_cpu_down ++ * will return -EAGAIN. ++ */ ++ for (;;) { ++ int nr_pinned; ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ nr_pinned = cpu_nr_pinned(cpu); ++ if (nr_pinned == 0) ++ break; ++ schedule(); ++ } ++ set_current_state(TASK_RUNNING); ++#endif ++ + /* + * So now all preempt/rcu users must observe !cpu_active(). + */ + err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ if (err == -EAGAIN) ++ goto again; ++#endif + if (err) { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ takedown_cpu_task = NULL; ++#endif + /* CPU refused to die */ + irq_unlock_sparse(); + /* Unpark the hotplug thread so we can rollback there */ +@@ -891,6 +932,9 @@ static int takedown_cpu(unsigned int cpu) + wait_for_ap_thread(st, false); + BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++ takedown_cpu_task = NULL; ++#endif + /* Interrupts are moved away from the dying cpu, reenable alloc/free */ + irq_unlock_sparse(); + +diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c +index 6a4b41484..197cb422f 100644 +--- a/kernel/debug/kdb/kdb_io.c ++++ b/kernel/debug/kdb/kdb_io.c +@@ -857,9 +857,11 @@ int kdb_printf(const char *fmt, ...) + va_list ap; + int r; + ++ kdb_trap_printk++; + va_start(ap, fmt); + r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); + va_end(ap); ++ kdb_trap_printk--; + + return r; + } +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 00b22c820..5f6b9db8c 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -1108,7 +1108,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) + cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); + + raw_spin_lock_init(&cpuctx->hrtimer_lock); +- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); ++ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); + timer->function = perf_mux_hrtimer_handler; + } + +@@ -9338,7 +9338,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) + if (!is_sampling_event(event)) + return; + +- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + hwc->hrtimer.function = perf_swevent_hrtimer; + + /* +diff --git a/kernel/exit.c b/kernel/exit.c +index 2a32d32bd..6b38a0490 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -151,7 +151,7 @@ static void __exit_signal(struct task_struct *tsk) + * Do this under ->siglock, we can race with another thread + * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
+ */ +- flush_sigqueue(&tsk->pending); ++ flush_task_sigqueue(tsk); + tsk->sighand = NULL; + spin_unlock(&sighand->siglock); + +diff --git a/kernel/fork.c b/kernel/fork.c +index 88463fd56..97cd89df1 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -40,6 +40,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -683,6 +684,19 @@ void __mmdrop(struct mm_struct *mm) + } + EXPORT_SYMBOL_GPL(__mmdrop); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++/* ++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't ++ * want another facility to make this work. ++ */ ++void __mmdrop_delayed(struct rcu_head *rhp) ++{ ++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); ++ ++ __mmdrop(mm); ++} ++#endif ++ + static void mmdrop_async_fn(struct work_struct *work) + { + struct mm_struct *mm; +@@ -717,13 +731,24 @@ static inline void put_signal_struct(struct signal_struct *sig) + if (atomic_dec_and_test(&sig->sigcnt)) + free_signal_struct(sig); + } +- ++#ifdef CONFIG_PREEMPT_RT_BASE ++static ++#endif + void __put_task_struct(struct task_struct *tsk) + { + WARN_ON(!tsk->exit_state); + WARN_ON(atomic_read(&tsk->usage)); + WARN_ON(tsk == current); + ++ /* ++ * Remove function-return probe instances associated with this ++ * task and put them back on the free list. ++ */ ++ kprobe_flush_task(tsk); ++ ++ /* Task is done with its stack. */ ++ put_task_stack(tsk); ++ + cgroup_free(tsk); + task_numa_free(tsk, true); + security_task_free(tsk); +@@ -736,7 +761,18 @@ void __put_task_struct(struct task_struct *tsk) + if (!profile_handoff_task(tsk)) + free_task(tsk); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + EXPORT_SYMBOL_GPL(__put_task_struct); ++#else ++void __put_task_struct_cb(struct rcu_head *rhp) ++{ ++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); ++ ++ __put_task_struct(tsk); ++ ++} ++EXPORT_SYMBOL_GPL(__put_task_struct_cb); ++#endif + + void __init __weak arch_task_cache_init(void) { } + +@@ -896,6 +932,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) + #ifdef CONFIG_STACKPROTECTOR + tsk->stack_canary = get_random_canary(); + #endif ++ if (orig->cpus_ptr == &orig->cpus_mask) ++ tsk->cpus_ptr = &tsk->cpus_mask; + + /* + * One for the user space visible state that goes away when reaped. 
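The __mmdrop_delayed() and __put_task_struct_cb() additions above share one idiom: on PREEMPT_RT the last reference can be dropped from a context that must not sleep, so the actual teardown is deferred through an RCU callback. A minimal sketch of that idiom, assuming an invented struct obj with its own refcount:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t	ref;
	struct rcu_head	rcu;
	/* ... payload ... */
};

static void obj_free_rcu(struct rcu_head *rhp)
{
	struct obj *o = container_of(rhp, struct obj, rcu);

	kfree(o);	/* runs later, from RCU callback context */
}

static void obj_put(struct obj *o)
{
	/* the caller may be atomic on RT; defer the free instead of sleeping */
	if (refcount_dec_and_test(&o->ref))
		call_rcu(&o->rcu, obj_free_rcu);
}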
+@@ -910,6 +948,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) + tsk->splice_pipe = NULL; + tsk->task_frag.page = NULL; + tsk->wake_q.next = NULL; ++ tsk->wake_q_sleeper.next = NULL; + + account_kernel_stack(tsk, 1); + +@@ -1657,6 +1696,9 @@ static void rt_mutex_init_task(struct task_struct *p) + */ + static void posix_cpu_timers_init(struct task_struct *tsk) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ tsk->posix_timer_list = NULL; ++#endif + tsk->cputime_expires.prof_exp = 0; + tsk->cputime_expires.virt_exp = 0; + tsk->cputime_expires.sched_exp = 0; +@@ -1881,6 +1923,7 @@ static __latent_entropy struct task_struct *copy_process( + spin_lock_init(&p->alloc_lock); + + init_sigpending(&p->pending); ++ p->sigqueue_cache = NULL; + + p->utime = p->stime = p->gtime = 0; + #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME +diff --git a/kernel/futex.c b/kernel/futex.c +index a36a006e5..b1c8d5866 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -967,7 +967,9 @@ static void exit_pi_state_list(struct task_struct *curr) + if (head->next != next) { + /* retain curr->pi_lock for the loop invariant */ + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); ++ raw_spin_unlock_irq(&curr->pi_lock); + spin_unlock(&hb->lock); ++ raw_spin_lock_irq(&curr->pi_lock); + put_pi_state(pi_state); + continue; + } +@@ -1578,6 +1580,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ + struct task_struct *new_owner; + bool postunlock = false; + DEFINE_WAKE_Q(wake_q); ++ DEFINE_WAKE_Q(wake_sleeper_q); + int ret = 0; + + new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); +@@ -1627,14 +1630,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ + * not fail. + */ + pi_state_update_owner(pi_state, new_owner); +- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); ++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q, ++ &wake_sleeper_q); + } + + out_unlock: + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + + if (postunlock) +- rt_mutex_postunlock(&wake_q); ++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); + + return ret; + } +@@ -2258,6 +2262,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + requeue_pi_wake_futex(this, &key2, hb2); + drop_count++; + continue; ++ } else if (ret == -EAGAIN) { ++ /* ++ * Waiter was woken by timeout or ++ * signal and has set pi_blocked_on to ++ * PI_WAKEUP_INPROGRESS before we ++ * tried to enqueue it on the rtmutex. ++ */ ++ this->pi_state = NULL; ++ put_pi_state(pi_state); ++ continue; + } else if (ret) { + /* + * rt_mutex_start_proxy_lock() detected a +@@ -2821,10 +2835,9 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, + if (abs_time) { + to = &timeout; + +- hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? +- CLOCK_REALTIME : CLOCK_MONOTONIC, +- HRTIMER_MODE_ABS); +- hrtimer_init_sleeper(to, current); ++ hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ? 
++ CLOCK_REALTIME : CLOCK_MONOTONIC, ++ HRTIMER_MODE_ABS, current); + hrtimer_set_expires_range_ns(&to->timer, *abs_time, + current->timer_slack_ns); + } +@@ -2922,9 +2935,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + + if (time) { + to = &timeout; +- hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, +- HRTIMER_MODE_ABS); +- hrtimer_init_sleeper(to, current); ++ hrtimer_init_sleeper_on_stack(to, CLOCK_REALTIME, ++ HRTIMER_MODE_ABS, current); + hrtimer_set_expires(&to->timer, *time); + } + +@@ -2987,7 +2999,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + goto no_block; + } + +- rt_mutex_init_waiter(&rt_waiter); ++ rt_mutex_init_waiter(&rt_waiter, false); + + /* + * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not +@@ -3003,6 +3015,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + * before __rt_mutex_start_proxy_lock() is done. + */ + raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); ++ /* ++ * the migrate_disable() here disables migration in the in_atomic() fast ++ * path which is enabled again in the following spin_unlock(). We have ++ * one migrate_disable() pending in the slow-path which is reversed ++ * after the raw_spin_unlock_irq() where we leave the atomic context. ++ */ ++ migrate_disable(); ++ + spin_unlock(q.lock_ptr); + /* + * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter +@@ -3011,6 +3031,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + */ + ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); + raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); ++ migrate_enable(); + + if (ret) { + if (ret == 1) +@@ -3145,10 +3166,19 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + * rt_waiter. Also see the WARN in wake_futex_pi(). + */ + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ /* ++ * Magic trickery for now to make the RT migrate disable ++ * logic happy. The following spin_unlock() happens with ++ * interrupts disabled so the internal migrate_enable() ++ * won't undo the migrate_disable() which was issued when ++ * locking hb->lock. ++ */ ++ migrate_disable(); + spin_unlock(&hb->lock); + + /* drops pi_state->pi_mutex.wait_lock */ + ret = wake_futex_pi(uaddr, uval, pi_state); ++ migrate_enable(); + + put_pi_state(pi_state); + +@@ -3319,7 +3349,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + { + struct hrtimer_sleeper timeout, *to = NULL; + struct rt_mutex_waiter rt_waiter; +- struct futex_hash_bucket *hb; ++ struct futex_hash_bucket *hb, *hb2; + union futex_key key2 = FUTEX_KEY_INIT; + struct futex_q q = futex_q_init; + int res, ret; +@@ -3335,10 +3365,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + + if (abs_time) { + to = &timeout; +- hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? +- CLOCK_REALTIME : CLOCK_MONOTONIC, +- HRTIMER_MODE_ABS); +- hrtimer_init_sleeper(to, current); ++ hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ? ++ CLOCK_REALTIME : CLOCK_MONOTONIC, ++ HRTIMER_MODE_ABS, current); + hrtimer_set_expires_range_ns(&to->timer, *abs_time, + current->timer_slack_ns); + } +@@ -3347,7 +3376,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + * The waiter is allocated on our stack, manipulated by the requeue + * code while we sleep on uaddr. 
+ */ +- rt_mutex_init_waiter(&rt_waiter); ++ rt_mutex_init_waiter(&rt_waiter, false); + + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); + if (unlikely(ret != 0)) +@@ -3378,20 +3407,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + /* Queue the futex_q, drop the hb lock, wait for wakeup. */ + futex_wait_queue_me(hb, &q, to); + +- spin_lock(&hb->lock); +- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); +- spin_unlock(&hb->lock); +- if (ret) +- goto out_put_keys; ++ /* ++ * On RT we must avoid races with requeue and trying to block ++ * on two mutexes (hb->lock and uaddr2's rtmutex) by ++ * serializing access to pi_blocked_on with pi_lock. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ if (current->pi_blocked_on) { ++ /* ++ * We have been requeued or are in the process of ++ * being requeued. ++ */ ++ raw_spin_unlock_irq(¤t->pi_lock); ++ } else { ++ /* ++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS ++ * prevents a concurrent requeue from moving us to the ++ * uaddr2 rtmutex. After that we can safely acquire ++ * (and possibly block on) hb->lock. ++ */ ++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ spin_lock(&hb->lock); ++ ++ /* ++ * Clean up pi_blocked_on. We might leak it otherwise ++ * when we succeeded with the hb->lock in the fast ++ * path. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ current->pi_blocked_on = NULL; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); ++ spin_unlock(&hb->lock); ++ if (ret) ++ goto out_put_keys; ++ } + + /* +- * In order for us to be here, we know our q.key == key2, and since +- * we took the hb->lock above, we also know that futex_requeue() has +- * completed and we no longer have to concern ourselves with a wakeup +- * race with the atomic proxy lock acquisition by the requeue code. The +- * futex_requeue dropped our key1 reference and incremented our key2 +- * reference count. ++ * In order to be here, we have either been requeued, are in ++ * the process of being requeued, or requeue successfully ++ * acquired uaddr2 on our behalf. If pi_blocked_on was ++ * non-null above, we may be racing with a requeue. Do not ++ * rely on q->lock_ptr to be hb2->lock until after blocking on ++ * hb->lock or hb2->lock. The futex_requeue dropped our key1 ++ * reference and incremented our key2 reference count. + */ ++ hb2 = hash_futex(&key2); + + /* Check if the requeue code acquired the second futex for us. */ + if (!q.rt_waiter) { +@@ -3400,14 +3464,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + * did a lock-steal - fix up the PI-state in that case. + */ + if (q.pi_state && (q.pi_state->owner != current)) { +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); + /* + * Drop the reference to the pi state which + * the requeue_pi() code acquired for us. + */ + put_pi_state(q.pi_state); +- spin_unlock(q.lock_ptr); ++ spin_unlock(&hb2->lock); + /* + * Adjust the return value. It's either -EFAULT or + * success (1) but the caller expects 0 for success. 
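The futex hunks above all apply the same timer API change: the former hrtimer_init_on_stack() plus hrtimer_init_sleeper() pair is replaced by a single hrtimer_init_sleeper_on_stack() call that takes the clock, mode and owning task together. A hedged illustration of the resulting pattern; wait_until() is an invented helper and the sleep/wake logic is elided:

#include <linux/hrtimer.h>
#include <linux/sched.h>

static void wait_until(ktime_t abs_time)
{
	struct hrtimer_sleeper to;

	hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS, current);
	hrtimer_set_expires(&to.timer, abs_time);

	/* ... arm the timer and schedule() until woken or expired ... */

	destroy_hrtimer_on_stack(&to.timer);
}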
+@@ -3426,7 +3491,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + pi_mutex = &q.pi_state->pi_mutex; + ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); + +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) + ret = 0; + +diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c +index 38554bc35..06a80bbf7 100644 +--- a/kernel/irq/handle.c ++++ b/kernel/irq/handle.c +@@ -185,10 +185,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) + { + irqreturn_t retval; + unsigned int flags = 0; ++ struct pt_regs *regs = get_irq_regs(); ++ u64 ip = regs ? instruction_pointer(regs) : 0; + + retval = __handle_irq_event_percpu(desc, &flags); + +- add_interrupt_randomness(desc->irq_data.irq, flags); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ desc->random_ip = ip; ++#else ++ add_interrupt_randomness(desc->irq_data.irq, flags, ip); ++#endif + + if (!noirqdebug) + note_interrupt(desc, retval); +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index a8c66acee..ba63896ae 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -24,6 +24,7 @@ + #include "internals.h" + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + __read_mostly bool force_irqthreads; + EXPORT_SYMBOL_GPL(force_irqthreads); + +@@ -33,6 +34,7 @@ static int __init setup_forced_irqthreads(char *arg) + return 0; + } + early_param("threadirqs", setup_forced_irqthreads); ++# endif + #endif + + static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) +@@ -1127,6 +1129,12 @@ static int irq_thread(void *data) + if (action_ret == IRQ_WAKE_THREAD) + irq_wake_secondary(desc, action); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ migrate_disable(); ++ add_interrupt_randomness(action->irq, 0, ++ desc->random_ip ^ (unsigned long) action); ++ migrate_enable(); ++#endif + wake_threads_waitq(desc); + } + +@@ -1580,6 +1588,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) + irqd_set(&desc->irq_data, IRQD_NO_BALANCING); + } + ++ if (new->flags & IRQF_NO_SOFTIRQ_CALL) ++ irq_settings_set_no_softirq_call(desc); ++ + if (irq_settings_can_autoenable(desc)) { + irq_startup(desc, IRQ_RESEND, IRQ_START_COND); + } else { +@@ -2710,7 +2721,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); + * This call sets the internal irqchip state of an interrupt, + * depending on the value of @which. + * +- * This function should be called with preemption disabled if the ++ * This function should be called with migration disabled if the + * interrupt controller has per-cpu registers. 
+ */ + int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, +diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h +index e43795cd2..47e2f9e23 100644 +--- a/kernel/irq/settings.h ++++ b/kernel/irq/settings.h +@@ -17,6 +17,7 @@ enum { + _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, + _IRQ_IS_POLLED = IRQ_IS_POLLED, + _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, ++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, + _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, + }; + +@@ -31,6 +32,7 @@ enum { + #define IRQ_PER_CPU_DEVID GOT_YOU_MORON + #define IRQ_IS_POLLED GOT_YOU_MORON + #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON ++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON + #undef IRQF_MODIFY_MASK + #define IRQF_MODIFY_MASK GOT_YOU_MORON + +@@ -41,6 +43,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) + desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); + } + ++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) ++{ ++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; ++} ++ ++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) ++{ ++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; ++} ++ + static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) + { + return desc->status_use_accessors & _IRQ_PER_CPU; +diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c +index d867d6dda..cd12ee86c 100644 +--- a/kernel/irq/spurious.c ++++ b/kernel/irq/spurious.c +@@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); + + static int __init irqfixup_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 1; + printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); + printk(KERN_WARNING "This may impact system performance.\n"); +@@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644); + + static int __init irqpoll_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 2; + printk(KERN_WARNING "Misrouted IRQ fixup and polling support " + "enabled\n"); +diff --git a/kernel/irq_work.c b/kernel/irq_work.c +index 73288914e..b6d9d3594 100644 +--- a/kernel/irq_work.c ++++ b/kernel/irq_work.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + + +@@ -57,29 +58,40 @@ void __weak arch_irq_work_raise(void) + } + + /* Enqueue on current CPU, work must already be claimed and preempt disabled */ +-static void __irq_work_queue_local(struct irq_work *work) ++static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list) + { +- /* If the work is "lazy", handle it from next tick if any */ +- if (work->flags & IRQ_WORK_LAZY) { +- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && +- tick_nohz_tick_stopped()) +- arch_irq_work_raise(); +- } else { +- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) +- arch_irq_work_raise(); +- } ++ bool empty; ++ ++ empty = llist_add(&work->llnode, list); ++ ++ if (empty && ++ (!(work->flags & IRQ_WORK_LAZY) || ++ tick_nohz_tick_stopped())) ++ arch_irq_work_raise(); ++} ++ ++static inline bool use_lazy_list(struct irq_work *work) ++{ ++ return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) ++ || (work->flags & IRQ_WORK_LAZY); + } + + /* Enqueue the irq work @work on the current CPU */ + bool irq_work_queue(struct irq_work *work) + { ++ struct llist_head *list; ++ + /* 
Only queue if not already pending */ + if (!irq_work_claim(work)) + return false; + + /* Queue the entry and raise the IPI if needed. */ + preempt_disable(); +- __irq_work_queue_local(work); ++ if (use_lazy_list(work)) ++ list = this_cpu_ptr(&lazy_list); ++ else ++ list = this_cpu_ptr(&raised_list); ++ __irq_work_queue_local(work, list); + preempt_enable(); + + return true; +@@ -98,6 +110,8 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) + return irq_work_queue(work); + + #else /* CONFIG_SMP: */ ++ struct llist_head *list; ++ + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(cpu)); + +@@ -106,13 +120,18 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) + return false; + + preempt_disable(); ++ if (use_lazy_list(work)) ++ list = &per_cpu(lazy_list, cpu); ++ else ++ list = &per_cpu(raised_list, cpu); ++ + if (cpu != smp_processor_id()) { + /* Arch remote IPI send/receive backend aren't NMI safe */ + WARN_ON_ONCE(in_nmi()); +- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) ++ if (llist_add(&work->llnode, list)) + arch_send_call_function_single_ipi(cpu); + } else { +- __irq_work_queue_local(work); ++ __irq_work_queue_local(work, list); + } + preempt_enable(); + +@@ -128,9 +147,8 @@ bool irq_work_needs_cpu(void) + raised = this_cpu_ptr(&raised_list); + lazy = this_cpu_ptr(&lazy_list); + +- if (llist_empty(raised) || arch_irq_work_has_interrupt()) +- if (llist_empty(lazy)) +- return false; ++ if (llist_empty(raised) && llist_empty(lazy)) ++ return false; + + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); +@@ -144,8 +162,12 @@ static void irq_work_run_list(struct llist_head *list) + struct llist_node *llnode; + unsigned long flags; + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ /* ++ * nort: On RT IRQ-work may run in SOFTIRQ context. ++ */ + BUG_ON(!irqs_disabled()); +- ++#endif + if (llist_empty(list)) + return; + +@@ -177,7 +199,16 @@ static void irq_work_run_list(struct llist_head *list) + void irq_work_run(void) + { + irq_work_run_list(this_cpu_ptr(&raised_list)); +- irq_work_run_list(this_cpu_ptr(&lazy_list)); ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { ++ /* ++ * NOTE: we raise softirq via IPI for safety, ++ * and execute in irq_work_tick() to move the ++ * overhead from hard to soft irq context. 
++ */ ++ if (!llist_empty(this_cpu_ptr(&lazy_list))) ++ raise_softirq(TIMER_SOFTIRQ); ++ } else ++ irq_work_run_list(this_cpu_ptr(&lazy_list)); + } + EXPORT_SYMBOL_GPL(irq_work_run); + +@@ -187,8 +218,17 @@ void irq_work_tick(void) + + if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) + irq_work_run_list(raised); ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) ++ irq_work_run_list(this_cpu_ptr(&lazy_list)); ++} ++ ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++void irq_work_tick_soft(void) ++{ + irq_work_run_list(this_cpu_ptr(&lazy_list)); + } ++#endif + + /* + * Synchronize against the irq_work @entry, ensures the entry is not +diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c +index 46ba85365..9a23632b6 100644 +--- a/kernel/ksysfs.c ++++ b/kernel/ksysfs.c +@@ -140,6 +140,15 @@ KERNEL_ATTR_RO(vmcoreinfo); + + #endif /* CONFIG_CRASH_CORE */ + ++#if defined(CONFIG_PREEMPT_RT_FULL) ++static ssize_t realtime_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", 1); ++} ++KERNEL_ATTR_RO(realtime); ++#endif ++ + /* whether file capabilities are enabled */ + static ssize_t fscaps_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +@@ -230,6 +239,9 @@ static struct attribute * kernel_attrs[] = { + #ifndef CONFIG_TINY_RCU + &rcu_expedited_attr.attr, + &rcu_normal_attr.attr, ++#endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ &realtime_attr.attr, + #endif + NULL + }; +diff --git a/kernel/kthread.c b/kernel/kthread.c +index 4191fed62..c02e3a406 100644 +--- a/kernel/kthread.c ++++ b/kernel/kthread.c +@@ -181,7 +181,7 @@ void *kthread_data(struct task_struct *task) + */ + void set_kthreadd_affinity(void) + { +- set_cpus_allowed_ptr(current, &kthreadd_task->cpus_allowed); ++ set_cpus_allowed_ptr(current, kthreadd_task->cpus_ptr); + } + + /** +@@ -653,7 +653,7 @@ void __kthread_init_worker(struct kthread_worker *worker, + struct lock_class_key *key) + { + memset(worker, 0, sizeof(struct kthread_worker)); +- spin_lock_init(&worker->lock); ++ raw_spin_lock_init(&worker->lock); + lockdep_set_class_and_name(&worker->lock, key, name); + INIT_LIST_HEAD(&worker->work_list); + INIT_LIST_HEAD(&worker->delayed_work_list); +@@ -695,21 +695,21 @@ int kthread_worker_fn(void *worker_ptr) + + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); +- spin_lock_irq(&worker->lock); ++ raw_spin_lock_irq(&worker->lock); + worker->task = NULL; +- spin_unlock_irq(&worker->lock); ++ raw_spin_unlock_irq(&worker->lock); + return 0; + } + + work = NULL; +- spin_lock_irq(&worker->lock); ++ raw_spin_lock_irq(&worker->lock); + if (!list_empty(&worker->work_list)) { + work = list_first_entry(&worker->work_list, + struct kthread_work, node); + list_del_init(&work->node); + } + worker->current_work = work; +- spin_unlock_irq(&worker->lock); ++ raw_spin_unlock_irq(&worker->lock); + + if (work) { + __set_current_state(TASK_RUNNING); +@@ -866,12 +866,12 @@ bool kthread_queue_work(struct kthread_worker *worker, + bool ret = false; + unsigned long flags; + +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + if (!queuing_blocked(worker, work)) { + kthread_insert_work(worker, work, &worker->work_list); + ret = true; + } +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(kthread_queue_work); +@@ -897,7 +897,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) + if (WARN_ON_ONCE(!worker)) + return; + +- 
spin_lock(&worker->lock); ++ raw_spin_lock(&worker->lock); + /* Work must not be used with >1 worker, see kthread_queue_work(). */ + WARN_ON_ONCE(work->worker != worker); + +@@ -907,7 +907,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) + if (!work->canceling) + kthread_insert_work(worker, work, &worker->work_list); + +- spin_unlock(&worker->lock); ++ raw_spin_unlock(&worker->lock); + } + EXPORT_SYMBOL(kthread_delayed_work_timer_fn); + +@@ -963,14 +963,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker, + unsigned long flags; + bool ret = false; + +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + + if (!queuing_blocked(worker, work)) { + __kthread_queue_delayed_work(worker, dwork, delay); + ret = true; + } + +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); +@@ -1006,7 +1006,7 @@ void kthread_flush_work(struct kthread_work *work) + if (!worker) + return; + +- spin_lock_irq(&worker->lock); ++ raw_spin_lock_irq(&worker->lock); + /* Work must not be used with >1 worker, see kthread_queue_work(). */ + WARN_ON_ONCE(work->worker != worker); + +@@ -1018,7 +1018,7 @@ void kthread_flush_work(struct kthread_work *work) + else + noop = true; + +- spin_unlock_irq(&worker->lock); ++ raw_spin_unlock_irq(&worker->lock); + + if (!noop) + wait_for_completion(&fwork.done); +@@ -1046,9 +1046,9 @@ static void kthread_cancel_delayed_work_timer(struct kthread_work *work, + * any queuing is blocked by setting the canceling counter. + */ + work->canceling++; +- spin_unlock_irqrestore(&worker->lock, *flags); ++ raw_spin_unlock_irqrestore(&worker->lock, *flags); + del_timer_sync(&dwork->timer); +- spin_lock_irqsave(&worker->lock, *flags); ++ raw_spin_lock_irqsave(&worker->lock, *flags); + work->canceling--; + } + +@@ -1110,7 +1110,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, + unsigned long flags; + int ret = false; + +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + + /* Do not bother with canceling when never queued. */ + if (!work->worker) +@@ -1139,7 +1139,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, + fast_queue: + __kthread_queue_delayed_work(worker, dwork, delay); + out: +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(kthread_mod_delayed_work); +@@ -1153,7 +1153,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) + if (!worker) + goto out; + +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + /* Work must not be used with >1 worker, see kthread_queue_work(). */ + WARN_ON_ONCE(work->worker != worker); + +@@ -1170,13 +1170,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) + * In the meantime, block any queuing by setting the canceling counter. 
+ */ + work->canceling++; +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + kthread_flush_work(work); +- spin_lock_irqsave(&worker->lock, flags); ++ raw_spin_lock_irqsave(&worker->lock, flags); + work->canceling--; + + out_fast: +- spin_unlock_irqrestore(&worker->lock, flags); ++ raw_spin_unlock_irqrestore(&worker->lock, flags); + out: + return ret; + } +diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile +index 392c7f23a..c0bf04b6b 100644 +--- a/kernel/locking/Makefile ++++ b/kernel/locking/Makefile +@@ -3,7 +3,7 @@ + # and is generally not a function of system call inputs. + KCOV_INSTRUMENT := n + +-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o ++obj-y += semaphore.o percpu-rwsem.o + + ifdef CONFIG_FUNCTION_TRACER + CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) +@@ -12,7 +12,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE) + CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) + endif + ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) ++obj-y += mutex.o + obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o ++endif ++obj-y += rwsem.o + obj-$(CONFIG_LOCKDEP) += lockdep.o + ifeq ($(CONFIG_PROC_FS),y) + obj-$(CONFIG_LOCKDEP) += lockdep_proc.o +@@ -25,8 +29,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o + obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) + obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o + obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o ++endif ++obj-$(CONFIG_PREEMPT_RT_FULL) += mutex-rt.o rwsem-rt.o rwlock-rt.o + obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o + obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o + obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c +index 1e272f6a0..1938b4bfb 100644 +--- a/kernel/locking/lockdep.c ++++ b/kernel/locking/lockdep.c +@@ -3826,6 +3826,7 @@ static void check_flags(unsigned long flags) + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * We dont accurately track softirq state in e.g. 
+ * hardirq contexts (such as on 4KSTACKS), so only +@@ -3840,6 +3841,7 @@ static void check_flags(unsigned long flags) + DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); + } + } ++#endif + + if (!debug_locks) + print_irqtrace_events(current); +diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c +index 7d0b0ed74..a81e6ef33 100644 +--- a/kernel/locking/locktorture.c ++++ b/kernel/locking/locktorture.c +@@ -29,7 +29,6 @@ + #include + #include + #include +-#include + #include + #include + #include +diff --git a/kernel/locking/mutex-rt.c b/kernel/locking/mutex-rt.c +new file mode 100644 +index 000000000..4f81595c0 +--- /dev/null ++++ b/kernel/locking/mutex-rt.c +@@ -0,0 +1,223 @@ ++/* ++ * kernel/rt.c ++ * ++ * Real-Time Preemption Support ++ * ++ * started by Ingo Molnar: ++ * ++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * ++ * historic credit for proving that Linux spinlocks can be implemented via ++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow ++ * and others) who prototyped it on 2.4 and did lots of comparative ++ * research and analysis; TimeSys, for proving that you can implement a ++ * fully preemptible kernel via the use of IRQ threading and mutexes; ++ * Bill Huey for persuasively arguing on lkml that the mutex model is the ++ * right one; and to MontaVista, who ported pmutexes to 2.6. ++ * ++ * This code is a from-scratch implementation and is not based on pmutexes, ++ * but the idea of converting spinlocks to mutexes is used here too. ++ * ++ * lock debugging, locking tree, deadlock detection: ++ * ++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey ++ * Released under the General Public License (GPL). ++ * ++ * Includes portions of the generic R/W semaphore implementation from: ++ * ++ * Copyright (c) 2001 David Howells (dhowells@redhat.com). ++ * - Derived partially from idea by Andrea Arcangeli ++ * - Derived also from comments by Linus ++ * ++ * Pending ownership of locks and ownership stealing: ++ * ++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt ++ * ++ * (also by Steven Rostedt) ++ * - Converted single pi_lock to individual task locks. ++ * ++ * By Esben Nielsen: ++ * Doing priority inheritance with help of the scheduler. ++ * ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * - major rework based on Esben Nielsens initial patch ++ * - replaced thread_info references by task_struct refs ++ * - removed task->pending_owner dependency ++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks ++ * in the scheduler return path as discussed with Steven Rostedt ++ * ++ * Copyright (C) 2006, Kihon Technologies Inc. ++ * Steven Rostedt ++ * - debugged and patched Thomas Gleixner's rework. ++ * - added back the cmpxchg to the rework. ++ * - turned atomic require back on for SMP. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "rtmutex_common.h" ++ ++/* ++ * struct mutex functions ++ */ ++void __mutex_do_init(struct mutex *mutex, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); ++ lockdep_init_map(&mutex->dep_map, name, key, 0); ++#endif ++ mutex->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__mutex_do_init); ++ ++void __lockfunc _mutex_lock(struct mutex *lock) ++{ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(_mutex_lock); ++ ++void __lockfunc _mutex_lock_io(struct mutex *lock) ++{ ++ int token; ++ ++ token = io_schedule_prepare(); ++ _mutex_lock(lock); ++ io_schedule_finish(token); ++} ++EXPORT_SYMBOL_GPL(_mutex_lock_io); ++ ++int __lockfunc _mutex_lock_interruptible(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible); ++ ++int __lockfunc _mutex_lock_killable(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) ++{ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(_mutex_lock_nested); ++ ++void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass) ++{ ++ int token; ++ ++ token = io_schedule_prepare(); ++ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); ++ ++ io_schedule_finish(token); ++} ++EXPORT_SYMBOL_GPL(_mutex_lock_io_nested); ++ ++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) ++{ ++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); ++ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(_mutex_lock_nest_lock); ++ ++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible_nested); ++ ++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable_nested); ++#endif ++ ++int __lockfunc _mutex_trylock(struct mutex *lock) ++{ ++ int ret = __rt_mutex_trylock(&lock->lock); ++ ++ if (ret) ++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_trylock); ++ ++void __lockfunc _mutex_unlock(struct mutex *lock) ++{ ++ 
mutex_release(&lock->dep_map, 1, _RET_IP_); ++ __rt_mutex_unlock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_unlock); ++ ++/** ++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 ++ * @cnt: the atomic which we are to dec ++ * @lock: the mutex to return holding if we dec to 0 ++ * ++ * return true and hold lock if we dec to 0, return false otherwise ++ */ ++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) ++{ ++ /* dec if we can't possibly hit 0 */ ++ if (atomic_add_unless(cnt, -1, 1)) ++ return 0; ++ /* we might hit 0, so take the lock */ ++ mutex_lock(lock); ++ if (!atomic_dec_and_test(cnt)) { ++ /* when we actually did the dec, we didn't hit 0 */ ++ mutex_unlock(lock); ++ return 0; ++ } ++ /* we hit 0, and we hold the lock */ ++ return 1; ++} ++EXPORT_SYMBOL(atomic_dec_and_mutex_lock); +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index a5ec4f685..fe5153fc7 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -7,6 +7,11 @@ + * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner + * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt + * Copyright (C) 2006 Esben Nielsen ++ * Adaptive Spinlocks: ++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, ++ * and Peter Morreale, ++ * Adaptive Spinlocks simplification: ++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt + * + * See Documentation/locking/rt-mutex-design.txt for details. + */ +@@ -18,6 +23,8 @@ + #include + #include + #include ++#include ++#include + + #include "rtmutex_common.h" + +@@ -135,6 +142,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) + WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); + } + ++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) ++{ ++ return waiter && waiter != PI_WAKEUP_INPROGRESS && ++ waiter != PI_REQUEUE_INPROGRESS; ++} ++ + /* + * We can speed up the acquire/release, if there's no debugging state to be + * set up. +@@ -228,7 +241,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + * Only use with rt_mutex_waiter_{less,equal}() + */ + #define task_to_waiter(p) \ +- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } ++ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) } + + static inline int + rt_mutex_waiter_less(struct rt_mutex_waiter *left, +@@ -268,6 +281,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + return 1; + } + ++#define STEAL_NORMAL 0 ++#define STEAL_LATERAL 1 ++ ++static inline int ++rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode) ++{ ++ struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); ++ ++ if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter)) ++ return 1; ++ ++ /* ++ * Note that RT tasks are excluded from lateral-steals ++ * to prevent the introduction of an unbounded latency. 
++ */ ++ if (mode == STEAL_NORMAL || rt_task(waiter->task)) ++ return 0; ++ ++ return rt_mutex_waiter_equal(waiter, top_waiter); ++} ++ + static void + rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) + { +@@ -372,6 +406,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, + return debug_rt_mutex_detect_deadlock(waiter, chwalk); + } + ++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) ++{ ++ if (waiter->savestate) ++ wake_up_lock_sleeper(waiter->task); ++ else ++ wake_up_process(waiter->task); ++} ++ + /* + * Max number of times we'll walk the boosting chain: + */ +@@ -379,7 +421,8 @@ int max_lock_depth = 1024; + + static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) + { +- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; ++ return rt_mutex_real_waiter(p->pi_blocked_on) ? ++ p->pi_blocked_on->lock : NULL; + } + + /* +@@ -515,7 +558,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, + * reached or the state of the chain has changed while we + * dropped the locks. + */ +- if (!waiter) ++ if (!rt_mutex_real_waiter(waiter)) + goto out_unlock_pi; + + /* +@@ -696,13 +739,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, + * follow here. This is the end of the chain we are walking. + */ + if (!rt_mutex_owner(lock)) { ++ struct rt_mutex_waiter *lock_top_waiter; ++ + /* + * If the requeue [7] above changed the top waiter, + * then we need to wake the new top waiter up to try + * to get the lock. + */ +- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) +- wake_up_process(rt_mutex_top_waiter(lock)->task); ++ lock_top_waiter = rt_mutex_top_waiter(lock); ++ if (prerequeue_top_waiter != lock_top_waiter) ++ rt_mutex_wake_waiter(lock_top_waiter); + raw_spin_unlock_irq(&lock->wait_lock); + return 0; + } +@@ -804,9 +850,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, + * @task: The task which wants to acquire the lock + * @waiter: The waiter that is queued to the lock's wait tree if the + * callsite called task_blocked_on_lock(), otherwise NULL ++ * @mode: Lock steal mode (STEAL_NORMAL, STEAL_LATERAL) + */ +-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +- struct rt_mutex_waiter *waiter) ++static int __try_to_take_rt_mutex(struct rt_mutex *lock, ++ struct task_struct *task, ++ struct rt_mutex_waiter *waiter, int mode) + { + lockdep_assert_held(&lock->wait_lock); + +@@ -842,12 +890,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + */ + if (waiter) { + /* +- * If waiter is not the highest priority waiter of +- * @lock, give up. ++ * If waiter is not the highest priority waiter of @lock, ++ * or its peer when lateral steal is allowed, give up. + */ +- if (waiter != rt_mutex_top_waiter(lock)) ++ if (!rt_mutex_steal(lock, waiter, mode)) + return 0; +- + /* + * We can acquire the lock. Remove the waiter from the + * lock waiters tree. +@@ -865,14 +912,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + */ + if (rt_mutex_has_waiters(lock)) { + /* +- * If @task->prio is greater than or equal to +- * the top waiter priority (kernel view), +- * @task lost. ++ * If @task->prio is greater than the top waiter ++ * priority (kernel view), or equal to it when a ++ * lateral steal is forbidden, @task lost. 
+ */ +- if (!rt_mutex_waiter_less(task_to_waiter(task), +- rt_mutex_top_waiter(lock))) ++ if (!rt_mutex_steal(lock, task_to_waiter(task), mode)) + return 0; +- + /* + * The current top waiter stays enqueued. We + * don't have to change anything in the lock +@@ -919,6 +964,344 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + return 1; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * preemptible spin_lock functions: ++ */ ++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ might_sleep_no_state_check(); ++ ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) ++ return; ++ else ++ slowfn(lock); ++} ++ ++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) ++ return; ++ else ++ slowfn(lock); ++} ++#ifdef CONFIG_SMP ++/* ++ * Note that owner is a speculative pointer and dereferencing relies ++ * on rcu_read_lock() and the check against the lock owner. ++ */ ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *owner) ++{ ++ int res = 0; ++ ++ rcu_read_lock(); ++ for (;;) { ++ if (owner != rt_mutex_owner(lock)) ++ break; ++ /* ++ * Ensure that owner->on_cpu is dereferenced _after_ ++ * checking the above to be valid. ++ */ ++ barrier(); ++ if (!owner->on_cpu) { ++ res = 1; ++ break; ++ } ++ cpu_relax(); ++ } ++ rcu_read_unlock(); ++ return res; ++} ++#else ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *orig_owner) ++{ ++ return 1; ++} ++#endif ++ ++static int task_blocks_on_rt_mutex(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter, ++ struct task_struct *task, ++ enum rtmutex_chainwalk chwalk); ++/* ++ * Slow path lock function spin_lock style: this variant is very ++ * careful not to miss any non-lock wakeups. ++ * ++ * We store the current state under p->pi_lock in p->saved_state and ++ * the try_to_wake_up() code handles this accordingly. ++ */ ++void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter, ++ unsigned long flags) ++{ ++ struct task_struct *lock_owner, *self = current; ++ struct rt_mutex_waiter *top_waiter; ++ int ret; ++ ++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) ++ return; ++ ++ BUG_ON(rt_mutex_owner(lock) == self); ++ ++ /* ++ * We save whatever state the task is in and we'll restore it ++ * after acquiring the lock taking real wakeups into account ++ * as well. We are serialized via pi_lock against wakeups. See ++ * try_to_wake_up(). ++ */ ++ raw_spin_lock(&self->pi_lock); ++ self->saved_state = self->state; ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock(&self->pi_lock); ++ ++ ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK); ++ BUG_ON(ret); ++ ++ for (;;) { ++ /* Try to acquire the lock again. */ ++ if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL)) ++ break; ++ ++ top_waiter = rt_mutex_top_waiter(lock); ++ lock_owner = rt_mutex_owner(lock); ++ ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ ++ debug_rt_mutex_print_deadlock(waiter); ++ ++ if (top_waiter != waiter || adaptive_wait(lock, lock_owner)) ++ schedule(); ++ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ ++ raw_spin_lock(&self->pi_lock); ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock(&self->pi_lock); ++ } ++ ++ /* ++ * Restore the task state to current->saved_state. 
We set it ++ * to the original state above and the try_to_wake_up() code ++ * has possibly updated it when a real (non-rtmutex) wakeup ++ * happened while we were blocked. Clear saved_state so ++ * try_to_wakeup() does not get confused. ++ */ ++ raw_spin_lock(&self->pi_lock); ++ __set_current_state_no_track(self->saved_state); ++ self->saved_state = TASK_RUNNING; ++ raw_spin_unlock(&self->pi_lock); ++ ++ /* ++ * try_to_take_rt_mutex() sets the waiter bit ++ * unconditionally. We might have to fix that up: ++ */ ++ fixup_rt_mutex_waiters(lock); ++ ++ BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock)); ++ BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry)); ++} ++ ++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) ++{ ++ struct rt_mutex_waiter waiter; ++ unsigned long flags; ++ ++ rt_mutex_init_waiter(&waiter, true); ++ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ rt_spin_lock_slowlock_locked(lock, &waiter, flags); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ debug_rt_mutex_free_waiter(&waiter); ++} ++ ++static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wq_sleeper); ++/* ++ * Slow path to release a rt_mutex spin_lock style ++ */ ++void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) ++{ ++ unsigned long flags; ++ DEFINE_WAKE_Q(wake_q); ++ DEFINE_WAKE_Q(wake_sleeper_q); ++ bool postunlock; ++ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ ++ if (postunlock) ++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); ++} ++ ++void __lockfunc rt_spin_lock(spinlock_t *lock) ++{ ++ sleeping_lock_inc(); ++ rcu_read_lock(); ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); ++} ++EXPORT_SYMBOL(rt_spin_lock); ++ ++void __lockfunc __rt_spin_lock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); ++} ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) ++{ ++ sleeping_lock_inc(); ++ rcu_read_lock(); ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); ++} ++EXPORT_SYMBOL(rt_spin_lock_nested); ++#endif ++ ++void __lockfunc rt_spin_unlock(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); ++ migrate_enable(); ++ rcu_read_unlock(); ++ sleeping_lock_dec(); ++} ++EXPORT_SYMBOL(rt_spin_unlock); ++ ++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(__rt_spin_unlock); ++ ++/* ++ * Wait for the lock to get unlocked: instead of polling for an unlock ++ * (like raw spinlocks do), we lock and unlock, to force the kernel to ++ * schedule if there's contention: ++ */ ++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ spin_unlock(lock); ++} ++EXPORT_SYMBOL(rt_spin_unlock_wait); ++ ++int __lockfunc rt_spin_trylock(spinlock_t *lock) ++{ ++ int ret; ++ ++ sleeping_lock_inc(); ++ migrate_disable(); ++ ret = __rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ rcu_read_lock(); ++ } else { ++ 
migrate_enable(); ++ sleeping_lock_dec(); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock); ++ ++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) ++{ ++ int ret; ++ ++ local_bh_disable(); ++ ret = __rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ sleeping_lock_inc(); ++ rcu_read_lock(); ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } else ++ local_bh_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_bh); ++ ++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ ret = __rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ sleeping_lock_inc(); ++ rcu_read_lock(); ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_irqsave); ++ ++void ++__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); ++ lockdep_init_map(&lock->dep_map, name, key, 0); ++#endif ++} ++EXPORT_SYMBOL(__rt_spin_lock_init); ++ ++#endif /* PREEMPT_RT_FULL */ ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ static inline int __sched ++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); ++ struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); ++ ++ if (!hold_ctx) ++ return 0; ++ ++ if (unlikely(ctx == hold_ctx)) ++ return -EALREADY; ++ ++ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && ++ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { ++#ifdef CONFIG_DEBUG_MUTEXES ++ DEBUG_LOCKS_WARN_ON(ctx->contending_lock); ++ ctx->contending_lock = ww; ++#endif ++ return -EDEADLK; ++ } ++ ++ return 0; ++} ++#else ++ static inline int __sched ++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ BUG(); ++ return 0; ++} ++ ++#endif ++ ++static inline int ++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter) ++{ ++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); ++} ++ + /* + * Task blocks on lock. + * +@@ -951,6 +1334,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + return -EDEADLK; + + raw_spin_lock(&task->pi_lock); ++ /* ++ * In the case of futex requeue PI, this will be a proxy ++ * lock. The task will wake unaware that it is enqueueed on ++ * this lock. Avoid blocking on two locks and corrupting ++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS ++ * flag. futex_wait_requeue_pi() sets this when it wakes up ++ * before requeue (due to a signal or timeout). Do not enqueue ++ * the task if PI_WAKEUP_INPROGRESS is set. ++ */ ++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { ++ raw_spin_unlock(&task->pi_lock); ++ return -EAGAIN; ++ } ++ ++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); ++ + waiter->task = task; + waiter->lock = lock; + waiter->prio = task->prio; +@@ -974,7 +1373,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + rt_mutex_enqueue_pi(owner, waiter); + + rt_mutex_adjust_prio(owner); +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { + chain_walk = 1; +@@ -1016,6 +1415,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + * Called with lock->wait_lock held and interrupts disabled. 
+ */ + static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q, + struct rt_mutex *lock) + { + struct rt_mutex_waiter *waiter; +@@ -1055,7 +1455,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, + * Pairs with preempt_enable() in rt_mutex_postunlock(); + */ + preempt_disable(); +- wake_q_add(wake_q, waiter->task); ++ if (waiter->savestate) ++ wake_q_add_sleeper(wake_sleeper_q, waiter->task); ++ else ++ wake_q_add(wake_q, waiter->task); + raw_spin_unlock(¤t->pi_lock); + } + +@@ -1070,7 +1473,7 @@ static void remove_waiter(struct rt_mutex *lock, + { + bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); + struct task_struct *owner = rt_mutex_owner(lock); +- struct rt_mutex *next_lock; ++ struct rt_mutex *next_lock = NULL; + + lockdep_assert_held(&lock->wait_lock); + +@@ -1096,7 +1499,8 @@ static void remove_waiter(struct rt_mutex *lock, + rt_mutex_adjust_prio(owner); + + /* Store the lock on which owner is blocked or NULL */ +- next_lock = task_blocked_on_lock(owner); ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) ++ next_lock = task_blocked_on_lock(owner); + + raw_spin_unlock(&owner->pi_lock); + +@@ -1132,26 +1536,28 @@ void rt_mutex_adjust_pi(struct task_struct *task) + raw_spin_lock_irqsave(&task->pi_lock, flags); + + waiter = task->pi_blocked_on; +- if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { ++ if (!rt_mutex_real_waiter(waiter) || ++ rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + return; + } + next_lock = waiter->lock; +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); + + /* gets dropped in rt_mutex_adjust_prio_chain()! */ + get_task_struct(task); + ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, + next_lock, NULL, task); + } + +-void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) ++void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) + { + debug_rt_mutex_init_waiter(waiter); + RB_CLEAR_NODE(&waiter->pi_tree_entry); + RB_CLEAR_NODE(&waiter->tree_entry); + waiter->task = NULL; ++ waiter->savestate = savestate; + } + + /** +@@ -1167,7 +1573,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) + static int __sched + __rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- struct rt_mutex_waiter *waiter) ++ struct rt_mutex_waiter *waiter, ++ struct ww_acquire_ctx *ww_ctx) + { + int ret = 0; + +@@ -1176,16 +1583,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, + if (try_to_take_rt_mutex(lock, current, waiter)) + break; + +- /* +- * TASK_INTERRUPTIBLE checks for signals and +- * timeout. Ignored otherwise. +- */ +- if (likely(state == TASK_INTERRUPTIBLE)) { +- /* Signal pending? 
*/ +- if (signal_pending(current)) +- ret = -EINTR; +- if (timeout && !timeout->task) +- ret = -ETIMEDOUT; ++ if (timeout && !timeout->task) { ++ ret = -ETIMEDOUT; ++ break; ++ } ++ if (signal_pending_state(state, current)) { ++ ret = -EINTR; ++ break; ++ } ++ ++ if (ww_ctx && ww_ctx->acquired > 0) { ++ ret = __mutex_lock_check_stamp(lock, ww_ctx); + if (ret) + break; + } +@@ -1224,33 +1632,104 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, + } + } + +-/* +- * Slow path lock function: +- */ +-static int __sched +-rt_mutex_slowlock(struct rt_mutex *lock, int state, +- struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk) ++static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, ++ struct ww_acquire_ctx *ww_ctx) + { +- struct rt_mutex_waiter waiter; +- unsigned long flags; +- int ret = 0; ++#ifdef CONFIG_DEBUG_MUTEXES ++ /* ++ * If this WARN_ON triggers, you used ww_mutex_lock to acquire, ++ * but released with a normal mutex_unlock in this call. ++ * ++ * This should never happen, always use ww_mutex_unlock. ++ */ ++ DEBUG_LOCKS_WARN_ON(ww->ctx); ++ ++ /* ++ * Not quite done after calling ww_acquire_done() ? ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); + +- rt_mutex_init_waiter(&waiter); ++ if (ww_ctx->contending_lock) { ++ /* ++ * After -EDEADLK you tried to ++ * acquire a different ww_mutex? Bad! ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); ++ ++ /* ++ * You called ww_mutex_lock after receiving -EDEADLK, ++ * but 'forgot' to unlock everything else first? ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); ++ ww_ctx->contending_lock = NULL; ++ } + + /* +- * Technically we could use raw_spin_[un]lock_irq() here, but this can +- * be called in early boot if the cmpxchg() fast path is disabled +- * (debug, no architecture support). In this case we will acquire the +- * rtmutex with lock->wait_lock held. But we cannot unconditionally +- * enable interrupts in that early boot case. So we need to use the +- * irqsave/restore variants. ++ * Naughty, using a different class will lead to undefined behavior! + */ +- raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); ++#endif ++ ww_ctx->acquired++; ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void ww_mutex_account_lock(struct rt_mutex *lock, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); ++ struct rt_mutex_waiter *waiter, *n; ++ ++ /* ++ * This branch gets optimized out for the common case, ++ * and is only important for ww_mutex_lock. ++ */ ++ ww_mutex_lock_acquired(ww, ww_ctx); ++ ww->ctx = ww_ctx; ++ ++ /* ++ * Give any possible sleeping processes the chance to wake up, ++ * so they can recheck if they have to back off. 
++ */ ++ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root, ++ tree_entry) { ++ /* XXX debug rt mutex waiter wakeup */ ++ ++ BUG_ON(waiter->lock != lock); ++ rt_mutex_wake_waiter(waiter); ++ } ++} ++ ++#else ++ ++static void ww_mutex_account_lock(struct rt_mutex *lock, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ BUG(); ++} ++#endif ++ ++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, ++ struct hrtimer_sleeper *timeout, ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx, ++ struct rt_mutex_waiter *waiter) ++{ ++ int ret; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (ww_ctx) { ++ struct ww_mutex *ww; ++ ++ ww = container_of(lock, struct ww_mutex, base.lock); ++ if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) ++ return -EALREADY; ++ } ++#endif + + /* Try to acquire the lock again: */ + if (try_to_take_rt_mutex(lock, current, NULL)) { +- raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ if (ww_ctx) ++ ww_mutex_account_lock(lock, ww_ctx); + return 0; + } + +@@ -1260,16 +1739,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, + if (unlikely(timeout)) + hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); + +- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); ++ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk); + +- if (likely(!ret)) ++ if (likely(!ret)) { + /* sleep on the mutex */ +- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); ++ ret = __rt_mutex_slowlock(lock, state, timeout, waiter, ++ ww_ctx); ++ } else if (ww_ctx) { ++ /* ww_mutex received EDEADLK, let it become EALREADY */ ++ ret = __mutex_lock_check_stamp(lock, ww_ctx); ++ BUG_ON(!ret); ++ } + + if (unlikely(ret)) { + __set_current_state(TASK_RUNNING); +- remove_waiter(lock, &waiter); +- rt_mutex_handle_deadlock(ret, chwalk, &waiter); ++ remove_waiter(lock, waiter); ++ /* ww_mutex wants to report EDEADLK/EALREADY, let it */ ++ if (!ww_ctx) ++ rt_mutex_handle_deadlock(ret, chwalk, waiter); ++ } else if (ww_ctx) { ++ ww_mutex_account_lock(lock, ww_ctx); + } + + /* +@@ -1277,6 +1766,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, + * unconditionally. We might have to fix that up. + */ + fixup_rt_mutex_waiters(lock); ++ return ret; ++} ++ ++/* ++ * Slow path lock function: ++ */ ++static int __sched ++rt_mutex_slowlock(struct rt_mutex *lock, int state, ++ struct hrtimer_sleeper *timeout, ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ struct rt_mutex_waiter waiter; ++ unsigned long flags; ++ int ret = 0; ++ ++ rt_mutex_init_waiter(&waiter, false); ++ ++ /* ++ * Technically we could use raw_spin_[un]lock_irq() here, but this can ++ * be called in early boot if the cmpxchg() fast path is disabled ++ * (debug, no architecture support). In this case we will acquire the ++ * rtmutex with lock->wait_lock held. But we cannot unconditionally ++ * enable interrupts in that early boot case. So we need to use the ++ * irqsave/restore variants. ++ */ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ ++ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx, ++ &waiter); + + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + +@@ -1337,7 +1856,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) + * Return whether the current task needs to call rt_mutex_postunlock(). 
+ */ + static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, +- struct wake_q_head *wake_q) ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q) + { + unsigned long flags; + +@@ -1391,7 +1911,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, + * + * Queue the next waiter for wakeup once we release the wait_lock. + */ +- mark_wakeup_next_waiter(wake_q, lock); ++ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + return true; /* call rt_mutex_postunlock() */ +@@ -1405,29 +1925,45 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, + */ + static inline int + rt_mutex_fastlock(struct rt_mutex *lock, int state, ++ struct ww_acquire_ctx *ww_ctx, + int (*slowfn)(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk)) ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx)) + { + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; + +- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); ++ /* ++ * If rt_mutex blocks, the function sched_submit_work will not call ++ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true). ++ * We must call blk_schedule_flush_plug here, if we don't call it, ++ * a deadlock in I/O may happen. ++ */ ++ if (unlikely(blk_needs_flush_plug(current))) ++ blk_schedule_flush_plug(current); ++ ++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx); + } + + static inline int + rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx, + int (*slowfn)(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk)) ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx)) + { + if (chwalk == RT_MUTEX_MIN_CHAINWALK && + likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; + +- return slowfn(lock, state, timeout, chwalk); ++ if (unlikely(blk_needs_flush_plug(current))) ++ blk_schedule_flush_plug(current); ++ ++ return slowfn(lock, state, timeout, chwalk, ww_ctx); + } + + static inline int +@@ -1443,9 +1979,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, + /* + * Performs the wakeup of the the top-waiter and re-enables preemption. 
+ */ +-void rt_mutex_postunlock(struct wake_q_head *wake_q) ++void rt_mutex_postunlock(struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q) + { + wake_up_q(wake_q); ++ wake_up_q_sleeper(wake_sleeper_q); + + /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ + preempt_enable(); +@@ -1454,23 +1992,46 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q) + static inline void + rt_mutex_fastunlock(struct rt_mutex *lock, + bool (*slowfn)(struct rt_mutex *lock, +- struct wake_q_head *wqh)) ++ struct wake_q_head *wqh, ++ struct wake_q_head *wq_sleeper)) + { + DEFINE_WAKE_Q(wake_q); ++ DEFINE_WAKE_Q(wake_sleeper_q); + + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) + return; + +- if (slowfn(lock, &wake_q)) +- rt_mutex_postunlock(&wake_q); ++ if (slowfn(lock, &wake_q, &wake_sleeper_q)) ++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); + } + +-static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) ++int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state) + { + might_sleep(); ++ return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock); ++} ++ ++/** ++ * rt_mutex_lock_state - lock a rt_mutex with a given state ++ * ++ * @lock: The rt_mutex to be locked ++ * @state: The state to set when blocking on the rt_mutex ++ */ ++static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock, ++ unsigned int subclass, int state) ++{ ++ int ret; + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); +- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); ++ ret = __rt_mutex_lock_state(lock, state); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++ ++static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) ++{ ++ rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE); + } + + #ifdef CONFIG_DEBUG_LOCK_ALLOC +@@ -1511,16 +2072,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); + */ + int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) + { +- int ret; +- +- might_sleep(); +- +- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); +- ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); +- if (ret) +- mutex_release(&lock->dep_map, 1, _RET_IP_); +- +- return ret; ++ return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + +@@ -1537,6 +2089,22 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) + return __rt_mutex_slowtrylock(lock); + } + ++/** ++ * rt_mutex_lock_killable - lock a rt_mutex killable ++ * ++ * @lock: the rt_mutex to be locked ++ * @detect_deadlock: deadlock detection on/off ++ * ++ * Returns: ++ * 0 on success ++ * -EINTR when interrupted by a signal ++ */ ++int __sched rt_mutex_lock_killable(struct rt_mutex *lock) ++{ ++ return rt_mutex_lock_state(lock, 0, TASK_KILLABLE); ++} ++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); ++ + /** + * rt_mutex_timed_lock - lock a rt_mutex interruptible + * the timeout structure is provided +@@ -1560,6 +2128,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, + RT_MUTEX_MIN_CHAINWALK, ++ NULL, + rt_mutex_slowlock); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); +@@ -1568,6 +2137,18 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) + } + EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); + ++int __sched __rt_mutex_trylock(struct rt_mutex *lock) ++{ ++#ifdef 
CONFIG_PREEMPT_RT_FULL ++ if (WARN_ON_ONCE(in_irq() || in_nmi())) ++#else ++ if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) ++#endif ++ return 0; ++ ++ return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); ++} ++ + /** + * rt_mutex_trylock - try to lock a rt_mutex + * +@@ -1583,10 +2164,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) + { + int ret; + +- if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) +- return 0; +- +- ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); ++ ret = __rt_mutex_trylock(lock); + if (ret) + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); + +@@ -1594,6 +2172,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) + } + EXPORT_SYMBOL_GPL(rt_mutex_trylock); + ++void __sched __rt_mutex_unlock(struct rt_mutex *lock) ++{ ++ rt_mutex_fastunlock(lock, rt_mutex_slowunlock); ++} ++ + /** + * rt_mutex_unlock - unlock a rt_mutex + * +@@ -1602,16 +2185,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); + void __sched rt_mutex_unlock(struct rt_mutex *lock) + { + mutex_release(&lock->dep_map, 1, _RET_IP_); +- rt_mutex_fastunlock(lock, rt_mutex_slowunlock); ++ __rt_mutex_unlock(lock); + } + EXPORT_SYMBOL_GPL(rt_mutex_unlock); + +-/** +- * Futex variant, that since futex variants do not use the fast-path, can be +- * simple and will not need to retry. +- */ +-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wake_q) ++static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wq_sleeper) + { + lockdep_assert_held(&lock->wait_lock); + +@@ -1628,23 +2208,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, + * avoid inversion prior to the wakeup. preempt_disable() + * therein pairs with rt_mutex_postunlock(). + */ +- mark_wakeup_next_waiter(wake_q, lock); ++ mark_wakeup_next_waiter(wake_q, wq_sleeper, lock); + + return true; /* call postunlock() */ + } + ++/** ++ * Futex variant, that since futex variants do not use the fast-path, can be ++ * simple and will not need to retry. ++ */ ++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wq_sleeper) ++{ ++ return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper); ++} ++ + void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) + { + DEFINE_WAKE_Q(wake_q); ++ DEFINE_WAKE_Q(wake_sleeper_q); + unsigned long flags; + bool postunlock; + + raw_spin_lock_irqsave(&lock->wait_lock, flags); +- postunlock = __rt_mutex_futex_unlock(lock, &wake_q); ++ postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + if (postunlock) +- rt_mutex_postunlock(&wake_q); ++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); + } + + /** +@@ -1683,7 +2275,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name, + if (name && key) + debug_rt_mutex_init(lock, name, key); + } +-EXPORT_SYMBOL_GPL(__rt_mutex_init); ++EXPORT_SYMBOL(__rt_mutex_init); + + /** + * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a +@@ -1703,6 +2295,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner) + { + __rt_mutex_init(lock, NULL, NULL); ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* ++ * get another key class for the wait_lock. LOCK_PI and UNLOCK_PI is ++ * holding the ->wait_lock of the proxy_lock while unlocking a sleeping ++ * lock. 
++ */ ++ raw_spin_lock_init(&lock->wait_lock); ++#endif + debug_rt_mutex_proxy_lock(lock, proxy_owner); + rt_mutex_set_owner(lock, proxy_owner); + } +@@ -1725,6 +2325,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock) + rt_mutex_set_owner(lock, NULL); + } + ++static void fixup_rt_mutex_blocked(struct rt_mutex *lock) ++{ ++ struct task_struct *tsk = current; ++ /* ++ * RT has a problem here when the wait got interrupted by a timeout ++ * or a signal. task->pi_blocked_on is still set. The task must ++ * acquire the hash bucket lock when returning from this function. ++ * ++ * If the hash bucket lock is contended then the ++ * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in ++ * task_blocks_on_rt_mutex() will trigger. This can be avoided by ++ * clearing task->pi_blocked_on which removes the task from the ++ * boosting chain of the rtmutex. That's correct because the task ++ * is not longer blocked on it. ++ */ ++ raw_spin_lock(&tsk->pi_lock); ++ tsk->pi_blocked_on = NULL; ++ raw_spin_unlock(&tsk->pi_lock); ++} ++ + /** + * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task + * @lock: the rt_mutex to take +@@ -1755,6 +2375,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + if (try_to_take_rt_mutex(lock, task, NULL)) + return 1; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * In PREEMPT_RT there's an added race. ++ * If the task, that we are about to requeue, times out, ++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue ++ * to skip this task. But right after the task sets ++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then ++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. ++ * This will replace the PI_WAKEUP_INPROGRESS with the actual ++ * lock that it blocks on. We *must not* place this task ++ * on this proxy lock in that case. ++ * ++ * To prevent this race, we first take the task's pi_lock ++ * and check if it has updated its pi_blocked_on. If it has, ++ * we assume that it woke up and we return -EAGAIN. ++ * Otherwise, we set the task's pi_blocked_on to ++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up ++ * it will know that we are in the process of requeuing it. ++ */ ++ raw_spin_lock(&task->pi_lock); ++ if (task->pi_blocked_on) { ++ raw_spin_unlock(&task->pi_lock); ++ return -EAGAIN; ++ } ++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; ++ raw_spin_unlock(&task->pi_lock); ++#endif ++ + /* We enforce deadlock detection for futexes */ + ret = task_blocks_on_rt_mutex(lock, waiter, task, + RT_MUTEX_FULL_CHAINWALK); +@@ -1769,6 +2417,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + ret = 0; + } + ++ if (ret) ++ fixup_rt_mutex_blocked(lock); ++ + debug_rt_mutex_print_deadlock(waiter); + + return ret; +@@ -1854,12 +2505,15 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + raw_spin_lock_irq(&lock->wait_lock); + /* sleep on the mutex */ + set_current_state(TASK_INTERRUPTIBLE); +- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); ++ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); + /* + * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might + * have to fix that up. 
+ */ + fixup_rt_mutex_waiters(lock); ++ if (ret) ++ fixup_rt_mutex_blocked(lock); ++ + raw_spin_unlock_irq(&lock->wait_lock); + + return ret; +@@ -1921,3 +2575,99 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, + + return cleanup; + } ++ ++static inline int ++ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH ++ unsigned tmp; ++ ++ if (ctx->deadlock_inject_countdown-- == 0) { ++ tmp = ctx->deadlock_inject_interval; ++ if (tmp > UINT_MAX/4) ++ tmp = UINT_MAX; ++ else ++ tmp = tmp*2 + tmp + tmp/2; ++ ++ ctx->deadlock_inject_interval = tmp; ++ ctx->deadlock_inject_countdown = tmp; ++ ctx->contending_lock = lock; ++ ++ ww_mutex_unlock(lock); ++ ++ return -EDEADLK; ++ } ++#endif ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++int __sched ++ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ int ret; ++ ++ might_sleep(); ++ ++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, ++ ctx ? &ctx->dep_map : NULL, _RET_IP_); ++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ++ ctx); ++ if (ret) ++ mutex_release(&lock->base.dep_map, 1, _RET_IP_); ++ else if (!ret && ctx && ctx->acquired > 1) ++ return ww_mutex_deadlock_injection(lock, ctx); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible); ++ ++int __sched ++ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ int ret; ++ ++ might_sleep(); ++ ++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, ++ ctx ? &ctx->dep_map : NULL, _RET_IP_); ++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ++ ctx); ++ if (ret) ++ mutex_release(&lock->base.dep_map, 1, _RET_IP_); ++ else if (!ret && ctx && ctx->acquired > 1) ++ return ww_mutex_deadlock_injection(lock, ctx); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(ww_mutex_lock); ++ ++void __sched ww_mutex_unlock(struct ww_mutex *lock) ++{ ++ int nest = !!lock->ctx; ++ ++ /* ++ * The unlocking fastpath is the 0->1 transition from 'locked' ++ * into 'unlocked' state: ++ */ ++ if (nest) { ++#ifdef CONFIG_DEBUG_MUTEXES ++ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); ++#endif ++ if (lock->ctx->acquired > 0) ++ lock->ctx->acquired--; ++ lock->ctx = NULL; ++ } ++ ++ mutex_release(&lock->base.dep_map, nest, _RET_IP_); ++ __rt_mutex_unlock(&lock->base.lock); ++} ++EXPORT_SYMBOL(ww_mutex_unlock); ++ ++int __rt_mutex_owner_current(struct rt_mutex *lock) ++{ ++ return rt_mutex_owner(lock) == current; ++} ++EXPORT_SYMBOL(__rt_mutex_owner_current); ++#endif +diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h +index ca6fb4890..8e0c59227 100644 +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -15,6 +15,7 @@ + + #include + #include ++#include + + /* + * This is the control structure for tasks blocked on a rt_mutex, +@@ -29,6 +30,7 @@ struct rt_mutex_waiter { + struct rb_node pi_tree_entry; + struct task_struct *task; + struct rt_mutex *lock; ++ bool savestate; + #ifdef CONFIG_DEBUG_RT_MUTEXES + unsigned long ip; + struct pid *deadlock_task_pid; +@@ -130,11 +132,14 @@ enum rtmutex_chainwalk { + /* + * PI-futex support (proxy locking functions, etc.): + */ ++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) ++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) ++ + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner); + extern void 
rt_mutex_proxy_unlock(struct rt_mutex *lock); +-extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); ++extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savetate); + extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task); +@@ -152,9 +157,27 @@ extern int __rt_mutex_futex_trylock(struct rt_mutex *l); + + extern void rt_mutex_futex_unlock(struct rt_mutex *lock); + extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh); +- +-extern void rt_mutex_postunlock(struct wake_q_head *wake_q); ++ struct wake_q_head *wqh, ++ struct wake_q_head *wq_sleeper); ++ ++extern void rt_mutex_postunlock(struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q); ++ ++/* RW semaphore special interface */ ++struct ww_acquire_ctx; ++ ++extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state); ++extern int __rt_mutex_trylock(struct rt_mutex *lock); ++extern void __rt_mutex_unlock(struct rt_mutex *lock); ++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, ++ struct hrtimer_sleeper *timeout, ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx, ++ struct rt_mutex_waiter *waiter); ++void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter, ++ unsigned long flags); ++void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock); + + #ifdef CONFIG_DEBUG_RT_MUTEXES + # include "rtmutex-debug.h" +diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c +new file mode 100644 +index 000000000..0ae8c62ea +--- /dev/null ++++ b/kernel/locking/rwlock-rt.c +@@ -0,0 +1,384 @@ ++/* ++ */ ++#include ++#include ++ ++#include "rtmutex_common.h" ++#include ++ ++/* ++ * RT-specific reader/writer locks ++ * ++ * write_lock() ++ * 1) Lock lock->rtmutex ++ * 2) Remove the reader BIAS to force readers into the slow path ++ * 3) Wait until all readers have left the critical region ++ * 4) Mark it write locked ++ * ++ * write_unlock() ++ * 1) Remove the write locked marker ++ * 2) Set the reader BIAS so readers can use the fast path again ++ * 3) Unlock lock->rtmutex to release blocked readers ++ * ++ * read_lock() ++ * 1) Try fast path acquisition (reader BIAS is set) ++ * 2) Take lock->rtmutex.wait_lock which protects the writelocked flag ++ * 3) If !writelocked, acquire it for read ++ * 4) If writelocked, block on lock->rtmutex ++ * 5) unlock lock->rtmutex, goto 1) ++ * ++ * read_unlock() ++ * 1) Try fast path release (reader count != 1) ++ * 2) Wake the writer waiting in write_lock()#3 ++ * ++ * read_lock()#3 has the consequence, that rw locks on RT are not writer ++ * fair, but writers, which should be avoided in RT tasks (think tasklist ++ * lock), are subject to the rtmutex priority/DL inheritance mechanism. ++ * ++ * It's possible to make the rw locks writer fair by keeping a list of ++ * active readers. A blocked writer would force all newly incoming readers ++ * to block on the rtmutex, but the rtmutex would have to be proxy locked ++ * for one reader after the other. We can't use multi-reader inheritance ++ * because there is no way to support that with ++ * SCHED_DEADLINE. Implementing the one by one reader boosting/handover ++ * mechanism is a major surgery for a very dubious value. ++ * ++ * The risk of writer starvation is there, but the pathological use cases ++ * which trigger it are not necessarily the typical RT workloads. 
++ */ ++ ++void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held semaphore: ++ */ ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); ++ lockdep_init_map(&lock->dep_map, name, key, 0); ++#endif ++ atomic_set(&lock->readers, READER_BIAS); ++ rt_mutex_init(&lock->rtmutex); ++ lock->rtmutex.save_state = 1; ++} ++ ++int __read_rt_trylock(struct rt_rw_lock *lock) ++{ ++ int r, old; ++ ++ /* ++ * Increment reader count, if lock->readers < 0, i.e. READER_BIAS is ++ * set. ++ */ ++ for (r = atomic_read(&lock->readers); r < 0;) { ++ old = atomic_cmpxchg(&lock->readers, r, r + 1); ++ if (likely(old == r)) ++ return 1; ++ r = old; ++ } ++ return 0; ++} ++ ++void __sched __read_rt_lock(struct rt_rw_lock *lock) ++{ ++ struct rt_mutex *m = &lock->rtmutex; ++ struct rt_mutex_waiter waiter; ++ unsigned long flags; ++ ++ if (__read_rt_trylock(lock)) ++ return; ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ /* ++ * Allow readers as long as the writer has not completely ++ * acquired the semaphore for write. ++ */ ++ if (atomic_read(&lock->readers) != WRITER_BIAS) { ++ atomic_inc(&lock->readers); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ return; ++ } ++ ++ /* ++ * Call into the slow lock path with the rtmutex->wait_lock ++ * held, so this can't result in the following race: ++ * ++ * Reader1 Reader2 Writer ++ * read_lock() ++ * write_lock() ++ * rtmutex_lock(m) ++ * swait() ++ * read_lock() ++ * unlock(m->wait_lock) ++ * read_unlock() ++ * swake() ++ * lock(m->wait_lock) ++ * lock->writelocked=true ++ * unlock(m->wait_lock) ++ * ++ * write_unlock() ++ * lock->writelocked=false ++ * rtmutex_unlock(m) ++ * read_lock() ++ * write_lock() ++ * rtmutex_lock(m) ++ * swait() ++ * rtmutex_lock(m) ++ * ++ * That would put Reader1 behind the writer waiting on ++ * Reader2 to call read_unlock() which might be unbound. ++ */ ++ rt_mutex_init_waiter(&waiter, true); ++ rt_spin_lock_slowlock_locked(m, &waiter, flags); ++ /* ++ * The slowlock() above is guaranteed to return with the rtmutex is ++ * now held, so there can't be a writer active. Increment the reader ++ * count and immediately drop the rtmutex again. ++ */ ++ atomic_inc(&lock->readers); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ rt_spin_lock_slowunlock(m); ++ ++ debug_rt_mutex_free_waiter(&waiter); ++} ++ ++void __read_rt_unlock(struct rt_rw_lock *lock) ++{ ++ struct rt_mutex *m = &lock->rtmutex; ++ struct task_struct *tsk; ++ ++ /* ++ * sem->readers can only hit 0 when a writer is waiting for the ++ * active readers to leave the critical region. ++ */ ++ if (!atomic_dec_and_test(&lock->readers)) ++ return; ++ ++ raw_spin_lock_irq(&m->wait_lock); ++ /* ++ * Wake the writer, i.e. the rtmutex owner. It might release the ++ * rtmutex concurrently in the fast path, but to clean up the rw ++ * lock it needs to acquire m->wait_lock. The worst case which can ++ * happen is a spurious wakeup. 
++ */ ++ tsk = rt_mutex_owner(m); ++ if (tsk) ++ wake_up_process(tsk); ++ ++ raw_spin_unlock_irq(&m->wait_lock); ++} ++ ++static void __write_unlock_common(struct rt_rw_lock *lock, int bias, ++ unsigned long flags) ++{ ++ struct rt_mutex *m = &lock->rtmutex; ++ ++ atomic_add(READER_BIAS - bias, &lock->readers); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ rt_spin_lock_slowunlock(m); ++} ++ ++void __sched __write_rt_lock(struct rt_rw_lock *lock) ++{ ++ struct rt_mutex *m = &lock->rtmutex; ++ struct task_struct *self = current; ++ unsigned long flags; ++ ++ /* Take the rtmutex as a first step */ ++ __rt_spin_lock(m); ++ ++ /* Force readers into slow path */ ++ atomic_sub(READER_BIAS, &lock->readers); ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ ++ raw_spin_lock(&self->pi_lock); ++ self->saved_state = self->state; ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock(&self->pi_lock); ++ ++ for (;;) { ++ /* Have all readers left the critical region? */ ++ if (!atomic_read(&lock->readers)) { ++ atomic_set(&lock->readers, WRITER_BIAS); ++ raw_spin_lock(&self->pi_lock); ++ __set_current_state_no_track(self->saved_state); ++ self->saved_state = TASK_RUNNING; ++ raw_spin_unlock(&self->pi_lock); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ return; ++ } ++ ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ ++ if (atomic_read(&lock->readers) != 0) ++ schedule(); ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ ++ raw_spin_lock(&self->pi_lock); ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock(&self->pi_lock); ++ } ++} ++ ++int __write_rt_trylock(struct rt_rw_lock *lock) ++{ ++ struct rt_mutex *m = &lock->rtmutex; ++ unsigned long flags; ++ ++ if (!__rt_mutex_trylock(m)) ++ return 0; ++ ++ atomic_sub(READER_BIAS, &lock->readers); ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ if (!atomic_read(&lock->readers)) { ++ atomic_set(&lock->readers, WRITER_BIAS); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ return 1; ++ } ++ __write_unlock_common(lock, 0, flags); ++ return 0; ++} ++ ++void __write_rt_unlock(struct rt_rw_lock *lock) ++{ ++ struct rt_mutex *m = &lock->rtmutex; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ __write_unlock_common(lock, WRITER_BIAS, flags); ++} ++ ++/* Map the reader biased implementation */ ++static inline int do_read_rt_trylock(rwlock_t *rwlock) ++{ ++ return __read_rt_trylock(rwlock); ++} ++ ++static inline int do_write_rt_trylock(rwlock_t *rwlock) ++{ ++ return __write_rt_trylock(rwlock); ++} ++ ++static inline void do_read_rt_lock(rwlock_t *rwlock) ++{ ++ __read_rt_lock(rwlock); ++} ++ ++static inline void do_write_rt_lock(rwlock_t *rwlock) ++{ ++ __write_rt_lock(rwlock); ++} ++ ++static inline void do_read_rt_unlock(rwlock_t *rwlock) ++{ ++ __read_rt_unlock(rwlock); ++} ++ ++static inline void do_write_rt_unlock(rwlock_t *rwlock) ++{ ++ __write_rt_unlock(rwlock); ++} ++ ++static inline void do_rwlock_rt_init(rwlock_t *rwlock, const char *name, ++ struct lock_class_key *key) ++{ ++ __rwlock_biased_rt_init(rwlock, name, key); ++} ++ ++int __lockfunc rt_read_can_lock(rwlock_t *rwlock) ++{ ++ return atomic_read(&rwlock->readers) < 0; ++} ++ ++int __lockfunc rt_write_can_lock(rwlock_t *rwlock) ++{ ++ return atomic_read(&rwlock->readers) == READER_BIAS; ++} ++ ++/* ++ * The common functions which get wrapped into the rwlock API. 
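__write_rt_lock() above follows the sequence from the design comment at the top of the file: take the rtmutex, subtract READER_BIAS so new readers drop into the slow path, wait for the active reader count to drain to zero, then stamp WRITER_BIAS. Below is a rough userspace analogue of that sequence and of the matching unlock; a pthread mutex stands in for the PI rtmutex, sched_yield() for the schedule()-based wait, and the bias constants are invented (only their signs matter).

#include <stdatomic.h>
#include <pthread.h>
#include <sched.h>
#include <limits.h>
#include <stdio.h>

#define READER_BIAS  (INT_MIN / 2)   /* invented values, not the kernel's */
#define WRITER_BIAS  (INT_MAX / 2)

struct rt_rwlock_sketch {
    pthread_mutex_t rtmutex;   /* stands in for lock->rtmutex */
    atomic_int      readers;   /* starts out at READER_BIAS   */
};

static void write_lock_sketch(struct rt_rwlock_sketch *l)
{
    pthread_mutex_lock(&l->rtmutex);            /* 1) lock the rtmutex             */
    atomic_fetch_sub(&l->readers, READER_BIAS); /* 2) force readers into slow path */
    while (atomic_load(&l->readers) != 0)       /* 3) wait for readers to drain    */
        sched_yield();                          /*    (the kernel sleeps instead)  */
    atomic_store(&l->readers, WRITER_BIAS);     /* 4) mark it write locked         */
}

static void write_unlock_sketch(struct rt_rwlock_sketch *l)
{
    atomic_store(&l->readers, READER_BIAS);     /* re-arm the reader fast path */
    pthread_mutex_unlock(&l->rtmutex);          /* release blocked readers     */
}

int main(void)
{
    struct rt_rwlock_sketch l = {
        .rtmutex = PTHREAD_MUTEX_INITIALIZER,
        .readers = READER_BIAS,
    };

    write_lock_sketch(&l);
    puts("write locked");
    write_unlock_sketch(&l);
    puts("released");
    return 0;
}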
++ */ ++int __lockfunc rt_read_trylock(rwlock_t *rwlock) ++{ ++ int ret; ++ ++ sleeping_lock_inc(); ++ migrate_disable(); ++ ret = do_read_rt_trylock(rwlock); ++ if (ret) { ++ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); ++ rcu_read_lock(); ++ } else { ++ migrate_enable(); ++ sleeping_lock_dec(); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_read_trylock); ++ ++int __lockfunc rt_write_trylock(rwlock_t *rwlock) ++{ ++ int ret; ++ ++ sleeping_lock_inc(); ++ migrate_disable(); ++ ret = do_write_rt_trylock(rwlock); ++ if (ret) { ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ rcu_read_lock(); ++ } else { ++ migrate_enable(); ++ sleeping_lock_dec(); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock); ++ ++void __lockfunc rt_read_lock(rwlock_t *rwlock) ++{ ++ sleeping_lock_inc(); ++ rcu_read_lock(); ++ migrate_disable(); ++ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); ++ do_read_rt_lock(rwlock); ++} ++EXPORT_SYMBOL(rt_read_lock); ++ ++void __lockfunc rt_write_lock(rwlock_t *rwlock) ++{ ++ sleeping_lock_inc(); ++ rcu_read_lock(); ++ migrate_disable(); ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ do_write_rt_lock(rwlock); ++} ++EXPORT_SYMBOL(rt_write_lock); ++ ++void __lockfunc rt_read_unlock(rwlock_t *rwlock) ++{ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ do_read_rt_unlock(rwlock); ++ migrate_enable(); ++ rcu_read_unlock(); ++ sleeping_lock_dec(); ++} ++EXPORT_SYMBOL(rt_read_unlock); ++ ++void __lockfunc rt_write_unlock(rwlock_t *rwlock) ++{ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ do_write_rt_unlock(rwlock); ++ migrate_enable(); ++ rcu_read_unlock(); ++ sleeping_lock_dec(); ++} ++EXPORT_SYMBOL(rt_write_unlock); ++ ++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) ++{ ++ do_rwlock_rt_init(rwlock, name, key); ++} ++EXPORT_SYMBOL(__rt_rwlock_init); +diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c +new file mode 100644 +index 000000000..966946454 +--- /dev/null ++++ b/kernel/locking/rwsem-rt.c +@@ -0,0 +1,312 @@ ++/* ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include "rtmutex_common.h" ++ ++/* ++ * RT-specific reader/writer semaphores ++ * ++ * down_write() ++ * 1) Lock sem->rtmutex ++ * 2) Remove the reader BIAS to force readers into the slow path ++ * 3) Wait until all readers have left the critical region ++ * 4) Mark it write locked ++ * ++ * up_write() ++ * 1) Remove the write locked marker ++ * 2) Set the reader BIAS so readers can use the fast path again ++ * 3) Unlock sem->rtmutex to release blocked readers ++ * ++ * down_read() ++ * 1) Try fast path acquisition (reader BIAS is set) ++ * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag ++ * 3) If !writelocked, acquire it for read ++ * 4) If writelocked, block on sem->rtmutex ++ * 5) unlock sem->rtmutex, goto 1) ++ * ++ * up_read() ++ * 1) Try fast path release (reader count != 1) ++ * 2) Wake the writer waiting in down_write()#3 ++ * ++ * down_read()#3 has the consequence, that rw semaphores on RT are not writer ++ * fair, but writers, which should be avoided in RT tasks (think mmap_sem), ++ * are subject to the rtmutex priority/DL inheritance mechanism. ++ * ++ * It's possible to make the rw semaphores writer fair by keeping a list of ++ * active readers. A blocked writer would force all newly incoming readers to ++ * block on the rtmutex, but the rtmutex would have to be proxy locked for one ++ * reader after the other. 
We can't use multi-reader inheritance because there ++ * is no way to support that with SCHED_DEADLINE. Implementing the one by one ++ * reader boosting/handover mechanism is a major surgery for a very dubious ++ * value. ++ * ++ * The risk of writer starvation is there, but the pathological use cases ++ * which trigger it are not necessarily the typical RT workloads. ++ */ ++ ++void __rwsem_init(struct rw_semaphore *sem, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held semaphore: ++ */ ++ debug_check_no_locks_freed((void *)sem, sizeof(*sem)); ++ lockdep_init_map(&sem->dep_map, name, key, 0); ++#endif ++ atomic_set(&sem->readers, READER_BIAS); ++} ++EXPORT_SYMBOL(__rwsem_init); ++ ++int __down_read_trylock(struct rw_semaphore *sem) ++{ ++ int r, old; ++ ++ /* ++ * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is ++ * set. ++ */ ++ for (r = atomic_read(&sem->readers); r < 0;) { ++ old = atomic_cmpxchg(&sem->readers, r, r + 1); ++ if (likely(old == r)) ++ return 1; ++ r = old; ++ } ++ return 0; ++} ++ ++static int __sched __down_read_common(struct rw_semaphore *sem, int state) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ struct rt_mutex_waiter waiter; ++ int ret; ++ ++ if (__down_read_trylock(sem)) ++ return 0; ++ /* ++ * If rt_mutex blocks, the function sched_submit_work will not call ++ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true). ++ * We must call blk_schedule_flush_plug here, if we don't call it, ++ * a deadlock in I/O may happen. ++ */ ++ if (unlikely(blk_needs_flush_plug(current))) ++ blk_schedule_flush_plug(current); ++ ++ might_sleep(); ++ raw_spin_lock_irq(&m->wait_lock); ++ /* ++ * Allow readers as long as the writer has not completely ++ * acquired the semaphore for write. ++ */ ++ if (atomic_read(&sem->readers) != WRITER_BIAS) { ++ atomic_inc(&sem->readers); ++ raw_spin_unlock_irq(&m->wait_lock); ++ return 0; ++ } ++ ++ /* ++ * Call into the slow lock path with the rtmutex->wait_lock ++ * held, so this can't result in the following race: ++ * ++ * Reader1 Reader2 Writer ++ * down_read() ++ * down_write() ++ * rtmutex_lock(m) ++ * swait() ++ * down_read() ++ * unlock(m->wait_lock) ++ * up_read() ++ * swake() ++ * lock(m->wait_lock) ++ * sem->writelocked=true ++ * unlock(m->wait_lock) ++ * ++ * up_write() ++ * sem->writelocked=false ++ * rtmutex_unlock(m) ++ * down_read() ++ * down_write() ++ * rtmutex_lock(m) ++ * swait() ++ * rtmutex_lock(m) ++ * ++ * That would put Reader1 behind the writer waiting on ++ * Reader2 to call up_read() which might be unbound. ++ */ ++ rt_mutex_init_waiter(&waiter, false); ++ ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK, ++ NULL, &waiter); ++ /* ++ * The slowlock() above is guaranteed to return with the rtmutex (for ++ * ret = 0) is now held, so there can't be a writer active. Increment ++ * the reader count and immediately drop the rtmutex again. ++ * For ret != 0 we don't hold the rtmutex and need unlock the wait_lock. ++ * We don't own the lock then. 
++ */ ++ if (!ret) ++ atomic_inc(&sem->readers); ++ raw_spin_unlock_irq(&m->wait_lock); ++ if (!ret) ++ __rt_mutex_unlock(m); ++ ++ debug_rt_mutex_free_waiter(&waiter); ++ return ret; ++} ++ ++void __down_read(struct rw_semaphore *sem) ++{ ++ int ret; ++ ++ ret = __down_read_common(sem, TASK_UNINTERRUPTIBLE); ++ WARN_ON_ONCE(ret); ++} ++ ++int __down_read_interruptible(struct rw_semaphore *sem) ++{ ++ int ret; ++ ++ ret = __down_read_common(sem, TASK_INTERRUPTIBLE); ++ if (likely(!ret)) ++ return ret; ++ WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret); ++ return -EINTR; ++} ++ ++int __down_read_killable(struct rw_semaphore *sem) ++{ ++ int ret; ++ ++ ret = __down_read_common(sem, TASK_KILLABLE); ++ if (likely(!ret)) ++ return ret; ++ WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret); ++ return -EINTR; ++} ++ ++void __up_read(struct rw_semaphore *sem) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ struct task_struct *tsk; ++ ++ /* ++ * sem->readers can only hit 0 when a writer is waiting for the ++ * active readers to leave the critical region. ++ */ ++ if (!atomic_dec_and_test(&sem->readers)) ++ return; ++ ++ raw_spin_lock_irq(&m->wait_lock); ++ /* ++ * Wake the writer, i.e. the rtmutex owner. It might release the ++ * rtmutex concurrently in the fast path (due to a signal), but to ++ * clean up the rwsem it needs to acquire m->wait_lock. The worst ++ * case which can happen is a spurious wakeup. ++ */ ++ tsk = rt_mutex_owner(m); ++ if (tsk) ++ wake_up_process(tsk); ++ ++ raw_spin_unlock_irq(&m->wait_lock); ++} ++ ++static void __up_write_unlock(struct rw_semaphore *sem, int bias, ++ unsigned long flags) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ ++ atomic_add(READER_BIAS - bias, &sem->readers); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ __rt_mutex_unlock(m); ++} ++ ++static int __sched __down_write_common(struct rw_semaphore *sem, int state) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ unsigned long flags; ++ ++ /* Take the rtmutex as a first step */ ++ if (__rt_mutex_lock_state(m, state)) ++ return -EINTR; ++ ++ /* Force readers into slow path */ ++ atomic_sub(READER_BIAS, &sem->readers); ++ might_sleep(); ++ ++ set_current_state(state); ++ for (;;) { ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ /* Have all readers left the critical region? 
*/ ++ if (!atomic_read(&sem->readers)) { ++ atomic_set(&sem->readers, WRITER_BIAS); ++ __set_current_state(TASK_RUNNING); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ return 0; ++ } ++ ++ if (signal_pending_state(state, current)) { ++ __set_current_state(TASK_RUNNING); ++ __up_write_unlock(sem, 0, flags); ++ return -EINTR; ++ } ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ ++ if (atomic_read(&sem->readers) != 0) { ++ schedule(); ++ set_current_state(state); ++ } ++ } ++} ++ ++void __sched __down_write(struct rw_semaphore *sem) ++{ ++ __down_write_common(sem, TASK_UNINTERRUPTIBLE); ++} ++ ++int __sched __down_write_killable(struct rw_semaphore *sem) ++{ ++ return __down_write_common(sem, TASK_KILLABLE); ++} ++ ++int __down_write_trylock(struct rw_semaphore *sem) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ unsigned long flags; ++ ++ if (!__rt_mutex_trylock(m)) ++ return 0; ++ ++ atomic_sub(READER_BIAS, &sem->readers); ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ if (!atomic_read(&sem->readers)) { ++ atomic_set(&sem->readers, WRITER_BIAS); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ return 1; ++ } ++ __up_write_unlock(sem, 0, flags); ++ return 0; ++} ++ ++void __up_write(struct rw_semaphore *sem) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ __up_write_unlock(sem, WRITER_BIAS, flags); ++} ++ ++void __downgrade_write(struct rw_semaphore *sem) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ /* Release it and account current as reader */ ++ __up_write_unlock(sem, WRITER_BIAS - 1, flags); ++} +diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c +index 936f3d14d..e89b70f47 100644 +--- a/kernel/locking/spinlock.c ++++ b/kernel/locking/spinlock.c +@@ -117,8 +117,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ + * __[spin|read|write]_lock_bh() + */ + BUILD_LOCK_OPS(spin, raw_spinlock); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + BUILD_LOCK_OPS(read, rwlock); + BUILD_LOCK_OPS(write, rwlock); ++#endif + + #endif + +@@ -202,6 +205,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) + EXPORT_SYMBOL(_raw_spin_unlock_bh); + #endif + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #ifndef CONFIG_INLINE_READ_TRYLOCK + int __lockfunc _raw_read_trylock(rwlock_t *lock) + { +@@ -346,6 +351,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) + EXPORT_SYMBOL(_raw_write_unlock_bh); + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #ifdef CONFIG_DEBUG_LOCK_ALLOC + + void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) +diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c +index 03595c29c..d63df281b 100644 +--- a/kernel/locking/spinlock_debug.c ++++ b/kernel/locking/spinlock_debug.c +@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + + EXPORT_SYMBOL(__raw_spin_lock_init); + ++#ifndef CONFIG_PREEMPT_RT_FULL + void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key) + { +@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, + } + + EXPORT_SYMBOL(__rwlock_init); ++#endif + + static void spin_dump(raw_spinlock_t *lock, const char *msg) + { +@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock) + arch_spin_unlock(&lock->raw_lock); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static void rwlock_bug(rwlock_t *lock, const char *msg) + { + if (!debug_locks_off()) +@@ -224,3 
+227,5 @@ void do_raw_write_unlock(rwlock_t *lock) + debug_write_unlock(lock); + arch_write_unlock(&lock->raw_lock); + } ++ ++#endif +diff --git a/kernel/panic.c b/kernel/panic.c +index ebdb58e76..82955af2b 100644 +--- a/kernel/panic.c ++++ b/kernel/panic.c +@@ -534,9 +534,11 @@ static u64 oops_id; + + static int init_oops_id(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!oops_id) + get_random_bytes(&oops_id, sizeof(oops_id)); + else ++#endif + oops_id++; + + return 0; +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c +index fc6466351..8f90096cb 100644 +--- a/kernel/power/hibernate.c ++++ b/kernel/power/hibernate.c +@@ -699,6 +699,10 @@ static int load_image_and_restore(void) + return error; + } + ++#ifndef CONFIG_SUSPEND ++bool pm_in_action; ++#endif ++ + /** + * hibernate - Carry out system hibernation, including saving the image. + */ +@@ -712,6 +716,8 @@ int hibernate(void) + return -EPERM; + } + ++ pm_in_action = true; ++ + lock_system_sleep(); + /* The snapshot device should not be opened while we're running */ + if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { +@@ -791,6 +797,7 @@ int hibernate(void) + atomic_inc(&snapshot_device_available); + Unlock: + unlock_system_sleep(); ++ pm_in_action = false; + carry_out_hibernation = false; + pr_info("hibernation exit\n"); + +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c +index 07a32e99a..5b28b0ac8 100644 +--- a/kernel/power/suspend.c ++++ b/kernel/power/suspend.c +@@ -603,6 +603,8 @@ static int enter_state(suspend_state_t state) + return error; + } + ++bool pm_in_action; ++ + /** + * pm_suspend - Externally visible function for suspending the system. + * @state: System sleep state to enter. +@@ -617,6 +619,7 @@ int pm_suspend(suspend_state_t state) + if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) + return -EINVAL; + ++ pm_in_action = true; + pr_info("suspend entry (%s)\n", mem_sleep_labels[state]); + error = enter_state(state); + if (error) { +@@ -626,6 +629,7 @@ int pm_suspend(suspend_state_t state) + suspend_stats.success++; + } + pr_info("suspend exit\n"); ++ pm_in_action = false; + return error; + } + EXPORT_SYMBOL(pm_suspend); +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index 0fe45941b..4c90438fc 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -405,6 +405,65 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); + printk_safe_exit_irqrestore(flags); \ + } while (0) + ++#ifdef CONFIG_EARLY_PRINTK ++struct console *early_console; ++ ++static void early_vprintk(const char *fmt, va_list ap) ++{ ++ if (early_console) { ++ char buf[512]; ++ int n = vscnprintf(buf, sizeof(buf), fmt, ap); ++ ++ early_console->write(early_console, buf, n); ++ } ++} ++ ++asmlinkage void early_printk(const char *fmt, ...) ++{ ++ va_list ap; ++ ++ va_start(ap, fmt); ++ early_vprintk(fmt, ap); ++ va_end(ap); ++} ++ ++/* ++ * This is independent of any log levels - a global ++ * kill switch that turns off all of printk. ++ * ++ * Used by the NMI watchdog if early-printk is enabled. 
++ */ ++static bool __read_mostly printk_killswitch; ++ ++static int __init force_early_printk_setup(char *str) ++{ ++ printk_killswitch = true; ++ return 0; ++} ++early_param("force_early_printk", force_early_printk_setup); ++ ++void printk_kill(void) ++{ ++ printk_killswitch = true; ++} ++ ++#ifdef CONFIG_PRINTK ++static int forced_early_printk(const char *fmt, va_list ap) ++{ ++ if (!printk_killswitch) ++ return 0; ++ early_vprintk(fmt, ap); ++ return 1; ++} ++#endif ++ ++#else ++static inline int forced_early_printk(const char *fmt, va_list ap) ++{ ++ return 0; ++} ++#endif ++ + #ifdef CONFIG_PRINTK + DECLARE_WAIT_QUEUE_HEAD(log_wait); + /* the next printk record to read by syslog(READ) or /proc/kmsg */ +@@ -1405,6 +1464,8 @@ static int syslog_print_all(char __user *buf, int size, bool clear) + u64 next_seq; + u64 seq; + u32 idx; ++ int attempts = 0; ++ int num_msg; + bool time; + + text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); +@@ -1413,6 +1474,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear) + + time = printk_time; + logbuf_lock_irq(); ++ ++try_again: ++ attempts++; ++ if (attempts > 10) { ++ len = -EBUSY; ++ goto out; ++ } ++ num_msg = 0; ++ + /* + * Find first record that fits, including all following records, + * into the user-provided buffer for this dump. +@@ -1425,6 +1495,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) + len += msg_print_text(msg, true, time, NULL, 0); + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ logbuf_unlock_irq(); ++ logbuf_lock_irq(); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* move first record forward until length fits into the buffer */ +@@ -1436,6 +1514,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) + len -= msg_print_text(msg, true, time, NULL, 0); + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ logbuf_unlock_irq(); ++ logbuf_lock_irq(); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* last message fitting into this dump */ +@@ -1468,6 +1554,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) + clear_seq = log_next_seq; + clear_idx = log_next_idx; + } ++out: + logbuf_unlock_irq(); + + kfree(text); +@@ -1602,6 +1689,7 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) + return do_syslog(type, buf, len, SYSLOG_FROM_READER); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Special console_lock variants that help to reduce the risk of soft-lockups. + * They allow to pass console_lock to another printk() call using a busy wait. +@@ -1756,6 +1844,15 @@ static int console_trylock_spinning(void) + return 1; + } + ++#else ++ ++static int console_trylock_spinning(void) ++{ ++ return console_trylock(); ++} ++ ++#endif ++ + /* + * Call the console drivers, asking them to write out + * log_buf[start] to log_buf[end - 1]. 
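The syslog_print_all() change above bounds how long logbuf_lock stays held: every few records the lock is dropped and immediately retaken, and if older records were discarded in the meantime the walk restarts, giving up with -EBUSY after ten attempts. A userspace analogue of that bounded-batch pattern follows; the pthread mutex stands in for the raw spinlock and the record counters are invented.

#include <pthread.h>
#include <stdio.h>

/* Invented stand-ins for the log buffer state. */
static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long first_seq;          /* oldest record still present */
static unsigned long next_seq = 1000;    /* one past the newest record  */

/* Walk every record under buf_lock, but drop and retake the lock every few
 * records so the holder never runs unboundedly long.  If older records were
 * overwritten while the lock was dropped, restart; give up after a bounded
 * number of attempts. */
static long drain_log_sketch(void)
{
    long count = 0;
    int attempts = 0;

    pthread_mutex_lock(&buf_lock);
restart:
    if (++attempts > 10) {
        pthread_mutex_unlock(&buf_lock);
        return -1;                           /* -EBUSY in the kernel version */
    }
    count = 0;
    for (unsigned long seq = first_seq; seq != next_seq; seq++) {
        count++;                             /* "measure/copy record seq"   */
        if (count % 5 == 0) {
            pthread_mutex_unlock(&buf_lock); /* let other lockers in        */
            pthread_mutex_lock(&buf_lock);
            if (seq < first_seq)             /* buffer wrapped under us     */
                goto restart;
        }
    }
    pthread_mutex_unlock(&buf_lock);
    return count;
}

int main(void)
{
    printf("drained %ld records\n", drain_log_sketch());
    return 0;
}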
+@@ -1771,6 +1868,12 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, + if (!console_drivers) + return; + ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { ++ if (in_irq() || in_nmi()) ++ return; ++ } ++ ++ migrate_disable(); + for_each_console(con) { + if (exclusive_console && con != exclusive_console) + continue; +@@ -1786,6 +1889,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, + else + con->write(con, text, len); + } ++ migrate_enable(); + } + + int printk_delay_msec __read_mostly; +@@ -1956,6 +2060,13 @@ asmlinkage int vprintk_emit(int facility, int level, + unsigned long flags; + u64 curr_log_seq; + ++ /* ++ * Fall back to early_printk if a debugging subsystem has ++ * killed printk output ++ */ ++ if (unlikely(forced_early_printk(fmt, args))) ++ return 1; ++ + if (level == LOGLEVEL_SCHED) { + level = LOGLEVEL_DEFAULT; + in_sched = true; +@@ -1973,20 +2084,30 @@ asmlinkage int vprintk_emit(int facility, int level, + + /* If called from the scheduler, we can not call up(). */ + if (!in_sched && pending_output) { ++ int may_trylock = 1; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * we can't take a sleeping lock with IRQs or preeption disabled ++ * so we can't print in these contexts ++ */ ++ if (!(preempt_count() == 0 && !irqs_disabled())) ++ may_trylock = 0; ++#endif + /* + * Disable preemption to avoid being preempted while holding + * console_sem which would prevent anyone from printing to + * console + */ +- preempt_disable(); ++ migrate_disable(); + /* + * Try to acquire and then immediately release the console + * semaphore. The release will print out buffers and wake up + * /dev/kmsg and syslog() users. + */ +- if (console_trylock_spinning()) ++ if (may_trylock && console_trylock_spinning()) + console_unlock(); +- preempt_enable(); ++ migrate_enable(); + } + + if (pending_output) +@@ -2101,26 +2222,6 @@ static bool suppress_message_printing(int level) { return false; } + + #endif /* CONFIG_PRINTK */ + +-#ifdef CONFIG_EARLY_PRINTK +-struct console *early_console; +- +-asmlinkage __visible void early_printk(const char *fmt, ...) +-{ +- va_list ap; +- char buf[512]; +- int n; +- +- if (!early_console) +- return; +- +- va_start(ap, fmt); +- n = vscnprintf(buf, sizeof(buf), fmt, ap); +- va_end(ap); +- +- early_console->write(early_console, buf, n); +-} +-#endif +- + static int __add_preferred_console(char *name, int idx, char *options, + char *brl_options) + { +@@ -2471,6 +2572,10 @@ void console_unlock(void) + console_seq++; + raw_spin_unlock(&logbuf_lock); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ printk_safe_exit_irqrestore(flags); ++ call_console_drivers(ext_text, ext_len, text, len); ++#else + /* + * While actively printing out messages, if another printk() + * were to occur on another CPU, it may wait for this one to +@@ -2489,6 +2594,7 @@ void console_unlock(void) + } + + printk_safe_exit_irqrestore(flags); ++#endif + + if (do_cond_resched) + cond_resched(); +@@ -2536,6 +2642,11 @@ void console_unblank(void) + { + struct console *c; + ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { ++ if (in_irq() || in_nmi()) ++ return; ++ } ++ + /* + * console_unblank can no longer be called in interrupt context unless + * oops_in_progress is set to 1.. 
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index f32095d7a..36ffe1417 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -190,7 +190,14 @@ static bool ptrace_freeze_traced(struct task_struct *task) + spin_lock_irq(&task->sighand->siglock); + if (task_is_traced(task) && !looks_like_a_spurious_pid(task) && + !__fatal_signal_pending(task)) { +- task->state = __TASK_TRACED; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ if (task->state & __TASK_TRACED) ++ task->state = __TASK_TRACED; ++ else ++ task->saved_state = __TASK_TRACED; ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + ret = true; + } + spin_unlock_irq(&task->sighand->siglock); +@@ -200,8 +207,8 @@ static bool ptrace_freeze_traced(struct task_struct *task) + + static void ptrace_unfreeze_traced(struct task_struct *task) + { +- if (task->state != __TASK_TRACED) +- return; ++ unsigned long flags; ++ bool frozen = true; + + WARN_ON(!task->ptrace || task->parent != current); + +@@ -210,12 +217,19 @@ static void ptrace_unfreeze_traced(struct task_struct *task) + * Recheck state under the lock to close this race. + */ + spin_lock_irq(&task->sighand->siglock); +- if (task->state == __TASK_TRACED) { +- if (__fatal_signal_pending(task)) +- wake_up_state(task, __TASK_TRACED); +- else +- task->state = TASK_TRACED; +- } ++ ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ if (task->state == __TASK_TRACED) ++ task->state = TASK_TRACED; ++ else if (task->saved_state == __TASK_TRACED) ++ task->saved_state = TASK_TRACED; ++ else ++ frozen = false; ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ ++ if (frozen && __fatal_signal_pending(task)) ++ wake_up_state(task, __TASK_TRACED); ++ + spin_unlock_irq(&task->sighand->siglock); + } + +diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig +index 9210379c0..a243a78ff 100644 +--- a/kernel/rcu/Kconfig ++++ b/kernel/rcu/Kconfig +@@ -172,7 +172,7 @@ config RCU_FANOUT_LEAF + + config RCU_FAST_NO_HZ + bool "Accelerate last non-dyntick-idle CPU's grace periods" +- depends on NO_HZ_COMMON && SMP && RCU_EXPERT ++ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL + default n + help + This option permits CPUs to enter dynticks-idle state even if +@@ -190,8 +190,8 @@ config RCU_FAST_NO_HZ + + config RCU_BOOST + bool "Enable RCU priority boosting" +- depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT +- default n ++ depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT_FULL ++ default y if PREEMPT_RT_FULL + help + This option boosts the priority of preempted RCU readers that + block the current preemptible RCU grace period for too long. 
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h +index 4d04683c3..808cce9a5 100644 +--- a/kernel/rcu/rcu.h ++++ b/kernel/rcu/rcu.h +@@ -528,7 +528,6 @@ static inline void show_rcu_gp_kthreads(void) { } + static inline int rcu_get_gp_kthreads_prio(void) { return 0; } + #else /* #ifdef CONFIG_TINY_RCU */ + unsigned long rcu_get_gp_seq(void); +-unsigned long rcu_bh_get_gp_seq(void); + unsigned long rcu_sched_get_gp_seq(void); + unsigned long rcu_exp_batches_completed(void); + unsigned long rcu_exp_batches_completed_sched(void); +@@ -536,10 +535,18 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp); + void show_rcu_gp_kthreads(void); + int rcu_get_gp_kthreads_prio(void); + void rcu_force_quiescent_state(void); +-void rcu_bh_force_quiescent_state(void); + void rcu_sched_force_quiescent_state(void); + extern struct workqueue_struct *rcu_gp_wq; + extern struct workqueue_struct *rcu_par_gp_wq; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define rcu_bh_get_gp_seq rcu_get_gp_seq ++#define rcu_bh_force_quiescent_state rcu_force_quiescent_state ++#else ++unsigned long rcu_bh_get_gp_seq(void); ++void rcu_bh_force_quiescent_state(void); ++#endif ++ + #endif /* #else #ifdef CONFIG_TINY_RCU */ + + #ifdef CONFIG_RCU_NOCB_CPU +diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c +index 0b7af7e2b..e95d121ef 100644 +--- a/kernel/rcu/rcutorture.c ++++ b/kernel/rcu/rcutorture.c +@@ -434,6 +434,7 @@ static struct rcu_torture_ops rcu_ops = { + .name = "rcu" + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Definitions for rcu_bh torture testing. + */ +@@ -475,6 +476,12 @@ static struct rcu_torture_ops rcu_bh_ops = { + .name = "rcu_bh" + }; + ++#else ++static struct rcu_torture_ops rcu_bh_ops = { ++ .ttype = INVALID_RCU_FLAVOR, ++}; ++#endif ++ + /* + * Don't even think about trying any of these in real life!!! + * The names includes "busted", and they really means it! +diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c +index a276bbe90..73ec7635b 100644 +--- a/kernel/rcu/srcutree.c ++++ b/kernel/rcu/srcutree.c +@@ -38,6 +38,8 @@ + #include + #include + #include ++#include ++#include + + #include "rcu.h" + #include "rcu_segcblist.h" +@@ -462,21 +464,6 @@ static void srcu_gp_start(struct srcu_struct *sp) + WARN_ON_ONCE(state != SRCU_STATE_SCAN1); + } + +-/* +- * Track online CPUs to guide callback workqueue placement. +- */ +-DEFINE_PER_CPU(bool, srcu_online); +- +-void srcu_online_cpu(unsigned int cpu) +-{ +- WRITE_ONCE(per_cpu(srcu_online, cpu), true); +-} +- +-void srcu_offline_cpu(unsigned int cpu) +-{ +- WRITE_ONCE(per_cpu(srcu_online, cpu), false); +-} +- + /* + * Place the workqueue handler on the specified CPU if online, otherwise + * just run it whereever. This is useful for placing workqueue handlers +@@ -488,12 +475,12 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, + { + bool ret; + +- preempt_disable(); +- if (READ_ONCE(per_cpu(srcu_online, cpu))) ++ cpus_read_lock(); ++ if (cpu_online(cpu)) + ret = queue_delayed_work_on(cpu, wq, dwork, delay); + else + ret = queue_delayed_work(wq, dwork, delay); +- preempt_enable(); ++ cpus_read_unlock(); + return ret; + } + +@@ -776,6 +763,8 @@ static void srcu_flip(struct srcu_struct *sp) + * negligible when amoritized over that time period, and the extra latency + * of a needlessly non-expedited grace period is similarly negligible. 
+ */ ++static DEFINE_LOCAL_IRQ_LOCK(sp_llock); ++ + static bool srcu_might_be_idle(struct srcu_struct *sp) + { + unsigned long curseq; +@@ -785,13 +774,13 @@ static bool srcu_might_be_idle(struct srcu_struct *sp) + unsigned long tlast; + + /* If the local srcu_data structure has callbacks, not idle. */ +- local_irq_save(flags); ++ local_lock_irqsave(sp_llock, flags); + sdp = this_cpu_ptr(sp->sda); + if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { +- local_irq_restore(flags); ++ local_unlock_irqrestore(sp_llock, flags); + return false; /* Callbacks already present, so not idle. */ + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(sp_llock, flags); + + /* + * No local callbacks, so probabalistically probe global state. +@@ -871,7 +860,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, + } + rhp->func = func; + idx = srcu_read_lock(sp); +- local_irq_save(flags); ++ local_lock_irqsave(sp_llock, flags); + sdp = this_cpu_ptr(sp->sda); + spin_lock_rcu_node(sdp); + rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); +@@ -887,7 +876,8 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, + sdp->srcu_gp_seq_needed_exp = s; + needexp = true; + } +- spin_unlock_irqrestore_rcu_node(sdp, flags); ++ spin_unlock_rcu_node(sdp); ++ local_unlock_irqrestore(sp_llock, flags); + if (needgp) + srcu_funnel_gp_start(sp, sdp, s, do_norm); + else if (needexp) +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 594d6ea99..12704e7d3 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -61,6 +61,13 @@ + #include + #include + #include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../time/tick-internal.h" + + #include "tree.h" + #include "rcu.h" +@@ -244,6 +251,19 @@ void rcu_sched_qs(void) + this_cpu_ptr(&rcu_sched_data), true); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void rcu_preempt_qs(void); ++ ++void rcu_bh_qs(void) ++{ ++ unsigned long flags; ++ ++ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ ++ local_irq_save(flags); ++ rcu_preempt_qs(); ++ local_irq_restore(flags); ++} ++#else + void rcu_bh_qs(void) + { + RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); +@@ -254,6 +274,7 @@ void rcu_bh_qs(void) + __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); + } + } ++#endif + + /* + * Steal a bit from the bottom of ->dynticks for idle entry/exit +@@ -568,6 +589,7 @@ unsigned long rcu_sched_get_gp_seq(void) + } + EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Return the number of RCU-bh GPs completed thus far for debug & stats. + */ +@@ -576,6 +598,7 @@ unsigned long rcu_bh_get_gp_seq(void) + return READ_ONCE(rcu_bh_state.gp_seq); + } + EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); ++#endif + + /* + * Return the number of RCU expedited batches completed thus far for +@@ -599,6 +622,7 @@ unsigned long rcu_exp_batches_completed_sched(void) + } + EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Force a quiescent state. + */ +@@ -617,6 +641,13 @@ void rcu_bh_force_quiescent_state(void) + } + EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); + ++#else ++void rcu_force_quiescent_state(void) ++{ ++} ++EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); ++#endif ++ + /* + * Force a quiescent state for RCU-sched. 
+ */ +@@ -674,9 +705,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, + case RCU_FLAVOR: + rsp = rcu_state_p; + break; ++#ifndef CONFIG_PREEMPT_RT_FULL + case RCU_BH_FLAVOR: + rsp = &rcu_bh_state; + break; ++#endif + case RCU_SCHED_FLAVOR: + rsp = &rcu_sched_state; + break; +@@ -1263,6 +1296,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) + !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && + (rnp->ffmask & rdp->grpmask)) { + init_irq_work(&rdp->rcu_iw, rcu_iw_handler); ++ rdp->rcu_iw.flags = IRQ_WORK_HARD_IRQ; + rdp->rcu_iw_pending = true; + rdp->rcu_iw_gp_seq = rnp->gp_seq; + irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); +@@ -2869,18 +2903,17 @@ __rcu_process_callbacks(struct rcu_state *rsp) + /* + * Do RCU core processing for the current CPU. + */ +-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) ++static __latent_entropy void rcu_process_callbacks(void) + { + struct rcu_state *rsp; + + if (cpu_is_offline(smp_processor_id())) + return; +- trace_rcu_utilization(TPS("Start RCU core")); + for_each_rcu_flavor(rsp) + __rcu_process_callbacks(rsp); +- trace_rcu_utilization(TPS("End RCU core")); + } + ++static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); + /* + * Schedule RCU callback invocation. If the specified type of RCU + * does not support RCU priority boosting, just do a direct call, +@@ -2892,19 +2925,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) + { + if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) + return; +- if (likely(!rsp->boost)) { +- rcu_do_batch(rsp, rdp); +- return; +- } +- invoke_rcu_callbacks_kthread(); ++ rcu_do_batch(rsp, rdp); + } + ++static void rcu_wake_cond(struct task_struct *t, int status) ++{ ++ /* ++ * If the thread is yielding, only wake it when this ++ * is invoked from idle ++ */ ++ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) ++ wake_up_process(t); ++} ++ ++/* ++ * Wake up this CPU's rcuc kthread to do RCU core processing. ++ */ + static void invoke_rcu_core(void) + { +- if (cpu_online(smp_processor_id())) +- raise_softirq(RCU_SOFTIRQ); ++ unsigned long flags; ++ struct task_struct *t; ++ ++ if (!cpu_online(smp_processor_id())) ++ return; ++ local_irq_save(flags); ++ __this_cpu_write(rcu_cpu_has_work, 1); ++ t = __this_cpu_read(rcu_cpu_kthread_task); ++ if (t != NULL && current != t) ++ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); ++ local_irq_restore(flags); ++} ++ ++static void rcu_cpu_kthread_park(unsigned int cpu) ++{ ++ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; ++} ++ ++static int rcu_cpu_kthread_should_run(unsigned int cpu) ++{ ++ return __this_cpu_read(rcu_cpu_has_work); + } + ++/* ++ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the ++ * RCU softirq used in flavors and configurations of RCU that do not ++ * support RCU priority boosting. 
++ */ ++static void rcu_cpu_kthread(unsigned int cpu) ++{ ++ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); ++ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); ++ int spincnt; ++ ++ for (spincnt = 0; spincnt < 10; spincnt++) { ++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); ++ local_bh_disable(); ++ *statusp = RCU_KTHREAD_RUNNING; ++ this_cpu_inc(rcu_cpu_kthread_loops); ++ local_irq_disable(); ++ work = *workp; ++ *workp = 0; ++ local_irq_enable(); ++ if (work) ++ rcu_process_callbacks(); ++ local_bh_enable(); ++ if (*workp == 0) { ++ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); ++ *statusp = RCU_KTHREAD_WAITING; ++ return; ++ } ++ } ++ *statusp = RCU_KTHREAD_YIELDING; ++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); ++ schedule_timeout_interruptible(2); ++ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); ++ *statusp = RCU_KTHREAD_WAITING; ++} ++ ++static struct smp_hotplug_thread rcu_cpu_thread_spec = { ++ .store = &rcu_cpu_kthread_task, ++ .thread_should_run = rcu_cpu_kthread_should_run, ++ .thread_fn = rcu_cpu_kthread, ++ .thread_comm = "rcuc/%u", ++ .setup = rcu_cpu_kthread_setup, ++ .park = rcu_cpu_kthread_park, ++}; ++ ++/* ++ * Spawn per-CPU RCU core processing kthreads. ++ */ ++static int __init rcu_spawn_core_kthreads(void) ++{ ++ int cpu; ++ ++ for_each_possible_cpu(cpu) ++ per_cpu(rcu_cpu_has_work, cpu) = 0; ++ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); ++ return 0; ++} ++early_initcall(rcu_spawn_core_kthreads); ++ + /* + * Handle any core-RCU processing required by a call_rcu() invocation. + */ +@@ -3056,6 +3176,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. +@@ -3083,6 +3204,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) + __call_rcu(head, func, &rcu_bh_state, -1, 0); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif + + /* + * Queue an RCU callback for lazy invocation after a grace period. +@@ -3168,6 +3290,7 @@ void synchronize_sched(void) + } + EXPORT_SYMBOL_GPL(synchronize_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. + * +@@ -3194,6 +3317,7 @@ void synchronize_rcu_bh(void) + wait_rcu_gp(call_rcu_bh); + } + EXPORT_SYMBOL_GPL(synchronize_rcu_bh); ++#endif + + /** + * get_state_synchronize_rcu - Snapshot current RCU state +@@ -3501,6 +3625,7 @@ static void _rcu_barrier(struct rcu_state *rsp) + mutex_unlock(&rsp->barrier_mutex); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. + */ +@@ -3509,6 +3634,7 @@ void rcu_barrier_bh(void) + _rcu_barrier(&rcu_bh_state); + } + EXPORT_SYMBOL_GPL(rcu_barrier_bh); ++#endif + + /** + * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. +@@ -3658,8 +3784,6 @@ int rcutree_online_cpu(unsigned int cpu) + rnp->ffmask |= rdp->grpmask; + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } +- if (IS_ENABLED(CONFIG_TREE_SRCU)) +- srcu_online_cpu(cpu); + if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) + return 0; /* Too early in boot for scheduler work. 
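The rcu_cpu_kthread() loop added above claims the per-CPU "has work" flag with interrupts disabled, processes callbacks if it was set, and after ten busy rounds yields instead of spinning. The sketch below models just that claim-and-clear loop in userspace; it is not the kernel code, an atomic exchange stands in for the irq-off read-and-clear of the per-CPU flag, sched_yield() for the timed sleep, and the printf for rcu_process_callbacks().

#include <stdatomic.h>
#include <sched.h>
#include <stdio.h>

static atomic_char cpu_has_work = 1;   /* set by invoke_rcu_core() in the patch */

/* One pass of the worker: claim-and-clear the pending flag, process if it
 * was set, and yield after a bounded number of polls instead of spinning. */
static void rcuc_body_sketch(void)
{
    for (int spincnt = 0; spincnt < 10; spincnt++) {
        char work = atomic_exchange(&cpu_has_work, 0);  /* claim the work bit */

        if (work)
            puts("processing callbacks");
        if (!atomic_load(&cpu_has_work))
            return;                        /* nothing pending: back to waiting */
    }
    sched_yield();                         /* be nice after ten busy rounds */
}

int main(void)
{
    rcuc_body_sketch();                    /* one batch */
    atomic_store(&cpu_has_work, 1);
    rcuc_body_sketch();                    /* another batch */
    return 0;
}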
*/ + sync_sched_exp_online_cleanup(cpu); +@@ -3687,8 +3811,6 @@ int rcutree_offline_cpu(unsigned int cpu) + } + + rcutree_affinity_setting(cpu, cpu); +- if (IS_ENABLED(CONFIG_TREE_SRCU)) +- srcu_offline_cpu(cpu); + return 0; + } + +@@ -4157,12 +4279,13 @@ void __init rcu_init(void) + + rcu_bootup_announce(); + rcu_init_geometry(); ++#ifndef CONFIG_PREEMPT_RT_FULL + rcu_init_one(&rcu_bh_state); ++#endif + rcu_init_one(&rcu_sched_state); + if (dump_tree) + rcu_dump_rcu_node_tree(&rcu_sched_state); + __rcu_init_preempt(); +- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); + + /* + * We don't need protection against CPU-hotplug here because +diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h +index 4e74df768..98257d20f 100644 +--- a/kernel/rcu/tree.h ++++ b/kernel/rcu/tree.h +@@ -413,7 +413,9 @@ extern struct list_head rcu_struct_flavors; + */ + extern struct rcu_state rcu_sched_state; + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern struct rcu_state rcu_bh_state; ++#endif + + #ifdef CONFIG_PREEMPT_RCU + extern struct rcu_state rcu_preempt_state; +@@ -421,12 +423,10 @@ extern struct rcu_state rcu_preempt_state; + + int rcu_dynticks_snap(struct rcu_dynticks *rdtp); + +-#ifdef CONFIG_RCU_BOOST + DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); + DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); + DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); + DECLARE_PER_CPU(char, rcu_cpu_has_work); +-#endif /* #ifdef CONFIG_RCU_BOOST */ + + #ifndef RCU_TREE_NONCORE + +@@ -449,8 +449,8 @@ static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, + int ncheck); + static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); + static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); +-static void invoke_rcu_callbacks_kthread(void); + static bool rcu_is_callbacks_kthread(void); ++static void rcu_cpu_kthread_setup(unsigned int cpu); + #ifdef CONFIG_RCU_BOOST + static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, + struct rcu_node *rnp); +diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h +index 72770a551..ac6d6fdf5 100644 +--- a/kernel/rcu/tree_exp.h ++++ b/kernel/rcu/tree_exp.h +@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) + static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, + smp_call_func_t func) + { +- int cpu; + struct rcu_node *rnp; + + trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset")); +@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, + continue; + } + INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); +- preempt_disable(); +- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask); +- /* If all offline, queue the work on an unbound CPU. */ +- if (unlikely(cpu > rnp->grphi)) +- cpu = WORK_CPU_UNBOUND; +- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); +- preempt_enable(); ++ queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work); + rnp->exp_need_flush = true; + } + +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h +index 5f6de49dc..56639c8ed 100644 +--- a/kernel/rcu/tree_plugin.h ++++ b/kernel/rcu/tree_plugin.h +@@ -24,41 +24,16 @@ + * Paul E. McKenney + */ + +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include "../time/tick-internal.h" +- +-#ifdef CONFIG_RCU_BOOST +- + #include "../locking/rtmutex_common.h" + + /* + * Control variables for per-CPU and per-rcu_node kthreads. These + * handle all flavors of RCU. 
+ */ +-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); + DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); + DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); + DEFINE_PER_CPU(char, rcu_cpu_has_work); + +-#else /* #ifdef CONFIG_RCU_BOOST */ +- +-/* +- * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST, +- * all uses are in dead code. Provide a definition to keep the compiler +- * happy, but add WARN_ON_ONCE() to complain if used in the wrong place. +- * This probably needs to be excluded from -rt builds. +- */ +-#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; }) +-#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1) +- +-#endif /* #else #ifdef CONFIG_RCU_BOOST */ +- + #ifdef CONFIG_RCU_NOCB_CPU + static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ + static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ +@@ -337,9 +312,13 @@ static void rcu_preempt_note_context_switch(bool preempt) + struct task_struct *t = current; + struct rcu_data *rdp; + struct rcu_node *rnp; ++ int sleeping_l = 0; + + lockdep_assert_irqs_disabled(); +- WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0); ++#if defined(CONFIG_PREEMPT_RT_FULL) ++ sleeping_l = t->sleeping_lock; ++#endif ++ WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l); + if (t->rcu_read_lock_nesting > 0 && + !t->rcu_read_unlock_special.b.blocked) { + +@@ -520,7 +499,7 @@ static void rcu_read_unlock_special(struct task_struct *t) + } + + /* Hardware IRQ handlers cannot block, complain if they get here. */ +- if (in_irq() || in_serving_softirq()) { ++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { + lockdep_rcu_suspicious(__FILE__, __LINE__, + "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); + pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", +@@ -1023,18 +1002,21 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++/* ++ * If boosting, set rcuc kthreads to realtime priority. ++ */ ++static void rcu_cpu_kthread_setup(unsigned int cpu) ++{ + #ifdef CONFIG_RCU_BOOST ++ struct sched_param sp; + +-static void rcu_wake_cond(struct task_struct *t, int status) +-{ +- /* +- * If the thread is yielding, only wake it when this +- * is invoked from idle +- */ +- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) +- wake_up_process(t); ++ sp.sched_priority = kthread_prio; ++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); ++#endif /* #ifdef CONFIG_RCU_BOOST */ + } + ++#ifdef CONFIG_RCU_BOOST ++ + /* + * Carry out RCU priority boosting on the task indicated by ->exp_tasks + * or ->boost_tasks, advancing the pointer to the next task in the +@@ -1172,23 +1154,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) + } + } + +-/* +- * Wake up the per-CPU kthread to invoke RCU callbacks. +- */ +-static void invoke_rcu_callbacks_kthread(void) +-{ +- unsigned long flags; +- +- local_irq_save(flags); +- __this_cpu_write(rcu_cpu_has_work, 1); +- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && +- current != __this_cpu_read(rcu_cpu_kthread_task)) { +- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), +- __this_cpu_read(rcu_cpu_kthread_status)); +- } +- local_irq_restore(flags); +-} +- + /* + * Is the current CPU running the RCU-callbacks kthread? + * Caller must have preemption disabled. 
+@@ -1243,67 +1208,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, + return 0; + } + +-static void rcu_kthread_do_work(void) +-{ +- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); +- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); +- rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); +-} +- +-static void rcu_cpu_kthread_setup(unsigned int cpu) +-{ +- struct sched_param sp; +- +- sp.sched_priority = kthread_prio; +- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); +-} +- +-static void rcu_cpu_kthread_park(unsigned int cpu) +-{ +- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; +-} +- +-static int rcu_cpu_kthread_should_run(unsigned int cpu) +-{ +- return __this_cpu_read(rcu_cpu_has_work); +-} +- +-/* +- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the +- * RCU softirq used in flavors and configurations of RCU that do not +- * support RCU priority boosting. +- */ +-static void rcu_cpu_kthread(unsigned int cpu) +-{ +- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); +- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); +- int spincnt; +- +- for (spincnt = 0; spincnt < 10; spincnt++) { +- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); +- local_bh_disable(); +- *statusp = RCU_KTHREAD_RUNNING; +- this_cpu_inc(rcu_cpu_kthread_loops); +- local_irq_disable(); +- work = *workp; +- *workp = 0; +- local_irq_enable(); +- if (work) +- rcu_kthread_do_work(); +- local_bh_enable(); +- if (*workp == 0) { +- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); +- *statusp = RCU_KTHREAD_WAITING; +- return; +- } +- } +- *statusp = RCU_KTHREAD_YIELDING; +- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); +- schedule_timeout_interruptible(2); +- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); +- *statusp = RCU_KTHREAD_WAITING; +-} +- + /* + * Set the per-rcu_node kthread's affinity to cover all CPUs that are + * served by the rcu_node in question. The CPU hotplug lock is still +@@ -1334,26 +1238,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) + free_cpumask_var(cm); + } + +-static struct smp_hotplug_thread rcu_cpu_thread_spec = { +- .store = &rcu_cpu_kthread_task, +- .thread_should_run = rcu_cpu_kthread_should_run, +- .thread_fn = rcu_cpu_kthread, +- .thread_comm = "rcuc/%u", +- .setup = rcu_cpu_kthread_setup, +- .park = rcu_cpu_kthread_park, +-}; +- + /* + * Spawn boost kthreads -- called as soon as the scheduler is running. 
+ */ + static void __init rcu_spawn_boost_kthreads(void) + { + struct rcu_node *rnp; +- int cpu; +- +- for_each_possible_cpu(cpu) +- per_cpu(rcu_cpu_has_work, cpu) = 0; +- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); + rcu_for_each_leaf_node(rcu_state_p, rnp) + (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); + } +@@ -1376,11 +1266,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } + +-static void invoke_rcu_callbacks_kthread(void) +-{ +- WARN_ON_ONCE(1); +-} +- + static bool rcu_is_callbacks_kthread(void) + { + return false; +@@ -1404,7 +1289,7 @@ static void rcu_prepare_kthreads(int cpu) + + #endif /* #else #ifdef CONFIG_RCU_BOOST */ + +-#if !defined(CONFIG_RCU_FAST_NO_HZ) ++#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) + + /* + * Check to see if any future RCU-related work will need to be done +@@ -1420,7 +1305,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) + *nextevt = KTIME_MAX; + return rcu_cpu_has_callbacks(NULL); + } ++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ + ++#if !defined(CONFIG_RCU_FAST_NO_HZ) + /* + * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up + * after it. +@@ -1517,6 +1404,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) + return cbs_ready; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready + * to invoke. If the CPU has callbacks, try to advance them. Tell the +@@ -1559,6 +1448,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) + *nextevt = basemono + dj * TICK_NSEC; + return 0; + } ++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ + + /* + * Prepare a CPU for idle from an RCU perspective. The first major task +diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c +index 759ea6881..b5ba34afd 100644 +--- a/kernel/rcu/update.c ++++ b/kernel/rcu/update.c +@@ -68,8 +68,10 @@ extern int rcu_expedited; /* from sysctl */ + module_param(rcu_expedited, int, 0); + extern int rcu_normal; /* from sysctl */ + module_param(rcu_normal, int, 0); +-static int rcu_normal_after_boot; ++static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); ++#ifndef CONFIG_PREEMPT_RT_FULL + module_param(rcu_normal_after_boot, int, 0); ++#endif + #endif /* #ifndef CONFIG_TINY_RCU */ + + #ifdef CONFIG_DEBUG_LOCK_ALLOC +@@ -288,6 +290,7 @@ int rcu_read_lock_held(void) + } + EXPORT_SYMBOL_GPL(rcu_read_lock_held); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? 
+ * +@@ -314,6 +317,7 @@ int rcu_read_lock_bh_held(void) + return in_softirq() || irqs_disabled(); + } + EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); ++#endif + + #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile +index 7fe183404..2b765aa4e 100644 +--- a/kernel/sched/Makefile ++++ b/kernel/sched/Makefile +@@ -18,7 +18,7 @@ endif + + obj-y += core.o loadavg.o clock.o cputime.o + obj-y += idle.o fair.o rt.o deadline.o +-obj-y += wait.o wait_bit.o swait.o completion.o ++obj-y += wait.o wait_bit.o swait.o swork.o completion.o + + obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o + obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o +diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c +index a1ad5b7d5..49c141379 100644 +--- a/kernel/sched/completion.c ++++ b/kernel/sched/completion.c +@@ -29,12 +29,12 @@ void complete(struct completion *x) + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + + if (x->done != UINT_MAX) + x->done++; +- __wake_up_locked(&x->wait, TASK_NORMAL, 1); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ swake_up_locked(&x->wait); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete); + +@@ -58,10 +58,10 @@ void complete_all(struct completion *x) + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done = UINT_MAX; +- __wake_up_locked(&x->wait, TASK_NORMAL, 0); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ swake_up_all_locked(&x->wait); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete_all); + +@@ -70,20 +70,20 @@ do_wait_for_common(struct completion *x, + long (*action)(long), long timeout, int state) + { + if (!x->done) { +- DECLARE_WAITQUEUE(wait, current); ++ DECLARE_SWAITQUEUE(wait); + +- __add_wait_queue_entry_tail_exclusive(&x->wait, &wait); + do { + if (signal_pending_state(state, current)) { + timeout = -ERESTARTSYS; + break; + } ++ __prepare_to_swait(&x->wait, &wait); + __set_current_state(state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + timeout = action(timeout); +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + } while (!x->done && timeout); +- __remove_wait_queue(&x->wait, &wait); ++ __finish_swait(&x->wait, &wait); + if (!x->done) + return timeout; + } +@@ -100,9 +100,9 @@ __wait_for_common(struct completion *x, + + complete_acquire(x); + +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + timeout = do_wait_for_common(x, action, timeout, state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + + complete_release(x); + +@@ -291,12 +291,12 @@ bool try_wait_for_completion(struct completion *x) + if (!READ_ONCE(x->done)) + return false; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = false; + else if (x->done != UINT_MAX) + x->done--; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + EXPORT_SYMBOL(try_wait_for_completion); +@@ -322,8 +322,8 @@ bool completion_done(struct completion *x) + * otherwise we can end up freeing the completion before complete() + * is done referencing it. 
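The completion rework above moves the waiter list to a simple wait queue under a raw spinlock but keeps the existing "done" counter semantics: complete() increments unless complete_all() has latched the counter at UINT_MAX, and each successful waiter consumes one count unless the counter is latched. The following is a userspace model of just those counter semantics; pthread primitives stand in for the raw spinlock and swait queue, and it is not the kernel implementation.

#include <pthread.h>
#include <limits.h>
#include <stdio.h>

struct completion_sketch {
    pthread_mutex_t lock;
    pthread_cond_t  wait;
    unsigned int    done;
};

static void complete_sketch(struct completion_sketch *x)
{
    pthread_mutex_lock(&x->lock);
    if (x->done != UINT_MAX)          /* complete_all() already ran? stay latched */
        x->done++;
    pthread_cond_signal(&x->wait);
    pthread_mutex_unlock(&x->lock);
}

static void complete_all_sketch(struct completion_sketch *x)
{
    pthread_mutex_lock(&x->lock);
    x->done = UINT_MAX;               /* every future waiter proceeds immediately */
    pthread_cond_broadcast(&x->wait);
    pthread_mutex_unlock(&x->lock);
}

static void wait_for_completion_sketch(struct completion_sketch *x)
{
    pthread_mutex_lock(&x->lock);
    while (!x->done)
        pthread_cond_wait(&x->wait, &x->lock);
    if (x->done != UINT_MAX)
        x->done--;                    /* consume one completion */
    pthread_mutex_unlock(&x->lock);
}

int main(void)
{
    struct completion_sketch c = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    complete_sketch(&c);
    wait_for_completion_sketch(&c);   /* returns immediately, done back to 0 */
    puts("waited once");
    complete_all_sketch(&c);          /* latches the counter */
    wait_for_completion_sketch(&c);
    puts("waited again");
    return 0;
}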
+ */ +- spin_lock_irqsave(&x->wait.lock, flags); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return true; + } + EXPORT_SYMBOL(completion_done); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 496ce71f9..c6dd1cf96 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -45,7 +45,11 @@ const_debug unsigned int sysctl_sched_features = + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++const_debug unsigned int sysctl_sched_nr_migrate = 8; ++#else + const_debug unsigned int sysctl_sched_nr_migrate = 32; ++#endif + + /* + * period over which we measure -rt task CPU usage in us. +@@ -329,7 +333,7 @@ static void hrtick_rq_init(struct rq *rq) + rq->hrtick_csd.info = rq; + #endif + +- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + rq->hrtick_timer.function = hrtick; + } + #else /* CONFIG_SCHED_HRTICK */ +@@ -423,9 +427,16 @@ static bool set_nr_if_polling(struct task_struct *p) + * This function must be used as-if it were wake_up_process(); IOW the task + * must be ready to be woken at this location. + */ +-void wake_q_add(struct wake_q_head *head, struct task_struct *task) ++void __wake_q_add(struct wake_q_head *head, struct task_struct *task, ++ bool sleeper) ++ + { +- struct wake_q_node *node = &task->wake_q; ++ struct wake_q_node *node; ++ ++ if (sleeper) ++ node = &task->wake_q_sleeper; ++ else ++ node = &task->wake_q; + + /* + * Atomically grab the task, if ->wake_q is !nil already it means +@@ -448,24 +459,32 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) + head->lastp = &node->next; + } + +-void wake_up_q(struct wake_q_head *head) ++void __wake_up_q(struct wake_q_head *head, bool sleeper) + { + struct wake_q_node *node = head->first; + + while (node != WAKE_Q_TAIL) { + struct task_struct *task; + +- task = container_of(node, struct task_struct, wake_q); ++ if (sleeper) ++ task = container_of(node, struct task_struct, wake_q_sleeper); ++ else ++ task = container_of(node, struct task_struct, wake_q); + BUG_ON(!task); + /* Task can safely be re-inserted now: */ + node = node->next; +- task->wake_q.next = NULL; +- ++ if (sleeper) ++ task->wake_q_sleeper.next = NULL; ++ else ++ task->wake_q.next = NULL; + /* + * wake_up_process() executes a full barrier, which pairs with + * the queueing in wake_q_add() so as not to miss wakeups. 
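The __wake_q_add()/__wake_up_q() changes above only add a second, "sleeper" queue (task->wake_q_sleeper) next to the existing one; the structure itself is the stock intrusive singly linked list in which enqueueing atomically grabs the task's embedded node, so a task sits on at most one wake queue and a second add is a no-op. Below is a self-contained C11 sketch of that structure; every name is invented and a printf stands in for wake_up_process() or wake_up_lock_sleeper().

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct wq_node { _Atomic(struct wq_node *) next; };

#define WQ_TAIL ((struct wq_node *)0x1)   /* end-of-list marker */

struct task_sketch {
    const char     *name;
    struct wq_node  wake_q;      /* embedded node, like task_struct::wake_q */
};

struct wq_head {
    _Atomic(struct wq_node *)  first;
    _Atomic(struct wq_node *) *lastp;
};

static void wake_q_add_sketch(struct wq_head *h, struct task_sketch *t)
{
    struct wq_node *node = &t->wake_q;
    struct wq_node *expect = NULL;

    /* Claim the embedded node; non-NULL next means the task is already queued. */
    if (!atomic_compare_exchange_strong(&node->next, &expect, WQ_TAIL))
        return;

    atomic_store(h->lastp, node);          /* append at the tail */
    h->lastp = &node->next;
}

static void wake_up_q_sketch(struct wq_head *h)
{
    struct wq_node *node = atomic_load(&h->first);

    while (node != WQ_TAIL) {
        struct task_sketch *t = (struct task_sketch *)
            ((char *)node - offsetof(struct task_sketch, wake_q));

        node = atomic_load(&t->wake_q.next);   /* advance before re-arming */
        atomic_store(&t->wake_q.next, NULL);   /* node may be reused now   */
        printf("wake %s\n", t->name);
    }
}

int main(void)
{
    struct task_sketch a = { "A", { NULL } }, b = { "B", { NULL } };
    struct wq_head head = { WQ_TAIL, &head.first };

    wake_q_add_sketch(&head, &a);
    wake_q_add_sketch(&head, &a);    /* second add is a no-op */
    wake_q_add_sketch(&head, &b);
    wake_up_q_sketch(&head);         /* wakes A then B, once each */
    return 0;
}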
+ */ +- wake_up_process(task); ++ if (sleeper) ++ wake_up_lock_sleeper(task); ++ else ++ wake_up_process(task); + put_task_struct(task); + } + } +@@ -501,6 +520,48 @@ void resched_curr(struct rq *rq) + trace_sched_wake_idle_without_ipi(cpu); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++ ++static int tsk_is_polling(struct task_struct *p) ++{ ++#ifdef TIF_POLLING_NRFLAG ++ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); ++#else ++ return 0; ++#endif ++} ++ ++void resched_curr_lazy(struct rq *rq) ++{ ++ struct task_struct *curr = rq->curr; ++ int cpu; ++ ++ if (!sched_feat(PREEMPT_LAZY)) { ++ resched_curr(rq); ++ return; ++ } ++ ++ lockdep_assert_held(&rq->lock); ++ ++ if (test_tsk_need_resched(curr)) ++ return; ++ ++ if (test_tsk_need_resched_lazy(curr)) ++ return; ++ ++ set_tsk_need_resched_lazy(curr); ++ ++ cpu = cpu_of(rq); ++ if (cpu == smp_processor_id()) ++ return; ++ ++ /* NEED_RESCHED_LAZY must be visible before we test polling */ ++ smp_mb(); ++ if (!tsk_is_polling(curr)) ++ smp_send_reschedule(cpu); ++} ++#endif ++ + void resched_cpu(int cpu) + { + struct rq *rq = cpu_rq(cpu); +@@ -904,10 +965,10 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) + */ + static inline bool is_cpu_allowed(struct task_struct *p, int cpu) + { +- if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) ++ if (!cpumask_test_cpu(cpu, p->cpus_ptr)) + return false; + +- if (is_per_cpu_kthread(p)) ++ if (is_per_cpu_kthread(p) || __migrate_disabled(p)) + return cpu_online(cpu); + + return cpu_active(cpu); +@@ -956,6 +1017,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, + struct migration_arg { + struct task_struct *task; + int dest_cpu; ++ bool done; + }; + + /* +@@ -991,6 +1053,11 @@ static int migration_cpu_stop(void *data) + struct task_struct *p = arg->task; + struct rq *rq = this_rq(); + struct rq_flags rf; ++ int dest_cpu = arg->dest_cpu; ++ ++ /* We don't look at arg after this point. */ ++ smp_mb(); ++ arg->done = true; + + /* + * The original target CPU might have gone down and we might +@@ -999,7 +1066,7 @@ static int migration_cpu_stop(void *data) + local_irq_disable(); + /* + * We need to explicitly wake pending tasks before running +- * __migrate_task() such that we will not miss enforcing cpus_allowed ++ * __migrate_task() such that we will not miss enforcing cpus_ptr + * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 
+ */ + sched_ttwu_pending(); +@@ -1013,9 +1080,9 @@ static int migration_cpu_stop(void *data) + */ + if (task_rq(p) == rq) { + if (task_on_rq_queued(p)) +- rq = __migrate_task(rq, &rf, p, arg->dest_cpu); ++ rq = __migrate_task(rq, &rf, p, dest_cpu); + else +- p->wake_cpu = arg->dest_cpu; ++ p->wake_cpu = dest_cpu; + } + rq_unlock(rq, &rf); + raw_spin_unlock(&p->pi_lock); +@@ -1030,9 +1097,18 @@ static int migration_cpu_stop(void *data) + */ + void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) + { +- cpumask_copy(&p->cpus_allowed, new_mask); +- p->nr_cpus_allowed = cpumask_weight(new_mask); ++ cpumask_copy(&p->cpus_mask, new_mask); ++ if (p->cpus_ptr == &p->cpus_mask) ++ p->nr_cpus_allowed = cpumask_weight(new_mask); ++} ++ ++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++int __migrate_disabled(struct task_struct *p) ++{ ++ return p->migrate_disable; + } ++EXPORT_SYMBOL_GPL(__migrate_disabled); ++#endif + + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { +@@ -1100,7 +1176,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, + goto out; + } + +- if (cpumask_equal(&p->cpus_allowed, new_mask)) ++ if (cpumask_equal(&p->cpus_mask, new_mask)) + goto out; + + dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); +@@ -1122,7 +1198,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, + } + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || ++ p->cpus_ptr != &p->cpus_mask) + goto out; + + if (task_running(rq, p) || p->state == TASK_WAKING) { +@@ -1263,10 +1340,10 @@ static int migrate_swap_stop(void *data) + if (task_cpu(arg->src_task) != arg->src_cpu) + goto unlock; + +- if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed)) ++ if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) + goto unlock; + +- if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed)) ++ if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) + goto unlock; + + __migrate_swap_task(arg->src_task, arg->dst_cpu); +@@ -1308,10 +1385,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, + if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) + goto out; + +- if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed)) ++ if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) + goto out; + +- if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed)) ++ if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) + goto out; + + trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); +@@ -1322,6 +1399,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, + } + #endif /* CONFIG_NUMA_BALANCING */ + ++static bool check_task_state(struct task_struct *p, long match_state) ++{ ++ bool match = false; ++ ++ raw_spin_lock_irq(&p->pi_lock); ++ if (p->state == match_state || p->saved_state == match_state) ++ match = true; ++ raw_spin_unlock_irq(&p->pi_lock); ++ ++ return match; ++} ++ + /* + * wait_task_inactive - wait for a thread to unschedule. + * +@@ -1366,7 +1455,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) + * is actually now running somewhere else! 
+ */ + while (task_running(rq, p)) { +- if (match_state && unlikely(p->state != match_state)) ++ if (match_state && !check_task_state(p, match_state)) + return 0; + cpu_relax(); + } +@@ -1381,7 +1470,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) + running = task_running(rq, p); + queued = task_on_rq_queued(p); + ncsw = 0; +- if (!match_state || p->state == match_state) ++ if (!match_state || p->state == match_state || ++ p->saved_state == match_state) + ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ + task_rq_unlock(rq, p, &rf); + +@@ -1456,7 +1546,7 @@ void kick_process(struct task_struct *p) + EXPORT_SYMBOL_GPL(kick_process); + + /* +- * ->cpus_allowed is protected by both rq->lock and p->pi_lock ++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock + * + * A few notes on cpu_active vs cpu_online: + * +@@ -1496,14 +1586,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p) + for_each_cpu(dest_cpu, nodemask) { + if (!cpu_active(dest_cpu)) + continue; +- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) ++ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) + return dest_cpu; + } + } + + for (;;) { + /* Any allowed, online CPU? */ +- for_each_cpu(dest_cpu, &p->cpus_allowed) { ++ for_each_cpu(dest_cpu, p->cpus_ptr) { + if (!is_cpu_allowed(p, dest_cpu)) + continue; + +@@ -1547,7 +1637,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) + } + + /* +- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. ++ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. + */ + static inline + int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -1557,11 +1647,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) + if (p->nr_cpus_allowed > 1) + cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); + else +- cpu = cpumask_any(&p->cpus_allowed); ++ cpu = cpumask_any(p->cpus_ptr); + + /* + * In order not to call set_task_cpu() on a blocking task we need +- * to rely on ttwu() to place the task on a valid ->cpus_allowed ++ * to rely on ttwu() to place the task on a valid ->cpus_ptr + * CPU. + * + * Since this is common to all placement strategies, this lives here. +@@ -1985,8 +2075,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) + */ + raw_spin_lock_irqsave(&p->pi_lock, flags); + smp_mb__after_spinlock(); +- if (!(p->state & state)) ++ if (!(p->state & state)) { ++ /* ++ * The task might be running due to a spinlock sleeper ++ * wakeup. Check the saved state and set it to running ++ * if the wakeup condition is true. ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) { ++ if (p->saved_state & state) { ++ p->saved_state = TASK_RUNNING; ++ success = 1; ++ } ++ } + goto out; ++ } ++ ++ /* ++ * If this is a regular wakeup, then we can unconditionally ++ * clear the saved state of a "lock sleeper". ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) ++ p->saved_state = TASK_RUNNING; + + trace_sched_waking(p); + +@@ -2100,6 +2209,18 @@ int wake_up_process(struct task_struct *p) + } + EXPORT_SYMBOL(wake_up_process); + ++/** ++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" ++ * @p: The process to be woken up. ++ * ++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate ++ * the nature of the wakeup. 
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
+ {
+ return try_to_wake_up(p, state, 0);
+@@ -2349,6 +2470,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+ p->on_cpu = 0;
+ #endif
+ init_task_preempt_count(p);
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(p)->preempt_lazy_count = 0;
++#endif
+ #ifdef CONFIG_SMP
+ plist_node_init(&p->pushable_tasks, MAX_PRIO);
+ RB_CLEAR_NODE(&p->pushable_dl_tasks);
+@@ -2413,7 +2537,7 @@ void wake_up_new_task(struct task_struct *p)
+ #ifdef CONFIG_SMP
+ /*
+ * Fork balancing, do it here and not earlier because:
+- * - cpus_allowed can change in the fork path
++ * - cpus_ptr can change in the fork path
+ * - any previously selected CPU might disappear through hotplug
+ *
+ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
+@@ -2702,23 +2826,18 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+ * provided by mmdrop(),
+ * - a sync_core for SYNC_CORE.
+ */
++ /*
++ * We use mmdrop_delayed() here so we don't have to do the
++ * full __mmdrop() when we are the last user.
++ */
+ if (mm) {
+ membarrier_mm_sync_core_before_usermode(mm);
+- mmdrop(mm);
++ mmdrop_delayed(mm);
+ }
+ if (unlikely(prev_state == TASK_DEAD)) {
+ if (prev->sched_class->task_dead)
+ prev->sched_class->task_dead(prev);
+
+- /*
+- * Remove function-return probe instances associated with this
+- * task and put them back on the free list.
+- */
+- kprobe_flush_task(prev);
+-
+- /* Task is done with its stack. */
+- put_task_stack(prev);
+-
+ put_task_struct_rcu_user(prev);
+ }
+
+@@ -3406,6 +3525,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ BUG();
+ }
+
++static void migrate_disabled_sched(struct task_struct *p);
++
+ /*
+ * __schedule() is the main scheduler function.
+ *
+@@ -3476,6 +3597,9 @@ static void __sched notrace __schedule(bool preempt)
+ rq_lock(rq, &rf);
+ smp_mb__after_spinlock();
+
++ if (__migrate_disabled(prev))
++ migrate_disabled_sched(prev);
++
+ /* Promote REQ to ACT */
+ rq->clock_update_flags <<= 1;
+ update_rq_clock(rq);
+@@ -3498,6 +3622,7 @@ static void __sched notrace __schedule(bool preempt)
+
+ next = pick_next_task(rq, prev, &rf);
+ clear_tsk_need_resched(prev);
++ clear_tsk_need_resched_lazy(prev);
+ clear_preempt_need_resched();
+
+ if (likely(prev != next)) {
+@@ -3553,7 +3678,7 @@ void __noreturn do_task_dead(void)
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+- if (!tsk->state || tsk_is_pi_blocked(tsk))
++ if (!tsk->state)
+ return;
+
+ /*
+@@ -3572,6 +3697,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ preempt_enable_no_resched();
+ }
+
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+@@ -3689,6 +3817,30 @@ static void __sched notrace preempt_schedule_common(void)
+ } while (need_resched());
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++/*
++ * If TIF_NEED_RESCHED is set then we allow ourselves to be scheduled away,
++ * since it is set by an RT task. Otherwise we try to avoid being scheduled
++ * out as long as the preempt_lazy_count counter is > 0.
++ */ ++static __always_inline int preemptible_lazy(void) ++{ ++ if (test_thread_flag(TIF_NEED_RESCHED)) ++ return 1; ++ if (current_thread_info()->preempt_lazy_count) ++ return 0; ++ return 1; ++} ++ ++#else ++ ++static inline int preemptible_lazy(void) ++{ ++ return 1; ++} ++ ++#endif ++ + #ifdef CONFIG_PREEMPT + /* + * this is the entry point to schedule() from in-kernel preemption +@@ -3703,7 +3855,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) + */ + if (likely(!preemptible())) + return; +- ++ if (!preemptible_lazy()) ++ return; + preempt_schedule_common(); + } + NOKPROBE_SYMBOL(preempt_schedule); +@@ -3730,6 +3883,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) + if (likely(!preemptible())) + return; + ++ if (!preemptible_lazy()) ++ return; ++ + do { + /* + * Because the function tracer can trace preempt_count_sub() +@@ -4371,7 +4527,7 @@ static int __sched_setscheduler(struct task_struct *p, + * the entire root_domain to become SCHED_DEADLINE. We + * will also fail if there's no bandwidth available. + */ +- if (!cpumask_subset(span, &p->cpus_allowed) || ++ if (!cpumask_subset(span, p->cpus_ptr) || + rq->rd->dl_bw.bw == 0) { + task_rq_unlock(rq, p, &rf); + return -EPERM; +@@ -4970,7 +5126,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) + goto out_unlock; + + raw_spin_lock_irqsave(&p->pi_lock, flags); +- cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); ++ cpumask_and(mask, &p->cpus_mask, cpu_active_mask); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + + out_unlock: +@@ -5509,7 +5665,9 @@ void init_idle(struct task_struct *idle, int cpu) + + /* Set the preempt count _outside_ the spinlocks! */ + init_idle_preempt_count(idle, cpu); +- ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(idle)->preempt_lazy_count = 0; ++#endif + /* + * The idle tasks have their own, simple scheduling class: + */ +@@ -5548,7 +5706,7 @@ int task_can_attach(struct task_struct *p, + * allowed nodes is unnecessary. Thus, cpusets are not + * applicable for such threads. This prevents checking for + * success of set_cpus_allowed_ptr() on all attached tasks +- * before cpus_allowed may be changed. ++ * before cpus_mask may be changed. + */ + if (p->flags & PF_NO_SETAFFINITY) { + ret = -EINVAL; +@@ -5575,7 +5733,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) + if (curr_cpu == target_cpu) + return 0; + +- if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed)) ++ if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) + return -EINVAL; + + /* TODO: This is not properly updating schedstats */ +@@ -5614,6 +5772,7 @@ void sched_setnuma(struct task_struct *p, int nid) + #endif /* CONFIG_NUMA_BALANCING */ + + #ifdef CONFIG_HOTPLUG_CPU ++ + /* + * Ensure that the idle task is using init_mm right before its CPU goes + * offline. +@@ -5713,8 +5872,10 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) + BUG_ON(!next); + put_prev_task(rq, next); + ++ WARN_ON_ONCE(__migrate_disabled(next)); ++ + /* +- * Rules for changing task_struct::cpus_allowed are holding ++ * Rules for changing task_struct::cpus_mask are holding + * both pi_lock and rq->lock, such that holding either + * stabilizes the mask. 
+ * +@@ -6193,7 +6354,7 @@ void __init sched_init(void) + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + static inline int preempt_count_equals(int preempt_offset) + { +- int nested = preempt_count() + rcu_preempt_depth(); ++ int nested = preempt_count() + sched_rcu_preempt_depth(); + + return (nested == preempt_offset); + } +@@ -7310,3 +7471,171 @@ const u32 sched_prio_to_wmult[40] = { + }; + + #undef CREATE_TRACE_POINTS ++ ++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++ ++static inline void ++update_nr_migratory(struct task_struct *p, long delta) ++{ ++ if (unlikely((p->sched_class == &rt_sched_class || ++ p->sched_class == &dl_sched_class) && ++ p->nr_cpus_allowed > 1)) { ++ if (p->sched_class == &rt_sched_class) ++ task_rq(p)->rt.rt_nr_migratory += delta; ++ else ++ task_rq(p)->dl.dl_nr_migratory += delta; ++ } ++} ++ ++static inline void ++migrate_disable_update_cpus_allowed(struct task_struct *p) ++{ ++ p->cpus_ptr = cpumask_of(smp_processor_id()); ++ update_nr_migratory(p, -1); ++ p->nr_cpus_allowed = 1; ++} ++ ++static inline void ++migrate_enable_update_cpus_allowed(struct task_struct *p) ++{ ++ struct rq *rq; ++ struct rq_flags rf; ++ ++ rq = task_rq_lock(p, &rf); ++ p->cpus_ptr = &p->cpus_mask; ++ p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask); ++ update_nr_migratory(p, 1); ++ task_rq_unlock(rq, p, &rf); ++} ++ ++void migrate_disable(void) ++{ ++ preempt_disable(); ++ ++ if (++current->migrate_disable == 1) { ++ this_rq()->nr_pinned++; ++ preempt_lazy_disable(); ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(current->pinned_on_cpu >= 0); ++ current->pinned_on_cpu = smp_processor_id(); ++#endif ++ } ++ ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_disable); ++ ++static void migrate_disabled_sched(struct task_struct *p) ++{ ++ if (p->migrate_disable_scheduled) ++ return; ++ ++ migrate_disable_update_cpus_allowed(p); ++ p->migrate_disable_scheduled = 1; ++} ++ ++static DEFINE_PER_CPU(struct cpu_stop_work, migrate_work); ++static DEFINE_PER_CPU(struct migration_arg, migrate_arg); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ struct rq *rq = this_rq(); ++ int cpu = task_cpu(p); ++ ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ if (p->migrate_disable > 1) { ++ p->migrate_disable--; ++ return; ++ } ++ ++ preempt_disable(); ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(current->pinned_on_cpu != cpu); ++ current->pinned_on_cpu = -1; ++#endif ++ ++ WARN_ON_ONCE(rq->nr_pinned < 1); ++ ++ p->migrate_disable = 0; ++ rq->nr_pinned--; ++#ifdef CONFIG_HOTPLUG_CPU ++ if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) && ++ takedown_cpu_task) ++ wake_up_process(takedown_cpu_task); ++#endif ++ ++ if (!p->migrate_disable_scheduled) ++ goto out; ++ ++ p->migrate_disable_scheduled = 0; ++ ++ migrate_enable_update_cpus_allowed(p); ++ ++ WARN_ON(smp_processor_id() != cpu); ++ if (!is_cpu_allowed(p, cpu)) { ++ struct migration_arg __percpu *arg; ++ struct cpu_stop_work __percpu *work; ++ struct rq_flags rf; ++ ++ work = this_cpu_ptr(&migrate_work); ++ arg = this_cpu_ptr(&migrate_arg); ++ WARN_ON_ONCE(!arg->done && !work->disabled && work->arg); ++ ++ arg->task = p; ++ arg->done = false; ++ ++ rq = task_rq_lock(p, &rf); ++ update_rq_clock(rq); ++ arg->dest_cpu = select_fallback_rq(cpu, p); ++ task_rq_unlock(rq, p, &rf); ++ ++ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, ++ arg, work); ++ tlb_migrate_finish(p->mm); ++ } ++ ++out: ++ preempt_lazy_enable(); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_enable); ++ ++int cpu_nr_pinned(int cpu) ++{ ++ struct rq 
*rq = cpu_rq(cpu); ++ ++ return rq->nr_pinned; ++} ++ ++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++static void migrate_disabled_sched(struct task_struct *p) ++{ ++} ++ ++void migrate_disable(void) ++{ ++#ifdef CONFIG_SCHED_DEBUG ++ current->migrate_disable++; ++#endif ++ barrier(); ++} ++EXPORT_SYMBOL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++#ifdef CONFIG_SCHED_DEBUG ++ struct task_struct *p = current; ++ ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ p->migrate_disable--; ++#endif ++ barrier(); ++} ++EXPORT_SYMBOL(migrate_enable); ++#else ++static void migrate_disabled_sched(struct task_struct *p) ++{ ++} ++#endif +diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c +index 50316455e..d57fb2f8a 100644 +--- a/kernel/sched/cpudeadline.c ++++ b/kernel/sched/cpudeadline.c +@@ -124,14 +124,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, + const struct sched_dl_entity *dl_se = &p->dl; + + if (later_mask && +- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { ++ cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) { + return 1; + } else { + int best_cpu = cpudl_maximum(cp); + + WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); + +- if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) && ++ if (cpumask_test_cpu(best_cpu, p->cpus_ptr) && + dl_time_before(dl_se->deadline, cp->elements[0].dl)) { + if (later_mask) + cpumask_set_cpu(best_cpu, later_mask); +diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c +index daaadf939..f7d2c10b4 100644 +--- a/kernel/sched/cpupri.c ++++ b/kernel/sched/cpupri.c +@@ -98,11 +98,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, + if (skip) + continue; + +- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) ++ if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids) + continue; + + if (lowest_mask) { +- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); ++ cpumask_and(lowest_mask, p->cpus_ptr, vec->mask); + + /* + * We have to ensure that we have at least one bit +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 6c4f93af1..a074b84ed 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -287,7 +287,7 @@ static void task_non_contending(struct task_struct *p) + + dl_se->dl_non_contending = 1; + get_task_struct(p); +- hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL); ++ hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); + } + + static void task_contending(struct sched_dl_entity *dl_se, int flags) +@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p + * If we cannot preempt any rq, fall back to pick any + * online CPU: + */ +- cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); ++ cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); + if (cpu >= nr_cpu_ids) { + /* + * Failed to find any suitable CPU. 
+@@ -1086,7 +1086,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) + { + struct hrtimer *timer = &dl_se->dl_timer; + +- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + timer->function = dl_task_timer; + } + +@@ -1325,7 +1325,7 @@ void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se) + { + struct hrtimer *timer = &dl_se->inactive_timer; + +- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + timer->function = inactive_task_timer; + } + +@@ -1857,7 +1857,7 @@ static void set_curr_task_dl(struct rq *rq) + static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) + { + if (!task_running(rq, p) && +- cpumask_test_cpu(cpu, &p->cpus_allowed)) ++ cpumask_test_cpu(cpu, p->cpus_ptr)) + return 1; + return 0; + } +@@ -2035,7 +2035,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) + */ + next_task = pick_next_pushable_dl_task(rq); + if (unlikely(next_task != task || +- !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed))) { ++ !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr))) { + double_unlock_balance(rq, later_rq); + later_rq = NULL; + break; +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index fcf2a07ec..f5c76fabb 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -1027,6 +1027,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, + P(dl.runtime); + P(dl.deadline); + } ++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++ P(migrate_disable); ++#endif ++ P(nr_cpus_allowed); + #undef PN_SCHEDSTAT + #undef PN + #undef __PN +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 52029f3a7..2fd091c24 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -1705,7 +1705,7 @@ static void task_numa_compare(struct task_numa_env *env, + * be incurred if the tasks were swapped. + */ + /* Skip this swap candidate if cannot move to the source cpu */ +- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) ++ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) + goto unlock; + + /* +@@ -1803,7 +1803,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, + + for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { + /* Skip this CPU if the source task cannot migrate */ +- if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed)) ++ if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) + continue; + + env->dst_cpu = cpu; +@@ -4188,7 +4188,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) + ideal_runtime = sched_slice(cfs_rq, curr); + delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; + if (delta_exec > ideal_runtime) { +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + /* + * The current task ran long enough, ensure it doesn't get + * re-elected due to buddy favours. +@@ -4212,7 +4212,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) + return; + + if (delta > ideal_runtime) +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + } + + static void +@@ -4354,7 +4354,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) + * validating it and just reschedule. 
+ */ + if (queued) { +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + return; + } + /* +@@ -4488,7 +4488,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) + * hierarchy can be throttled + */ + if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + } + + static __always_inline +@@ -5213,7 +5213,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) + + if (delta < 0) { + if (rq->curr == p) +- resched_curr(rq); ++ resched_curr_lazy(rq); + return; + } + hrtick_start(rq, delta); +@@ -5927,7 +5927,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, + + /* Skip over this group if it has no CPUs allowed */ + if (!cpumask_intersects(sched_group_span(group), +- &p->cpus_allowed)) ++ p->cpus_ptr)) + continue; + + local_group = cpumask_test_cpu(this_cpu, +@@ -6059,7 +6059,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this + return cpumask_first(sched_group_span(group)); + + /* Traverse only the allowed CPUs */ +- for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { ++ for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { + if (sched_idle_cpu(i)) + return i; + +@@ -6102,7 +6102,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p + { + int new_cpu = cpu; + +- if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed)) ++ if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) + return prev_cpu; + + /* +@@ -6219,7 +6219,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int + if (!test_idle_cores(target, false)) + return -1; + +- cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); ++ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); + + for_each_cpu_wrap(core, cpus, target) { + bool idle = true; +@@ -6253,7 +6253,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t + return -1; + + for_each_cpu(cpu, cpu_smt_mask(target)) { +- if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) ++ if (!cpumask_test_cpu(cpu, p->cpus_ptr)) + continue; + if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) + return cpu; +@@ -6314,7 +6314,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t + + time = local_clock(); + +- cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); ++ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); + + for_each_cpu_wrap(cpu, cpus, target) { + if (!--nr) +@@ -6373,7 +6373,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) + recent_used_cpu != target && + cpus_share_cache(recent_used_cpu, target) && + (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && +- cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) { ++ cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) { + /* + * Replace recent_used_cpu with prev as it is a potential + * candidate for the next wake: +@@ -6604,7 +6604,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f + if (sd_flag & SD_BALANCE_WAKE) { + record_wakee(p); + want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) +- && cpumask_test_cpu(cpu, &p->cpus_allowed); ++ && cpumask_test_cpu(cpu, p->cpus_ptr); + } + + rcu_read_lock(); +@@ -6866,7 +6866,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ + return; + + preempt: +- resched_curr(rq); ++ resched_curr_lazy(rq); + /* + * Only set 
the backward buddy when the current task is still + * on the rq. This can happen when a wakeup gets interleaved +@@ -7569,14 +7569,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) + /* + * We do not migrate tasks that are: + * 1) throttled_lb_pair, or +- * 2) cannot be migrated to this CPU due to cpus_allowed, or ++ * 2) cannot be migrated to this CPU due to cpus_ptr, or + * 3) running (obviously), or + * 4) are cache-hot on their current CPU. + */ + if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) + return 0; + +- if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { ++ if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { + int cpu; + + schedstat_inc(p->se.statistics.nr_failed_migrations_affine); +@@ -7596,7 +7596,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) + + /* Prevent to re-select dst_cpu via env's CPUs: */ + for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { +- if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { ++ if (cpumask_test_cpu(cpu, p->cpus_ptr)) { + env->flags |= LBF_DST_PINNED; + env->new_dst_cpu = cpu; + break; +@@ -7653,7 +7653,7 @@ can_migrate_task_llc(struct task_struct *p, struct rq *rq, struct rq *dst_rq) + if (throttled_lb_pair(task_group(p), cpu_of(rq), dst_cpu)) + return false; + +- if (!cpumask_test_cpu(dst_cpu, &p->cpus_allowed)) { ++ if (!cpumask_test_cpu(dst_cpu, p->cpus_ptr)) { + schedstat_inc(p->se.statistics.nr_failed_migrations_affine); + return false; + } +@@ -8251,7 +8251,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) + + /* + * Group imbalance indicates (and tries to solve) the problem where balancing +- * groups is inadequate due to ->cpus_allowed constraints. ++ * groups is inadequate due to ->cpus_ptr constraints. + * + * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a + * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. +@@ -8866,7 +8866,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) + /* + * If the busiest group is imbalanced the below checks don't + * work because they assume all things are equal, which typically +- * isn't true due to cpus_allowed constraints and the like. ++ * isn't true due to cpus_ptr constraints and the like. + */ + if (busiest->group_type == group_imbalanced) + goto force_balance; +@@ -9262,7 +9262,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, + * if the curr task on busiest CPU can't be + * moved to this_cpu: + */ +- if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { ++ if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { + raw_spin_unlock_irqrestore(&busiest->lock, + flags); + env.flags |= LBF_ALL_PINNED; +@@ -10397,7 +10397,7 @@ static void task_fork_fair(struct task_struct *p) + * 'current' within the tree based on its new key value. 
+ */ + swap(curr->vruntime, se->vruntime); +- resched_curr(rq); ++ resched_curr_lazy(rq); + } + + se->vruntime -= cfs_rq->min_vruntime; +@@ -10421,7 +10421,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) + */ + if (rq->curr == p) { + if (p->prio > oldprio) +- resched_curr(rq); ++ resched_curr_lazy(rq); + } else + check_preempt_curr(rq, p, 0); + } +diff --git a/kernel/sched/features.h b/kernel/sched/features.h +index 515bfbcc6..550e236f4 100644 +--- a/kernel/sched/features.h ++++ b/kernel/sched/features.h +@@ -46,11 +46,19 @@ SCHED_FEAT(LB_BIAS, true) + */ + SCHED_FEAT(NONTASK_CAPACITY, true) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++SCHED_FEAT(TTWU_QUEUE, false) ++# ifdef CONFIG_PREEMPT_LAZY ++SCHED_FEAT(PREEMPT_LAZY, true) ++# endif ++#else ++ + /* + * Queue remote wakeups on the target CPU and process them + * using the scheduler IPI. Reduces rq->lock contention/bounces. + */ + SCHED_FEAT(TTWU_QUEUE, true) ++#endif + + /* + * When doing wakeups, attempt to limit superfluous scans of the LLC domain. +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 301ba04d9..cefd42d6d 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) + + raw_spin_lock_init(&rt_b->rt_runtime_lock); + +- hrtimer_init(&rt_b->rt_period_timer, +- CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, ++ HRTIMER_MODE_REL_HARD); + rt_b->rt_period_timer.function = sched_rt_period_timer; + } + +@@ -1619,7 +1619,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) + static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) + { + if (!task_running(rq, p) && +- cpumask_test_cpu(cpu, &p->cpus_allowed)) ++ cpumask_test_cpu(cpu, p->cpus_ptr)) + return 1; + + return 0; +@@ -1779,7 +1779,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) + struct task_struct *next_task = pick_next_pushable_task(rq); + if (unlikely(next_task != task || + !rt_task(task) || +- !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed))) { ++ !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) { + + double_unlock_balance(rq, lowest_rq); + lowest_rq = NULL; +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 1aaff1aa8..17195d82e 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -966,6 +966,9 @@ struct rq { + struct cpuidle_state *idle_state; + #endif + ++#if defined(CONFIG_PREEMPT_RT_BASE) && defined(CONFIG_SMP) ++ int nr_pinned; ++#endif + KABI_RESERVE(1) + KABI_RESERVE(2) + }; +@@ -1518,6 +1521,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) + #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ + #define WF_FORK 0x02 /* Child wakeup after fork */ + #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ ++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ + + /* + * To aid in avoiding the subversion of "niceness" due to uneven distribution +@@ -1715,6 +1719,15 @@ extern void reweight_task(struct task_struct *p, int prio); + extern void resched_curr(struct rq *rq); + extern void resched_cpu(int cpu); + ++#ifdef CONFIG_PREEMPT_LAZY ++extern void resched_curr_lazy(struct rq *rq); ++#else ++static inline void resched_curr_lazy(struct rq *rq) ++{ ++ resched_curr(rq); ++} ++#endif ++ + extern struct rt_bandwidth def_rt_bandwidth; + extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); + +diff --git a/kernel/sched/swait.c 
b/kernel/sched/swait.c +index 66b59ac77..119a56d7f 100644 +--- a/kernel/sched/swait.c ++++ b/kernel/sched/swait.c +@@ -32,6 +32,25 @@ void swake_up_locked(struct swait_queue_head *q) + } + EXPORT_SYMBOL(swake_up_locked); + ++void swake_up_all_locked(struct swait_queue_head *q) ++{ ++ struct swait_queue *curr; ++ int wakes = 0; ++ ++ while (!list_empty(&q->task_list)) { ++ ++ curr = list_first_entry(&q->task_list, typeof(*curr), ++ task_list); ++ wake_up_process(curr->task); ++ list_del_init(&curr->task_list); ++ wakes++; ++ } ++ if (pm_in_action) ++ return; ++ WARN(wakes > 2, "complete_all() with %d waiters\n", wakes); ++} ++EXPORT_SYMBOL(swake_up_all_locked); ++ + void swake_up_one(struct swait_queue_head *q) + { + unsigned long flags; +@@ -51,6 +70,7 @@ void swake_up_all(struct swait_queue_head *q) + struct swait_queue *curr; + LIST_HEAD(tmp); + ++ WARN_ON(irqs_disabled()); + raw_spin_lock_irq(&q->lock); + list_splice_init(&q->task_list, &tmp); + while (!list_empty(&tmp)) { +@@ -69,7 +89,7 @@ void swake_up_all(struct swait_queue_head *q) + } + EXPORT_SYMBOL(swake_up_all); + +-static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) ++void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) + { + wait->task = current; + if (list_empty(&wait->task_list)) +diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c +new file mode 100644 +index 000000000..c90d14b9b +--- /dev/null ++++ b/kernel/sched/swork.c +@@ -0,0 +1,173 @@ ++/* ++ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de ++ * ++ * Provides a framework for enqueuing callbacks from irq context ++ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SWORK_EVENT_PENDING 1 ++ ++static DEFINE_MUTEX(worker_mutex); ++static struct sworker *glob_worker; ++ ++struct sworker { ++ struct list_head events; ++ struct swait_queue_head wq; ++ ++ raw_spinlock_t lock; ++ ++ struct task_struct *task; ++ int refs; ++}; ++ ++static bool swork_readable(struct sworker *worker) ++{ ++ bool r; ++ ++ if (kthread_should_stop()) ++ return true; ++ ++ raw_spin_lock_irq(&worker->lock); ++ r = !list_empty(&worker->events); ++ raw_spin_unlock_irq(&worker->lock); ++ ++ return r; ++} ++ ++static int swork_kthread(void *arg) ++{ ++ struct sworker *worker = arg; ++ ++ for (;;) { ++ swait_event_interruptible_exclusive(worker->wq, ++ swork_readable(worker)); ++ if (kthread_should_stop()) ++ break; ++ ++ raw_spin_lock_irq(&worker->lock); ++ while (!list_empty(&worker->events)) { ++ struct swork_event *sev; ++ ++ sev = list_first_entry(&worker->events, ++ struct swork_event, item); ++ list_del(&sev->item); ++ raw_spin_unlock_irq(&worker->lock); ++ ++ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING, ++ &sev->flags)); ++ sev->func(sev); ++ raw_spin_lock_irq(&worker->lock); ++ } ++ raw_spin_unlock_irq(&worker->lock); ++ } ++ return 0; ++} ++ ++static struct sworker *swork_create(void) ++{ ++ struct sworker *worker; ++ ++ worker = kzalloc(sizeof(*worker), GFP_KERNEL); ++ if (!worker) ++ return ERR_PTR(-ENOMEM); ++ ++ INIT_LIST_HEAD(&worker->events); ++ raw_spin_lock_init(&worker->lock); ++ init_swait_queue_head(&worker->wq); ++ ++ worker->task = kthread_run(swork_kthread, worker, "kswork"); ++ if (IS_ERR(worker->task)) { ++ kfree(worker); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ return worker; ++} ++ ++static void swork_destroy(struct sworker *worker) ++{ ++ kthread_stop(worker->task); ++ ++ 
WARN_ON(!list_empty(&worker->events)); ++ kfree(worker); ++} ++ ++/** ++ * swork_queue - queue swork ++ * ++ * Returns %false if @work was already on a queue, %true otherwise. ++ * ++ * The work is queued and processed on a random CPU ++ */ ++bool swork_queue(struct swork_event *sev) ++{ ++ unsigned long flags; ++ ++ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags)) ++ return false; ++ ++ raw_spin_lock_irqsave(&glob_worker->lock, flags); ++ list_add_tail(&sev->item, &glob_worker->events); ++ raw_spin_unlock_irqrestore(&glob_worker->lock, flags); ++ ++ swake_up_one(&glob_worker->wq); ++ return true; ++} ++EXPORT_SYMBOL_GPL(swork_queue); ++ ++/** ++ * swork_get - get an instance of the sworker ++ * ++ * Returns an negative error code if the initialization if the worker did not ++ * work, %0 otherwise. ++ * ++ */ ++int swork_get(void) ++{ ++ struct sworker *worker; ++ ++ mutex_lock(&worker_mutex); ++ if (!glob_worker) { ++ worker = swork_create(); ++ if (IS_ERR(worker)) { ++ mutex_unlock(&worker_mutex); ++ return -ENOMEM; ++ } ++ ++ glob_worker = worker; ++ } ++ ++ glob_worker->refs++; ++ mutex_unlock(&worker_mutex); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(swork_get); ++ ++/** ++ * swork_put - puts an instance of the sworker ++ * ++ * Will destroy the sworker thread. This function must not be called until all ++ * queued events have been completed. ++ */ ++void swork_put(void) ++{ ++ mutex_lock(&worker_mutex); ++ ++ glob_worker->refs--; ++ if (glob_worker->refs > 0) ++ goto out; ++ ++ swork_destroy(glob_worker); ++ glob_worker = NULL; ++out: ++ mutex_unlock(&worker_mutex); ++} ++EXPORT_SYMBOL_GPL(swork_put); +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c +index ad5591520..1952bc9c0 100644 +--- a/kernel/sched/topology.c ++++ b/kernel/sched/topology.c +@@ -294,6 +294,7 @@ static int init_rootdomain(struct root_domain *rd) + rd->rto_cpu = -1; + raw_spin_lock_init(&rd->rto_lock); + init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); ++ rd->rto_push_work.flags |= IRQ_WORK_HARD_IRQ; + #endif + + init_dl_bw(&rd->dl_bw); +diff --git a/kernel/signal.c b/kernel/signal.c +index bc558abbf..b6a06b562 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -400,13 +401,30 @@ void task_join_group_stop(struct task_struct *task) + task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING); + } + ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ struct sigqueue *q = t->sigqueue_cache; ++ ++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) ++ return NULL; ++ return q; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) ++ return 0; ++ return 1; ++} ++ + /* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appropriate lock must be held to stop the target task from exiting + */ + static struct sigqueue * +-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) ++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit, int fromslab) + { + struct sigqueue *q = NULL; + struct user_struct *user; +@@ -428,7 +446,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi + rcu_read_unlock(); + + if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) { +- q = kmem_cache_alloc(sigqueue_cachep, 
flags); ++ if (!fromslab) ++ q = get_task_cache(t); ++ if (!q) ++ q = kmem_cache_alloc(sigqueue_cachep, flags); + } else { + print_dropped_signal(sig); + } +@@ -445,6 +466,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi + return q; + } + ++static struct sigqueue * ++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit) ++{ ++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); ++} ++ + static void __sigqueue_free(struct sigqueue *q) + { + if (q->flags & SIGQUEUE_PREALLOC) +@@ -454,6 +482,21 @@ static void __sigqueue_free(struct sigqueue *q) + kmem_cache_free(sigqueue_cachep, q); + } + ++static void sigqueue_free_current(struct sigqueue *q) ++{ ++ struct user_struct *up; ++ ++ if (q->flags & SIGQUEUE_PREALLOC) ++ return; ++ ++ up = q->user; ++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { ++ if (atomic_dec_and_test(&up->sigpending)) ++ free_uid(up); ++ } else ++ __sigqueue_free(q); ++} ++ + void flush_sigqueue(struct sigpending *queue) + { + struct sigqueue *q; +@@ -466,6 +509,21 @@ void flush_sigqueue(struct sigpending *queue) + } + } + ++/* ++ * Called from __exit_signal. Flush tsk->pending and ++ * tsk->sigqueue_cache ++ */ ++void flush_task_sigqueue(struct task_struct *tsk) ++{ ++ struct sigqueue *q; ++ ++ flush_sigqueue(&tsk->pending); ++ ++ q = get_task_cache(tsk); ++ if (q) ++ kmem_cache_free(sigqueue_cachep, q); ++} ++ + /* + * Flush all pending signals for this kthread. + */ +@@ -589,7 +647,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, + (info->si_code == SI_TIMER) && + (info->si_sys_private); + +- __sigqueue_free(first); ++ sigqueue_free_current(first); + } else { + /* + * Ok, it wasn't in the queue. This must be +@@ -626,6 +684,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) + bool resched_timer = false; + int signr; + ++ WARN_ON_ONCE(tsk != current); ++ + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ +@@ -1288,8 +1348,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, + * We don't want to have recursive SIGSEGV's etc, for example, + * that is why we also clear SIGNAL_UNKILLABLE. + */ +-int +-force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++static int ++do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + unsigned long int flags; + int ret, blocked, ignored; +@@ -1318,6 +1378,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + return ret; + } + ++int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++{ ++/* ++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap ++ * since it can not enable preemption, and the signal code's spin_locks ++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will ++ * send the signal on exit of the trap. 
++ */ ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (in_atomic()) { ++ if (WARN_ON_ONCE(t != current)) ++ return 0; ++ if (WARN_ON_ONCE(t->forced_info.si_signo)) ++ return 0; ++ ++ if (is_si_special(info)) { ++ WARN_ON_ONCE(info != SEND_SIG_PRIV); ++ t->forced_info.si_signo = sig; ++ t->forced_info.si_errno = 0; ++ t->forced_info.si_code = SI_KERNEL; ++ t->forced_info.si_pid = 0; ++ t->forced_info.si_uid = 0; ++ } else { ++ t->forced_info = *info; ++ } ++ ++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); ++ return 0; ++ } ++#endif ++ return do_force_sig_info(sig, info, t); ++} ++ + /* + * Nuke all other threads in the group. + */ +@@ -1734,7 +1827,8 @@ EXPORT_SYMBOL(kill_pid); + */ + struct sigqueue *sigqueue_alloc(void) + { +- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); ++ /* Preallocated sigqueue objects always from the slabcache ! */ ++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); + + if (q) + q->flags |= SIGQUEUE_PREALLOC; +@@ -2104,15 +2198,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) + if (gstop_done && ptrace_reparented(current)) + do_notify_parent_cldstop(current, false, why); + +- /* +- * Don't want to allow preemption here, because +- * sys_ptrace() needs this task to be inactive. +- * +- * XXX: implement read_unlock_no_resched(). +- */ +- preempt_disable(); + read_unlock(&tasklist_lock); +- preempt_enable_no_resched(); + freezable_schedule(); + } else { + /* +diff --git a/kernel/softirq.c b/kernel/softirq.c +index 6f584861d..9bad7a16d 100644 +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -21,11 +21,14 @@ + #include + #include + #include ++#include + #include + #include + #include + #include ++#include + #include ++#include + + #define CREATE_TRACE_POINTS + #include +@@ -56,12 +59,136 @@ EXPORT_PER_CPU_SYMBOL(irq_stat); + static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; + + DEFINE_PER_CPU(struct task_struct *, ksoftirqd); ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ)) ++DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd); ++#endif + + const char * const softirq_to_name[NR_SOFTIRQS] = { + "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL", + "TASKLET", "SCHED", "HRTIMER", "RCU" + }; + ++#ifdef CONFIG_NO_HZ_COMMON ++# ifdef CONFIG_PREEMPT_RT_FULL ++ ++struct softirq_runner { ++ struct task_struct *runner[NR_SOFTIRQS]; ++}; ++ ++static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); ++ ++static inline void softirq_set_runner(unsigned int sirq) ++{ ++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); ++ ++ sr->runner[sirq] = current; ++} ++ ++static inline void softirq_clr_runner(unsigned int sirq) ++{ ++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); ++ ++ sr->runner[sirq] = NULL; ++} ++ ++static bool softirq_check_runner_tsk(struct task_struct *tsk, ++ unsigned int *pending) ++{ ++ bool ret = false; ++ ++ if (!tsk) ++ return ret; ++ ++ /* ++ * The wakeup code in rtmutex.c wakes up the task ++ * _before_ it sets pi_blocked_on to NULL under ++ * tsk->pi_lock. So we need to check for both: state ++ * and pi_blocked_on. ++ * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the ++ * task does cpu_chill(). 
++ */ ++ raw_spin_lock(&tsk->pi_lock); ++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING || ++ (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) { ++ /* Clear all bits pending in that task */ ++ *pending &= ~(tsk->softirqs_raised); ++ ret = true; ++ } ++ raw_spin_unlock(&tsk->pi_lock); ++ ++ return ret; ++} ++ ++/* ++ * On preempt-rt a softirq running context might be blocked on a ++ * lock. There might be no other runnable task on this CPU because the ++ * lock owner runs on some other CPU. So we have to go into idle with ++ * the pending bit set. Therefor we need to check this otherwise we ++ * warn about false positives which confuses users and defeats the ++ * whole purpose of this test. ++ * ++ * This code is called with interrupts disabled. ++ */ ++void softirq_check_pending_idle(void) ++{ ++ struct task_struct *tsk; ++ static int rate_limit; ++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); ++ u32 warnpending; ++ int i; ++ ++ if (rate_limit >= 10) ++ return; ++ ++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; ++ if (!warnpending) ++ return; ++ for (i = 0; i < NR_SOFTIRQS; i++) { ++ tsk = sr->runner[i]; ++ ++ if (softirq_check_runner_tsk(tsk, &warnpending)) ++ warnpending &= ~(1 << i); ++ } ++ ++ if (warnpending) { ++ tsk = __this_cpu_read(ksoftirqd); ++ softirq_check_runner_tsk(tsk, &warnpending); ++ } ++ ++ if (warnpending) { ++ tsk = __this_cpu_read(ktimer_softirqd); ++ softirq_check_runner_tsk(tsk, &warnpending); ++ } ++ ++ if (warnpending) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ warnpending); ++ rate_limit++; ++ } ++} ++# else ++/* ++ * On !PREEMPT_RT we just printk rate limited: ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ ++ if (rate_limit < 10 && ++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ local_softirq_pending()); ++ rate_limit++; ++ } ++} ++# endif ++ ++#else /* !CONFIG_NO_HZ_COMMON */ ++static inline void softirq_set_runner(unsigned int sirq) { } ++static inline void softirq_clr_runner(unsigned int sirq) { } ++#endif ++ + /* + * we cannot loop indefinitely here to avoid userspace starvation, + * but we also don't want to introduce a worst case 1/HZ latency +@@ -77,6 +204,38 @@ static void wakeup_softirqd(void) + wake_up_process(tsk); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void wakeup_timer_softirqd(void) ++{ ++ /* Interrupts are disabled: no need to stop preemption */ ++ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); ++ ++ if (tsk && tsk->state != TASK_RUNNING) ++ wake_up_process(tsk); ++} ++#endif ++ ++static void handle_softirq(unsigned int vec_nr) ++{ ++ struct softirq_action *h = softirq_vec + vec_nr; ++ int prev_count; ++ ++ prev_count = preempt_count(); ++ ++ kstat_incr_softirqs_this_cpu(vec_nr); ++ ++ trace_softirq_entry(vec_nr); ++ h->action(h); ++ trace_softirq_exit(vec_nr); ++ if (unlikely(prev_count != preempt_count())) { ++ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", ++ vec_nr, softirq_to_name[vec_nr], h->action, ++ prev_count, preempt_count()); ++ preempt_count_set(prev_count); ++ } ++} ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * If ksoftirqd is scheduled, we do not want to process pending softirqs + * right now. 
Let ksoftirqd handle this at its own rate, to get fairness, +@@ -92,6 +251,47 @@ static bool ksoftirqd_running(unsigned long pending) + return tsk && (tsk->state == TASK_RUNNING); + } + ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return local_softirq_pending(); ++} ++ ++static void handle_pending_softirqs(u32 pending) ++{ ++ struct softirq_action *h = softirq_vec; ++ int softirq_bit; ++ ++ local_irq_enable(); ++ ++ h = softirq_vec; ++ ++ while ((softirq_bit = ffs(pending))) { ++ unsigned int vec_nr; ++ ++ h += softirq_bit - 1; ++ vec_nr = h - softirq_vec; ++ handle_softirq(vec_nr); ++ ++ h++; ++ pending >>= softirq_bit; ++ } ++ ++ rcu_bh_qs(); ++ local_irq_disable(); ++} ++ ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ if (ksoftirqd_softirq_pending()) { ++ __do_softirq(); ++ local_irq_enable(); ++ cond_resched(); ++ return; ++ } ++ local_irq_enable(); ++} ++ + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -251,10 +451,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) + unsigned long end = jiffies + MAX_SOFTIRQ_TIME; + unsigned long old_flags = current->flags; + int max_restart = MAX_SOFTIRQ_RESTART; +- struct softirq_action *h; + bool in_hardirq; + __u32 pending; +- int softirq_bit; + + /* + * Mask out PF_MEMALLOC s current task context is borrowed for the +@@ -273,36 +471,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- local_irq_enable(); +- +- h = softirq_vec; +- +- while ((softirq_bit = ffs(pending))) { +- unsigned int vec_nr; +- int prev_count; +- +- h += softirq_bit - 1; +- +- vec_nr = h - softirq_vec; +- prev_count = preempt_count(); +- +- kstat_incr_softirqs_this_cpu(vec_nr); +- +- trace_softirq_entry(vec_nr); +- h->action(h); +- trace_softirq_exit(vec_nr); +- if (unlikely(prev_count != preempt_count())) { +- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", +- vec_nr, softirq_to_name[vec_nr], h->action, +- prev_count, preempt_count()); +- preempt_count_set(prev_count); +- } +- h++; +- pending >>= softirq_bit; +- } +- +- rcu_bh_qs(); +- local_irq_disable(); ++ handle_pending_softirqs(pending); + + pending = local_softirq_pending(); + if (pending) { +@@ -338,6 +507,309 @@ asmlinkage __visible void do_softirq(void) + local_irq_restore(flags); + } + ++/* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++ ++ /* ++ * If we're in an interrupt or softirq, we're done ++ * (this also catches softirq-disabled code). We will ++ * actually run the softirq once we return from ++ * the irq or softirq. ++ * ++ * Otherwise we wake up ksoftirqd to make sure we ++ * schedule the softirq soon. 
++ */ ++ if (!in_interrupt()) ++ wakeup_softirqd(); ++} ++ ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ trace_softirq_raise(nr); ++ or_softirq_pending(1UL << nr); ++} ++ ++static inline void local_bh_disable_nort(void) { local_bh_disable(); } ++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } ++static void ksoftirqd_set_sched_params(unsigned int cpu) { } ++ ++#else /* !PREEMPT_RT_FULL */ ++ ++/* ++ * On RT we serialize softirq execution with a cpu local lock per softirq ++ */ ++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); ++ ++void __init softirq_early_init(void) ++{ ++ int i; ++ ++ for (i = 0; i < NR_SOFTIRQS; i++) ++ local_irq_lock_init(local_softirq_locks[i]); ++} ++ ++static void lock_softirq(int which) ++{ ++ local_lock(local_softirq_locks[which]); ++} ++ ++static void unlock_softirq(int which) ++{ ++ local_unlock(local_softirq_locks[which]); ++} ++ ++static void do_single_softirq(int which) ++{ ++ unsigned long old_flags = current->flags; ++ ++ current->flags &= ~PF_MEMALLOC; ++ vtime_account_irq_enter(current); ++ current->flags |= PF_IN_SOFTIRQ; ++ lockdep_softirq_enter(); ++ local_irq_enable(); ++ handle_softirq(which); ++ local_irq_disable(); ++ lockdep_softirq_exit(); ++ current->flags &= ~PF_IN_SOFTIRQ; ++ vtime_account_irq_enter(current); ++ current_restore_flags(old_flags, PF_MEMALLOC); ++} ++ ++/* ++ * Called with interrupts disabled. Process softirqs which were raised ++ * in current context (or on behalf of ksoftirqd). ++ */ ++static void do_current_softirqs(void) ++{ ++ while (current->softirqs_raised) { ++ int i = __ffs(current->softirqs_raised); ++ unsigned int pending, mask = (1U << i); ++ ++ current->softirqs_raised &= ~mask; ++ local_irq_enable(); ++ ++ /* ++ * If the lock is contended, we boost the owner to ++ * process the softirq or leave the critical section ++ * now. ++ */ ++ lock_softirq(i); ++ local_irq_disable(); ++ softirq_set_runner(i); ++ /* ++ * Check with the local_softirq_pending() bits, ++ * whether we need to process this still or if someone ++ * else took care of it. ++ */ ++ pending = local_softirq_pending(); ++ if (pending & mask) { ++ set_softirq_pending(pending & ~mask); ++ do_single_softirq(i); ++ } ++ softirq_clr_runner(i); ++ WARN_ON(current->softirq_nestcnt != 1); ++ local_irq_enable(); ++ unlock_softirq(i); ++ local_irq_disable(); ++ } ++} ++ ++void __local_bh_disable(void) ++{ ++ if (++current->softirq_nestcnt == 1) ++ migrate_disable(); ++} ++EXPORT_SYMBOL(__local_bh_disable); ++ ++void __local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ ++ local_irq_disable(); ++ if (current->softirq_nestcnt == 1 && current->softirqs_raised) ++ do_current_softirqs(); ++ local_irq_enable(); ++ ++ if (--current->softirq_nestcnt == 0) ++ migrate_enable(); ++} ++EXPORT_SYMBOL(__local_bh_enable); ++ ++void _local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ if (--current->softirq_nestcnt == 0) ++ migrate_enable(); ++} ++EXPORT_SYMBOL(_local_bh_enable); ++ ++int in_serving_softirq(void) ++{ ++ return current->flags & PF_IN_SOFTIRQ; ++} ++EXPORT_SYMBOL(in_serving_softirq); ++ ++/* Called with preemption disabled */ ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ current->softirq_nestcnt++; ++ ++ do_current_softirqs(); ++ current->softirq_nestcnt--; ++ local_irq_enable(); ++ cond_resched(); ++} ++ ++/* ++ * Called from netif_rx_ni(). Preemption enabled, but migration ++ * disabled. 
So the cpu can't go away under us. ++ */ ++void thread_do_softirq(void) ++{ ++ if (!in_serving_softirq() && current->softirqs_raised) { ++ current->softirq_nestcnt++; ++ do_current_softirqs(); ++ current->softirq_nestcnt--; ++ } ++} ++ ++static void do_raise_softirq_irqoff(unsigned int nr) ++{ ++ unsigned int mask; ++ ++ mask = 1UL << nr; ++ ++ trace_softirq_raise(nr); ++ or_softirq_pending(mask); ++ ++ /* ++ * If we are not in a hard interrupt and inside a bh disabled ++ * region, we simply raise the flag on current. local_bh_enable() ++ * will make sure that the softirq is executed. Otherwise we ++ * delegate it to ksoftirqd. ++ */ ++ if (!in_irq() && current->softirq_nestcnt) ++ current->softirqs_raised |= mask; ++ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd)) ++ return; ++ ++ if (mask & TIMER_SOFTIRQS) ++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; ++ else ++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; ++} ++ ++static void wakeup_proper_softirq(unsigned int nr) ++{ ++ if ((1UL << nr) & TIMER_SOFTIRQS) ++ wakeup_timer_softirqd(); ++ else ++ wakeup_softirqd(); ++} ++ ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ do_raise_softirq_irqoff(nr); ++ if (!in_irq() && !current->softirq_nestcnt) ++ wakeup_proper_softirq(nr); ++} ++ ++/* ++ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd ++ */ ++void __raise_softirq_irqoff_ksoft(unsigned int nr) ++{ ++ unsigned int mask; ++ ++ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) || ++ !__this_cpu_read(ktimer_softirqd))) ++ return; ++ mask = 1UL << nr; ++ ++ trace_softirq_raise(nr); ++ or_softirq_pending(mask); ++ if (mask & TIMER_SOFTIRQS) ++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; ++ else ++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; ++ wakeup_proper_softirq(nr); ++} ++ ++/* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ do_raise_softirq_irqoff(nr); ++ ++ /* ++ * If we're in an hard interrupt we let irq return code deal ++ * with the wakeup of ksoftirqd. ++ */ ++ if (in_irq()) ++ return; ++ /* ++ * If we are in thread context but outside of a bh disabled ++ * region, we need to wake ksoftirqd as well. ++ * ++ * CHECKME: Some of the places which do that could be wrapped ++ * into local_bh_disable/enable pairs. Though it's unclear ++ * whether this is worth the effort. To find those places just ++ * raise a WARN() if the condition is met. 
++ */ ++ if (!current->softirq_nestcnt) ++ wakeup_proper_softirq(nr); ++} ++ ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return current->softirqs_raised; ++} ++ ++static inline void local_bh_disable_nort(void) { } ++static inline void _local_bh_enable_nort(void) { } ++ ++static inline void ksoftirqd_set_sched_params(unsigned int cpu) ++{ ++ /* Take over all but timer pending softirqs when starting */ ++ local_irq_disable(); ++ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; ++ local_irq_enable(); ++} ++ ++static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) ++{ ++ struct sched_param param = { .sched_priority = 1 }; ++ ++ sched_setscheduler(current, SCHED_FIFO, ¶m); ++ ++ /* Take over timer pending softirqs when starting */ ++ local_irq_disable(); ++ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; ++ local_irq_enable(); ++} ++ ++static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, ++ bool online) ++{ ++ struct sched_param param = { .sched_priority = 0 }; ++ ++ sched_setscheduler(current, SCHED_NORMAL, ¶m); ++} ++ ++static int ktimer_softirqd_should_run(unsigned int cpu) ++{ ++ return current->softirqs_raised; ++} ++ ++#endif /* PREEMPT_RT_FULL */ + /* + * Enter an interrupt context. + */ +@@ -349,9 +821,9 @@ void irq_enter(void) + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. + */ +- local_bh_disable(); ++ local_bh_disable_nort(); + tick_irq_enter(); +- _local_bh_enable(); ++ _local_bh_enable_nort(); + } + + __irq_enter(); +@@ -359,6 +831,7 @@ void irq_enter(void) + + static inline void invoke_softirq(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (ksoftirqd_running(local_softirq_pending())) + return; + +@@ -381,6 +854,18 @@ static inline void invoke_softirq(void) + } else { + wakeup_softirqd(); + } ++#else /* PREEMPT_RT_FULL */ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ if (__this_cpu_read(ksoftirqd) && ++ __this_cpu_read(ksoftirqd)->softirqs_raised) ++ wakeup_softirqd(); ++ if (__this_cpu_read(ktimer_softirqd) && ++ __this_cpu_read(ktimer_softirqd)->softirqs_raised) ++ wakeup_timer_softirqd(); ++ local_irq_restore(flags); ++#endif + } + + static inline void tick_irq_exit(void) +@@ -416,26 +901,6 @@ void irq_exit(void) + trace_hardirq_exit(); /* must be last! */ + } + +-/* +- * This function must run with irqs disabled! +- */ +-inline void raise_softirq_irqoff(unsigned int nr) +-{ +- __raise_softirq_irqoff(nr); +- +- /* +- * If we're in an interrupt or softirq, we're done +- * (this also catches softirq-disabled code). We will +- * actually run the softirq once we return from +- * the irq or softirq. +- * +- * Otherwise we wake up ksoftirqd to make sure we +- * schedule the softirq soon. 
+- */ +- if (!in_interrupt()) +- wakeup_softirqd(); +-} +- + void raise_softirq(unsigned int nr) + { + unsigned long flags; +@@ -445,12 +910,6 @@ void raise_softirq(unsigned int nr) + local_irq_restore(flags); + } + +-void __raise_softirq_irqoff(unsigned int nr) +-{ +- trace_softirq_raise(nr); +- or_softirq_pending(1UL << nr); +-} +- + void open_softirq(int nr, void (*action)(struct softirq_action *)) + { + softirq_vec[nr].action = action; +@@ -475,11 +934,44 @@ static void __tasklet_schedule_common(struct tasklet_struct *t, + unsigned long flags; + + local_irq_save(flags); ++ if (!tasklet_trylock(t)) { ++ local_irq_restore(flags); ++ return; ++ } ++ + head = this_cpu_ptr(headp); +- t->next = NULL; +- *head->tail = t; +- head->tail = &(t->next); +- raise_softirq_irqoff(softirq_nr); ++again: ++ /* We may have been preempted before tasklet_trylock ++ * and __tasklet_action may have already run. ++ * So double check the sched bit while the takslet ++ * is locked before adding it to the list. ++ */ ++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) { ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) ++ if (test_and_set_bit(TASKLET_STATE_CHAINED, &t->state)) { ++ tasklet_unlock(t); ++ return; ++ } ++#endif ++ t->next = NULL; ++ *head->tail = t; ++ head->tail = &(t->next); ++ raise_softirq_irqoff(softirq_nr); ++ tasklet_unlock(t); ++ } else { ++ /* This is subtle. If we hit the corner case above ++ * It is possible that we get preempted right here, ++ * and another task has successfully called ++ * tasklet_schedule(), then this function, and ++ * failed on the trylock. Thus we must be sure ++ * before releasing the tasklet lock, that the ++ * SCHED_BIT is clear. Otherwise the tasklet ++ * may get its SCHED_BIT set, but not added to the ++ * list ++ */ ++ if (!tasklet_tryunlock(t)) ++ goto again; ++ } + local_irq_restore(flags); + } + +@@ -497,11 +989,21 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) + } + EXPORT_SYMBOL(__tasklet_hi_schedule); + ++void tasklet_enable(struct tasklet_struct *t) ++{ ++ if (!atomic_dec_and_test(&t->count)) ++ return; ++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) ++ tasklet_schedule(t); ++} ++EXPORT_SYMBOL(tasklet_enable); ++ + static void tasklet_action_common(struct softirq_action *a, + struct tasklet_head *tl_head, + unsigned int softirq_nr) + { + struct tasklet_struct *list; ++ int loops = 1000000; + + local_irq_disable(); + list = tl_head->head; +@@ -513,25 +1015,60 @@ static void tasklet_action_common(struct softirq_action *a, + struct tasklet_struct *t = list; + + list = list->next; ++ /* ++ * Should always succeed - after a tasklist got on the ++ * list (after getting the SCHED bit set from 0 to 1), ++ * nothing but the tasklet softirq it got queued to can ++ * lock it: ++ */ ++ if (!tasklet_trylock(t)) { ++ WARN_ON(1); ++ continue; ++ } ++ ++ t->next = NULL; + +- if (tasklet_trylock(t)) { +- if (!atomic_read(&t->count)) { +- if (!test_and_clear_bit(TASKLET_STATE_SCHED, +- &t->state)) +- BUG(); +- t->func(t->data); ++ if (unlikely(atomic_read(&t->count))) { ++out_disabled: ++ /* implicit unlock: */ ++ wmb(); ++ t->state = TASKLET_STATEF_PENDING; ++ continue; ++ } ++ /* ++ * After this point on the tasklet might be rescheduled ++ * on another CPU, but it can only be added to another ++ * CPU's tasklet list if we unlock the tasklet (which we ++ * dont do yet). 
++ */ ++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) ++ WARN_ON(1); ++again: ++ t->func(t->data); ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) ++ while (cmpxchg(&t->state, TASKLET_STATEF_RC, 0) != TASKLET_STATEF_RC) { ++#else ++ while (!tasklet_tryunlock(t)) { ++#endif ++ /* ++ * If it got disabled meanwhile, bail out: ++ */ ++ if (atomic_read(&t->count)) ++ goto out_disabled; ++ /* ++ * If it got scheduled meanwhile, re-execute ++ * the tasklet function: ++ */ ++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) ++ goto again; ++ if (!--loops) { ++ printk("hm, tasklet state: %08lx\n", t->state); ++ WARN_ON(1); + tasklet_unlock(t); +- continue; ++ break; + } +- tasklet_unlock(t); + } +- +- local_irq_disable(); +- t->next = NULL; +- *tl_head->tail = t; +- tl_head->tail = &t->next; +- __raise_softirq_irqoff(softirq_nr); +- local_irq_enable(); + } + } + +@@ -563,7 +1100,7 @@ void tasklet_kill(struct tasklet_struct *t) + + while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + do { +- yield(); ++ msleep(1); + } while (test_bit(TASKLET_STATE_SCHED, &t->state)); + } + tasklet_unlock_wait(t); +@@ -637,25 +1174,26 @@ void __init softirq_init(void) + open_softirq(HI_SOFTIRQ, tasklet_hi_action); + } + +-static int ksoftirqd_should_run(unsigned int cpu) +-{ +- return local_softirq_pending(); +-} +- +-static void run_ksoftirqd(unsigned int cpu) ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) ++void tasklet_unlock_wait(struct tasklet_struct *t) + { +- local_irq_disable(); +- if (local_softirq_pending()) { ++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { + /* +- * We can safely run softirq on inline stack, as we are not deep +- * in the task stack here. ++ * Hack for now to avoid this busy-loop: + */ +- __do_softirq(); +- local_irq_enable(); +- cond_resched(); +- return; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ msleep(1); ++#else ++ barrier(); ++#endif + } +- local_irq_enable(); ++} ++EXPORT_SYMBOL(tasklet_unlock_wait); ++#endif ++ ++static int ksoftirqd_should_run(unsigned int cpu) ++{ ++ return ksoftirqd_softirq_pending(); + } + + #ifdef CONFIG_HOTPLUG_CPU +@@ -722,17 +1260,31 @@ static int takeover_tasklets(unsigned int cpu) + + static struct smp_hotplug_thread softirq_threads = { + .store = &ksoftirqd, ++ .setup = ksoftirqd_set_sched_params, + .thread_should_run = ksoftirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ksoftirqd/%u", + }; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static struct smp_hotplug_thread softirq_timer_threads = { ++ .store = &ktimer_softirqd, ++ .setup = ktimer_softirqd_set_sched_params, ++ .cleanup = ktimer_softirqd_clr_sched_params, ++ .thread_should_run = ktimer_softirqd_should_run, ++ .thread_fn = run_ksoftirqd, ++ .thread_comm = "ktimersoftd/%u", ++}; ++#endif ++ + static __init int spawn_ksoftirqd(void) + { + cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, + takeover_tasklets); + BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); +- ++#ifdef CONFIG_PREEMPT_RT_FULL ++ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); ++#endif + return 0; + } + early_initcall(spawn_ksoftirqd); +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c +index 7e103738f..a2b1319a1 100644 +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c +@@ -92,8 +92,11 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) + enabled = stopper->enabled; + if (enabled) + __cpu_stop_queue_work(stopper, work, &wakeq); +- else if (work->done) +- 
cpu_stop_signal_done(work->done); ++ else { ++ work->disabled = true; ++ if (work->done) ++ cpu_stop_signal_done(work->done); ++ } + raw_spin_unlock_irqrestore(&stopper->lock, flags); + + wake_up_q(&wakeq); +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c +index 6a2ba3988..e7b983df6 100644 +--- a/kernel/time/alarmtimer.c ++++ b/kernel/time/alarmtimer.c +@@ -438,7 +438,7 @@ int alarm_cancel(struct alarm *alarm) + int ret = alarm_try_to_cancel(alarm); + if (ret >= 0) + return ret; +- cpu_relax(); ++ hrtimer_grab_expiry_lock(&alarm->timer); + } + } + EXPORT_SYMBOL_GPL(alarm_cancel); +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 32ee24f51..a84673149 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -150,6 +150,11 @@ static struct hrtimer_cpu_base migration_cpu_base = { + + #define migration_base migration_cpu_base.clock_base[0] + ++static inline bool is_migration_base(struct hrtimer_clock_base *base) ++{ ++ return base == &migration_base; ++} ++ + /* + * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock + * means that all timers which are tied to this base via timer->base are +@@ -274,6 +279,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, + + #else /* CONFIG_SMP */ + ++static inline bool is_migration_base(struct hrtimer_clock_base *base) ++{ ++ return false; ++} ++ + static inline struct hrtimer_clock_base * + lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) + { +@@ -957,6 +967,16 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) + } + EXPORT_SYMBOL_GPL(hrtimer_forward); + ++void hrtimer_grab_expiry_lock(const struct hrtimer *timer) ++{ ++ struct hrtimer_clock_base *base = READ_ONCE(timer->base); ++ ++ if (timer->is_soft && !is_migration_base(base)) { ++ spin_lock(&base->cpu_base->softirq_expiry_lock); ++ spin_unlock(&base->cpu_base->softirq_expiry_lock); ++ } ++} ++ + /* + * enqueue_hrtimer - internal function to (re)start a timer + * +@@ -1175,7 +1195,9 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft + * match. + */ ++#ifndef CONFIG_PREEMPT_RT_BASE + WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); ++#endif + + base = lock_hrtimer_base(timer, &flags); + +@@ -1238,7 +1260,7 @@ int hrtimer_cancel(struct hrtimer *timer) + + if (ret >= 0) + return ret; +- cpu_relax(); ++ hrtimer_grab_expiry_lock(timer); + } + } + EXPORT_SYMBOL_GPL(hrtimer_cancel); +@@ -1335,10 +1357,17 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id) + static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, + enum hrtimer_mode mode) + { +- bool softtimer = !!(mode & HRTIMER_MODE_SOFT); +- int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0; ++ bool softtimer; ++ int base; + struct hrtimer_cpu_base *cpu_base; + ++ softtimer = !!(mode & HRTIMER_MODE_SOFT); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!softtimer && !(mode & HRTIMER_MODE_HARD)) ++ softtimer = true; ++#endif ++ base = softtimer ? 
HRTIMER_MAX_CLOCK_BASES / 2 : 0; ++ + memset(timer, 0, sizeof(struct hrtimer)); + + cpu_base = raw_cpu_ptr(&hrtimer_bases); +@@ -1535,6 +1564,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) + unsigned long flags; + ktime_t now; + ++ spin_lock(&cpu_base->softirq_expiry_lock); + raw_spin_lock_irqsave(&cpu_base->lock, flags); + + now = hrtimer_update_base(cpu_base); +@@ -1544,6 +1574,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) + hrtimer_update_softirq_timer(cpu_base, true); + + raw_spin_unlock_irqrestore(&cpu_base->lock, flags); ++ spin_unlock(&cpu_base->softirq_expiry_lock); + } + + #ifdef CONFIG_HIGH_RES_TIMERS +@@ -1715,13 +1746,52 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) + return HRTIMER_NORESTART; + } + +-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) ++static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, ++ clockid_t clock_id, ++ enum hrtimer_mode mode, ++ struct task_struct *task) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!(mode & (HRTIMER_MODE_SOFT | HRTIMER_MODE_HARD))) { ++ if (task_is_realtime(current) || system_state != SYSTEM_RUNNING) ++ mode |= HRTIMER_MODE_HARD; ++ else ++ mode |= HRTIMER_MODE_SOFT; ++ } ++#endif ++ __hrtimer_init(&sl->timer, clock_id, mode); + sl->timer.function = hrtimer_wakeup; + sl->task = task; + } ++ ++/** ++ * hrtimer_init_sleeper - initialize sleeper to the given clock ++ * @sl: sleeper to be initialized ++ * @clock_id: the clock to be used ++ * @mode: timer mode abs/rel ++ * @task: the task to wake up ++ */ ++void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, ++ enum hrtimer_mode mode, struct task_struct *task) ++{ ++ debug_init(&sl->timer, clock_id, mode); ++ __hrtimer_init_sleeper(sl, clock_id, mode, task); ++ ++} + EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); + ++#ifdef CONFIG_DEBUG_OBJECTS_TIMERS ++void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, ++ clockid_t clock_id, ++ enum hrtimer_mode mode, ++ struct task_struct *task) ++{ ++ debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr); ++ __hrtimer_init_sleeper(sl, clock_id, mode, task); ++} ++EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack); ++#endif ++ + int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) + { + switch(restart->nanosleep.type) { +@@ -1745,8 +1815,6 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod + { + struct restart_block *restart; + +- hrtimer_init_sleeper(t, current); +- + do { + set_current_state(TASK_INTERRUPTIBLE); + hrtimer_start_expires(&t->timer, mode); +@@ -1754,12 +1822,12 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod + if (likely(t->task)) + freezable_schedule(); + ++ __set_current_state(TASK_RUNNING); + hrtimer_cancel(&t->timer); + mode = HRTIMER_MODE_ABS; + + } while (t->task && !signal_pending(current)); + +- __set_current_state(TASK_RUNNING); + + if (!t->task) + return 0; +@@ -1783,10 +1851,9 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart) + struct hrtimer_sleeper t; + int ret; + +- hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid, +- HRTIMER_MODE_ABS); ++ hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid, ++ HRTIMER_MODE_ABS, current); + hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); +- + ret = do_nanosleep(&t, HRTIMER_MODE_ABS); + destroy_hrtimer_on_stack(&t.timer); + return ret; +@@ -1804,7 +1871,7 @@ long 
hrtimer_nanosleep(const struct timespec64 *rqtp, + if (dl_task(current) || rt_task(current)) + slack = 0; + +- hrtimer_init_on_stack(&t.timer, clockid, mode); ++ hrtimer_init_sleeper_on_stack(&t, clockid, mode, current); + hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); + ret = do_nanosleep(&t, mode); + if (ret != -ERESTART_RESTARTBLOCK) +@@ -1864,6 +1931,38 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, + } + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Sleep for 1 ms in hope whoever holds what we want will let it go. ++ */ ++void cpu_chill(void) ++{ ++ unsigned int freeze_flag = current->flags & PF_NOFREEZE; ++ struct task_struct *self = current; ++ ktime_t chill_time; ++ ++ raw_spin_lock_irq(&self->pi_lock); ++ self->saved_state = self->state; ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock_irq(&self->pi_lock); ++ ++ chill_time = ktime_set(0, NSEC_PER_MSEC); ++ ++ current->flags |= PF_NOFREEZE; ++ sleeping_lock_inc(); ++ schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD); ++ sleeping_lock_dec(); ++ if (!freeze_flag) ++ current->flags &= ~PF_NOFREEZE; ++ ++ raw_spin_lock_irq(&self->pi_lock); ++ __set_current_state_no_track(self->saved_state); ++ self->saved_state = TASK_RUNNING; ++ raw_spin_unlock_irq(&self->pi_lock); ++} ++EXPORT_SYMBOL(cpu_chill); ++#endif ++ + /* + * Functions related to boot-time initialization: + */ +@@ -1885,6 +1984,7 @@ int hrtimers_prepare_cpu(unsigned int cpu) + cpu_base->softirq_next_timer = NULL; + cpu_base->expires_next = KTIME_MAX; + cpu_base->softirq_expires_next = KTIME_MAX; ++ spin_lock_init(&cpu_base->softirq_expiry_lock); + return 0; + } + +@@ -2003,11 +2103,9 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, + return -EINTR; + } + +- hrtimer_init_on_stack(&t.timer, clock_id, mode); ++ hrtimer_init_sleeper_on_stack(&t, clock_id, mode, current); + hrtimer_set_expires_range_ns(&t.timer, *expires, delta); + +- hrtimer_init_sleeper(&t, current); +- + hrtimer_start_expires(&t.timer, mode); + + if (likely(t.task)) +diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c +index 2e2b335ef..48d977f94 100644 +--- a/kernel/time/itimer.c ++++ b/kernel/time/itimer.c +@@ -211,6 +211,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) + /* We are sharing ->siglock with it_real_fn() */ + if (hrtimer_try_to_cancel(timer) < 0) { + spin_unlock_irq(&tsk->sighand->siglock); ++ hrtimer_grab_expiry_lock(timer); + goto again; + } + expires = timeval_to_ktime(value->it_value); +diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c +index 497719127..62acb8914 100644 +--- a/kernel/time/jiffies.c ++++ b/kernel/time/jiffies.c +@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = { + .max_cycles = 10, + }; + +-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); ++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); ++__cacheline_aligned_in_smp seqcount_t jiffies_seq; + + #if (BITS_PER_LONG < 64) + u64 get_jiffies_64(void) +@@ -83,9 +84,9 @@ u64 get_jiffies_64(void) + u64 ret; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + ret = jiffies_64; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + return ret; + } + EXPORT_SYMBOL(get_jiffies_64); +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +index bfaa44a80..b9e4ccbb6 100644 +--- a/kernel/time/posix-cpu-timers.c ++++ 
b/kernel/time/posix-cpu-timers.c +@@ -3,8 +3,10 @@ + * Implement CPU time clocks for the POSIX clock interface. + */ + ++#include + #include + #include ++#include + #include + #include + #include +@@ -15,6 +17,7 @@ + #include + #include + #include ++#include + + #include "posix-timers.h" + +@@ -789,6 +792,7 @@ check_timers_list(struct list_head *timers, + return t->expires; + + t->firing = 1; ++ t->firing_cpu = smp_processor_id(); + list_move_tail(&t->entry, firing); + } + +@@ -1135,18 +1139,31 @@ static inline int fastpath_timer_check(struct task_struct *tsk) + return 0; + } + ++static DEFINE_PER_CPU(spinlock_t, cpu_timer_expiry_lock) = __SPIN_LOCK_UNLOCKED(cpu_timer_expiry_lock); ++ ++void cpu_timers_grab_expiry_lock(struct k_itimer *timer) ++{ ++ int cpu = timer->it.cpu.firing_cpu; ++ ++ if (cpu >= 0) { ++ spinlock_t *expiry_lock = per_cpu_ptr(&cpu_timer_expiry_lock, cpu); ++ ++ spin_lock_irq(expiry_lock); ++ spin_unlock_irq(expiry_lock); ++ } ++} ++ + /* + * This is called from the timer interrupt handler. The irq handler has + * already updated our counts. We need to check if any timers fire now. + * Interrupts are disabled. + */ +-void run_posix_cpu_timers(struct task_struct *tsk) ++static void __run_posix_cpu_timers(struct task_struct *tsk) + { + LIST_HEAD(firing); + struct k_itimer *timer, *next; + unsigned long flags; +- +- lockdep_assert_irqs_disabled(); ++ spinlock_t *expiry_lock; + + /* + * The fast path checks that there are no expired thread or thread +@@ -1155,8 +1172,13 @@ void run_posix_cpu_timers(struct task_struct *tsk) + if (!fastpath_timer_check(tsk)) + return; + +- if (!lock_task_sighand(tsk, &flags)) ++ expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock); ++ spin_lock(expiry_lock); ++ ++ if (!lock_task_sighand(tsk, &flags)) { ++ spin_unlock(expiry_lock); + return; ++ } + /* + * Here we take off tsk->signal->cpu_timers[N] and + * tsk->cpu_timers[N] all the timers that are firing, and +@@ -1189,6 +1211,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) + list_del_init(&timer->it.cpu.entry); + cpu_firing = timer->it.cpu.firing; + timer->it.cpu.firing = 0; ++ timer->it.cpu.firing_cpu = -1; + /* + * The firing flag is -1 if we collided with a reset + * of the timer, which already reported this +@@ -1198,8 +1221,156 @@ void run_posix_cpu_timers(struct task_struct *tsk) + cpu_timer_fire(timer); + spin_unlock(&timer->it_lock); + } ++ spin_unlock(expiry_lock); ++} ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++#include ++#include ++DEFINE_PER_CPU(struct task_struct *, posix_timer_task); ++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); ++DEFINE_PER_CPU(bool, posix_timer_th_active); ++ ++static void posix_cpu_kthread_fn(unsigned int cpu) ++{ ++ struct task_struct *tsk = NULL; ++ struct task_struct *next = NULL; ++ ++ BUG_ON(per_cpu(posix_timer_task, cpu) != current); ++ ++ /* grab task list */ ++ raw_local_irq_disable(); ++ tsk = per_cpu(posix_timer_tasklist, cpu); ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ raw_local_irq_enable(); ++ ++ /* its possible the list is empty, just return */ ++ if (!tsk) ++ return; ++ ++ /* Process task list */ ++ while (1) { ++ /* save next */ ++ next = tsk->posix_timer_list; ++ ++ /* run the task timers, clear its ptr and ++ * unreference it ++ */ ++ __run_posix_cpu_timers(tsk); ++ tsk->posix_timer_list = NULL; ++ put_task_struct(tsk); ++ ++ /* check if this is the last on the list */ ++ if (next == tsk) ++ break; ++ tsk = next; ++ } ++} ++ ++static inline int __fastpath_timer_check(struct task_struct *tsk) ++{ ++ /* tsk == current, 
ensure it is safe to use ->signal/sighand */ ++ if (unlikely(tsk->exit_state)) ++ return 0; ++ ++ if (!task_cputime_zero(&tsk->cputime_expires)) ++ return 1; ++ ++ if (!task_cputime_zero(&tsk->signal->cputime_expires)) ++ return 1; ++ ++ return 0; + } + ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ unsigned int cpu = smp_processor_id(); ++ struct task_struct *tasklist; ++ ++ BUG_ON(!irqs_disabled()); ++ ++ if (per_cpu(posix_timer_th_active, cpu) != true) ++ return; ++ ++ /* get per-cpu references */ ++ tasklist = per_cpu(posix_timer_tasklist, cpu); ++ ++ /* check to see if we're already queued */ ++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { ++ get_task_struct(tsk); ++ if (tasklist) { ++ tsk->posix_timer_list = tasklist; ++ } else { ++ /* ++ * The list is terminated by a self-pointing ++ * task_struct ++ */ ++ tsk->posix_timer_list = tsk; ++ } ++ per_cpu(posix_timer_tasklist, cpu) = tsk; ++ ++ wake_up_process(per_cpu(posix_timer_task, cpu)); ++ } ++} ++ ++static int posix_cpu_kthread_should_run(unsigned int cpu) ++{ ++ return __this_cpu_read(posix_timer_tasklist) != NULL; ++} ++ ++static void posix_cpu_kthread_park(unsigned int cpu) ++{ ++ this_cpu_write(posix_timer_th_active, false); ++} ++ ++static void posix_cpu_kthread_unpark(unsigned int cpu) ++{ ++ this_cpu_write(posix_timer_th_active, true); ++} ++ ++static void posix_cpu_kthread_setup(unsigned int cpu) ++{ ++ struct sched_param sp; ++ ++ sp.sched_priority = MAX_RT_PRIO - 1; ++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); ++ posix_cpu_kthread_unpark(cpu); ++} ++ ++static struct smp_hotplug_thread posix_cpu_thread = { ++ .store = &posix_timer_task, ++ .thread_should_run = posix_cpu_kthread_should_run, ++ .thread_fn = posix_cpu_kthread_fn, ++ .thread_comm = "posixcputmr/%u", ++ .setup = posix_cpu_kthread_setup, ++ .park = posix_cpu_kthread_park, ++ .unpark = posix_cpu_kthread_unpark, ++}; ++ ++static int __init posix_cpu_thread_init(void) ++{ ++ /* Start one for boot CPU. */ ++ unsigned long cpu; ++ int ret; ++ ++ /* init the per-cpu posix_timer_tasklets */ ++ for_each_possible_cpu(cpu) ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ ++ ret = smpboot_register_percpu_thread(&posix_cpu_thread); ++ WARN_ON(ret); ++ ++ return 0; ++} ++early_initcall(posix_cpu_thread_init); ++#else /* CONFIG_PREEMPT_RT_BASE */ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ lockdep_assert_irqs_disabled(); ++ __run_posix_cpu_timers(tsk); ++} ++#endif /* CONFIG_PREEMPT_RT_BASE */ ++ + /* + * Set one of the process-wide special case CPU timers or RLIMIT_CPU. + * The tsk->sighand->siglock must be held by the caller. +@@ -1318,6 +1489,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, + spin_unlock_irq(&timer.it_lock); + + while (error == TIMER_RETRY) { ++ ++ cpu_timers_grab_expiry_lock(&timer); + /* + * We need to handle case when timer was or is in the + * middle of firing. 
In other cases we already freed +diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c +index 12c4048c5..88efe799f 100644 +--- a/kernel/time/posix-timers.c ++++ b/kernel/time/posix-timers.c +@@ -463,7 +463,7 @@ static struct k_itimer * alloc_posix_timer(void) + + static void k_itimer_rcu_free(struct rcu_head *head) + { +- struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu); ++ struct k_itimer *tmr = container_of(head, struct k_itimer, rcu); + + kmem_cache_free(posix_timers_cache, tmr); + } +@@ -480,7 +480,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set) + } + put_pid(tmr->it_pid); + sigqueue_free(tmr->sigq); +- call_rcu(&tmr->it.rcu, k_itimer_rcu_free); ++ call_rcu(&tmr->rcu, k_itimer_rcu_free); + } + + static int common_timer_create(struct k_itimer *new_timer) +@@ -826,6 +826,17 @@ static int common_hrtimer_try_to_cancel(struct k_itimer *timr) + return hrtimer_try_to_cancel(&timr->it.real.timer); + } + ++static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timer) ++{ ++ if (kc->timer_arm == common_hrtimer_arm) ++ hrtimer_grab_expiry_lock(&timer->it.real.timer); ++ else if (kc == &alarm_clock) ++ hrtimer_grab_expiry_lock(&timer->it.alarm.alarmtimer.timer); ++ else ++ /* posix-cpu-timers */ ++ cpu_timers_grab_expiry_lock(timer); ++} ++ + /* Set a POSIX.1b interval timer. */ + int common_timer_set(struct k_itimer *timr, int flags, + struct itimerspec64 *new_setting, +@@ -891,11 +902,15 @@ static int do_timer_settime(timer_t timer_id, int flags, + else + error = kc->timer_set(timr, flags, new_spec64, old_spec64); + +- unlock_timer(timr, flag); + if (error == TIMER_RETRY) { ++ rcu_read_lock(); ++ unlock_timer(timr, flag); ++ timer_wait_for_callback(kc, timr); ++ rcu_read_unlock(); + old_spec64 = NULL; // We already got the old time... + goto retry; + } ++ unlock_timer(timr, flag); + + return error; + } +@@ -957,13 +972,21 @@ int common_timer_del(struct k_itimer *timer) + return 0; + } + +-static inline int timer_delete_hook(struct k_itimer *timer) ++static int timer_delete_hook(struct k_itimer *timer) + { + const struct k_clock *kc = timer->kclock; ++ int ret; + + if (WARN_ON_ONCE(!kc || !kc->timer_del)) + return -EINVAL; +- return kc->timer_del(timer); ++ ret = kc->timer_del(timer); ++ if (ret == TIMER_RETRY) { ++ rcu_read_lock(); ++ spin_unlock_irq(&timer->it_lock); ++ timer_wait_for_callback(kc, timer); ++ rcu_read_unlock(); ++ } ++ return ret; + } + + /* Delete a POSIX.1b interval timer. 
*/ +@@ -977,10 +1000,8 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) + if (!timer) + return -EINVAL; + +- if (timer_delete_hook(timer) == TIMER_RETRY) { +- unlock_timer(timer, flags); ++ if (timer_delete_hook(timer) == TIMER_RETRY) + goto retry_delete; +- } + + spin_lock(¤t->sighand->siglock); + list_del(&timer->list); +@@ -1006,10 +1027,9 @@ static void itimer_delete(struct k_itimer *timer) + retry_delete: + spin_lock_irqsave(&timer->it_lock, flags); + +- if (timer_delete_hook(timer) == TIMER_RETRY) { +- unlock_timer(timer, flags); ++ if (timer_delete_hook(timer) == TIMER_RETRY) + goto retry_delete; +- } ++ + list_del(&timer->list); + /* + * This keeps any tasks waiting on the spin lock from thinking +diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h +index ddb211452..725bd230a 100644 +--- a/kernel/time/posix-timers.h ++++ b/kernel/time/posix-timers.h +@@ -32,6 +32,8 @@ extern const struct k_clock clock_process; + extern const struct k_clock clock_thread; + extern const struct k_clock alarm_clock; + ++extern void cpu_timers_grab_expiry_lock(struct k_itimer *timer); ++ + int posix_timer_event(struct k_itimer *timr, int si_private); + + void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting); +diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c +index a836efd34..c50e8f326 100644 +--- a/kernel/time/tick-broadcast-hrtimer.c ++++ b/kernel/time/tick-broadcast-hrtimer.c +@@ -107,7 +107,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t) + + void tick_setup_hrtimer_broadcast(void) + { +- hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); + bctimer.function = bc_handler; + clockevents_register_device(&ce_broadcast_hrtimer); + } +diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c +index 0a3cc37e4..7bd136b64 100644 +--- a/kernel/time/tick-common.c ++++ b/kernel/time/tick-common.c +@@ -80,13 +80,15 @@ int tick_is_oneshot_available(void) + static void tick_periodic(int cpu) + { + if (tick_do_timer_cpu == cpu) { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + /* Keep track of the next tick event */ + tick_next_period = ktime_add(tick_next_period, tick_period); + + do_timer(1); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } + +@@ -158,9 +160,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) + ktime_t next; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + next = tick_next_period; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); + +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c +index 5b33e2f5c..da4a3f8fe 100644 +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -67,7 +67,8 @@ static void tick_do_update_jiffies64(ktime_t now) + return; + + /* Reevaluate with jiffies_lock held */ +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + delta = ktime_sub(now, last_jiffies_update); + if (delta >= tick_period) { +@@ -90,10 +91,12 @@ static void tick_do_update_jiffies64(ktime_t now) + /* Keep the tick_next_period variable up to date */ + tick_next_period = ktime_add(last_jiffies_update, tick_period); + } else { +- 
write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return; + } +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } + +@@ -104,12 +107,14 @@ static ktime_t tick_init_jiffy_update(void) + { + ktime_t period; + +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + /* Did we start the jiffies update yet ? */ + if (last_jiffies_update == 0) + last_jiffies_update = tick_next_period; + period = last_jiffies_update; +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return period; + } + +@@ -227,6 +232,7 @@ static void nohz_full_kick_func(struct irq_work *work) + + static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { + .func = nohz_full_kick_func, ++ .flags = IRQ_WORK_HARD_IRQ, + }; + + /* +@@ -652,10 +658,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) + + /* Read jiffies and the time when jiffies were updated last */ + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + basemono = last_jiffies_update; + basejiff = jiffies; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + ts->last_jiffies = basejiff; + ts->timer_expires_base = basemono; + +@@ -886,14 +892,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) + return false; + + if (unlikely(local_softirq_pending() && cpu_online(cpu))) { +- static int ratelimit; +- +- if (ratelimit < 10 && +- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { +- pr_warn("NOHZ: local_softirq_pending %02x\n", +- (unsigned int) local_softirq_pending()); +- ratelimit++; +- } ++ softirq_check_pending_idle(); + return false; + } + +@@ -1305,7 +1304,7 @@ void tick_setup_sched_timer(void) + /* + * Emulate tick processing via per-CPU hrtimers: + */ +- hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); + ts->sched_timer.function = tick_sched_timer; + + /* Get the next period (per-CPU) */ +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index f246818e3..2cc8a1e8e 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -2392,8 +2392,10 @@ EXPORT_SYMBOL(hardpps); + */ + void xtime_update(unsigned long ticks) + { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + do_timer(ticks); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } +diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h +index 141ab3ab0..099737f6f 100644 +--- a/kernel/time/timekeeping.h ++++ b/kernel/time/timekeeping.h +@@ -25,7 +25,8 @@ static inline void sched_clock_resume(void) { } + extern void do_timer(unsigned long ticks); + extern void update_wall_time(void); + +-extern seqlock_t jiffies_lock; ++extern raw_spinlock_t jiffies_lock; ++extern seqcount_t jiffies_seq; + + #define CS_NAME_LEN 32 + +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 8b6b33b81..4b8d3c37f 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -198,6 +198,7 @@ EXPORT_SYMBOL(jiffies_64); + struct timer_base { + raw_spinlock_t lock; + struct timer_list *running_timer; ++ spinlock_t expiry_lock; + unsigned long clk; + unsigned long next_expiry; + 
unsigned int cpu; +@@ -214,8 +215,7 @@ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); + static DEFINE_STATIC_KEY_FALSE(timers_nohz_active); + static DEFINE_MUTEX(timer_keys_mutex); + +-static void timer_update_keys(struct work_struct *work); +-static DECLARE_WORK(timer_update_work, timer_update_keys); ++static struct swork_event timer_update_swork; + + #ifdef CONFIG_SMP + unsigned int sysctl_timer_migration = 1; +@@ -233,7 +233,7 @@ static void timers_update_migration(void) + static inline void timers_update_migration(void) { } + #endif /* !CONFIG_SMP */ + +-static void timer_update_keys(struct work_struct *work) ++static void timer_update_keys(struct swork_event *event) + { + mutex_lock(&timer_keys_mutex); + timers_update_migration(); +@@ -243,9 +243,17 @@ static void timer_update_keys(struct work_struct *work) + + void timers_update_nohz(void) + { +- schedule_work(&timer_update_work); ++ swork_queue(&timer_update_swork); + } + ++static __init int hrtimer_init_thread(void) ++{ ++ WARN_ON(swork_get()); ++ INIT_SWORK(&timer_update_swork, timer_update_keys); ++ return 0; ++} ++early_initcall(hrtimer_init_thread); ++ + int timer_migration_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +@@ -1220,14 +1228,8 @@ int del_timer(struct timer_list *timer) + } + EXPORT_SYMBOL(del_timer); + +-/** +- * try_to_del_timer_sync - Try to deactivate a timer +- * @timer: timer to delete +- * +- * This function tries to deactivate a timer. Upon successful (ret >= 0) +- * exit the timer is not queued and the handler is not running on any CPU. +- */ +-int try_to_del_timer_sync(struct timer_list *timer) ++static int __try_to_del_timer_sync(struct timer_list *timer, ++ struct timer_base **basep) + { + struct timer_base *base; + unsigned long flags; +@@ -1235,7 +1237,7 @@ int try_to_del_timer_sync(struct timer_list *timer) + + debug_assert_init(timer); + +- base = lock_timer_base(timer, &flags); ++ *basep = base = lock_timer_base(timer, &flags); + + if (base->running_timer != timer) + ret = detach_if_pending(timer, base, true); +@@ -1244,9 +1246,42 @@ int try_to_del_timer_sync(struct timer_list *timer) + + return ret; + } ++ ++/** ++ * try_to_del_timer_sync - Try to deactivate a timer ++ * @timer: timer to delete ++ * ++ * This function tries to deactivate a timer. Upon successful (ret >= 0) ++ * exit the timer is not queued and the handler is not running on any CPU. ++ */ ++int try_to_del_timer_sync(struct timer_list *timer) ++{ ++ struct timer_base *base; ++ ++ return __try_to_del_timer_sync(timer, &base); ++} + EXPORT_SYMBOL(try_to_del_timer_sync); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) ++static int __del_timer_sync(struct timer_list *timer) ++{ ++ struct timer_base *base; ++ int ret; ++ ++ for (;;) { ++ ret = __try_to_del_timer_sync(timer, &base); ++ if (ret >= 0) ++ return ret; ++ ++ /* ++ * When accessing the lock, timers of base are no longer expired ++ * and so timer is no longer running. ++ */ ++ spin_lock(&base->expiry_lock); ++ spin_unlock(&base->expiry_lock); ++ } ++} ++ + /** + * del_timer_sync - deactivate a timer and wait for the handler to finish. + * @timer: the timer to be deactivated +@@ -1302,12 +1337,8 @@ int del_timer_sync(struct timer_list *timer) + * could lead to deadlock. 
+ */ + WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); +- for (;;) { +- int ret = try_to_del_timer_sync(timer); +- if (ret >= 0) +- return ret; +- cpu_relax(); +- } ++ ++ return __del_timer_sync(timer); + } + EXPORT_SYMBOL(del_timer_sync); + #endif +@@ -1367,13 +1398,20 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head) + + fn = timer->function; + +- if (timer->flags & TIMER_IRQSAFE) { ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && ++ timer->flags & TIMER_IRQSAFE) { + raw_spin_unlock(&base->lock); + call_timer_fn(timer, fn); ++ base->running_timer = NULL; ++ spin_unlock(&base->expiry_lock); ++ spin_lock(&base->expiry_lock); + raw_spin_lock(&base->lock); + } else { + raw_spin_unlock_irq(&base->lock); + call_timer_fn(timer, fn); ++ base->running_timer = NULL; ++ spin_unlock(&base->expiry_lock); ++ spin_lock(&base->expiry_lock); + raw_spin_lock_irq(&base->lock); + } + } +@@ -1670,6 +1708,7 @@ static inline void __run_timers(struct timer_base *base) + if (!time_after_eq(jiffies, base->clk)) + return; + ++ spin_lock(&base->expiry_lock); + raw_spin_lock_irq(&base->lock); + + /* +@@ -1696,8 +1735,8 @@ static inline void __run_timers(struct timer_base *base) + while (levels--) + expire_timers(base, heads + levels); + } +- base->running_timer = NULL; + raw_spin_unlock_irq(&base->lock); ++ spin_unlock(&base->expiry_lock); + } + + /* +@@ -1707,6 +1746,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) + { + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + ++ irq_work_tick_soft(); ++ + __run_timers(base); + if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); +@@ -1942,6 +1983,7 @@ static void __init init_timer_cpu(int cpu) + base->cpu = cpu; + raw_spin_lock_init(&base->lock); + base->clk = jiffies; ++ spin_lock_init(&base->expiry_lock); + } + } + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 119dd5fd5..dd12bf89a 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -2121,6 +2121,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, + struct task_struct *tsk = current; + + entry->preempt_count = pc & 0xff; ++ entry->preempt_lazy_count = preempt_lazy_count(); + entry->pid = (tsk) ? tsk->pid : 0; + entry->flags = + #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +@@ -2131,8 +2132,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, + ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | + ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | +- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | + (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); ++ ++ entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; + } + EXPORT_SYMBOL_GPL(tracing_generic_entry_update); + +@@ -3327,14 +3331,17 @@ get_total_entries(struct trace_buffer *buf, + + static void print_lat_help_header(struct seq_file *m) + { +- seq_puts(m, "# _------=> CPU# \n" +- "# / _-----=> irqs-off \n" +- "# | / _----=> need-resched \n" +- "# || / _---=> hardirq/softirq \n" +- "# ||| / _--=> preempt-depth \n" +- "# |||| / delay \n" +- "# cmd pid ||||| time | caller \n" +- "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# _--------=> CPU# \n" ++ "# / _-------=> irqs-off \n" ++ "# | / _------=> need-resched \n" ++ "# || / _-----=> need-resched_lazy \n" ++ "# ||| / _----=> hardirq/softirq \n" ++ "# |||| / _---=> preempt-depth \n" ++ "# ||||| / _--=> preempt-lazy-depth\n" ++ "# |||||| / _-=> migrate-disable \n" ++ "# ||||||| / delay \n" ++ "# cmd pid |||||||| time | caller \n" ++ "# \\ / |||||||| \\ | / \n"); + } + + static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +@@ -3372,15 +3379,17 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file + tgid ? tgid_space : space); + seq_printf(m, "# %s / _----=> need-resched\n", + tgid ? tgid_space : space); +- seq_printf(m, "# %s| / _---=> hardirq/softirq\n", ++ seq_printf(m, "# %s| / _---=> need-resched_lazy\n", ++ tgid ? tgid_space : space); ++ seq_printf(m, "# %s|| / _--=> hardirq/softirq\n", + tgid ? tgid_space : space); +- seq_printf(m, "# %s|| / _--=> preempt-depth\n", ++ seq_printf(m, "# %s||| / preempt-depth\n", + tgid ? tgid_space : space); +- seq_printf(m, "# %s||| / delay\n", ++ seq_printf(m, "# %s|||| / delay\n", + tgid ? tgid_space : space); +- seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", ++ seq_printf(m, "# TASK-PID %sCPU# ||||| TIMESTAMP FUNCTION\n", + tgid ? " TGID " : space); +- seq_printf(m, "# | | %s | |||| | |\n", ++ seq_printf(m, "# | | %s | ||||| | |\n", + tgid ? " | " : space); + } + +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index d05230d21..08841925d 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -127,6 +127,7 @@ struct kretprobe_trace_entry_head { + * NEED_RESCHED - reschedule is requested + * HARDIRQ - inside an interrupt handler + * SOFTIRQ - inside a softirq handler ++ * NEED_RESCHED_LAZY - lazy reschedule is requested + */ + enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, +@@ -136,6 +137,7 @@ enum trace_flag_type { + TRACE_FLAG_SOFTIRQ = 0x10, + TRACE_FLAG_PREEMPT_RESCHED = 0x20, + TRACE_FLAG_NMI = 0x40, ++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80, + }; + + #define TRACE_BUF_SIZE 1024 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index 531e6e8f7..664c9738f 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -188,6 +188,8 @@ static int trace_define_common_fields(void) + __common_field(unsigned char, flags); + __common_field(unsigned char, preempt_count); + __common_field(int, pid); ++ __common_field(unsigned char, migrate_disable); ++ __common_field(unsigned char, preempt_lazy_count); + + return ret; + } +diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c +index c6cd54cf7..ae84fa5be 100644 +--- a/kernel/trace/trace_hwlat.c ++++ b/kernel/trace/trace_hwlat.c +@@ -279,7 +279,7 @@ static void move_to_next_cpu(void) + * of this thread, than stop migrating for the duration + * of the current test. 
+ */ +- if (!cpumask_equal(current_mask, ¤t->cpus_allowed)) ++ if (!cpumask_equal(current_mask, current->cpus_ptr)) + goto disable; + + get_online_cpus(); +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c +index 6e6cc64fa..3f78b0afb 100644 +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -448,6 +448,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) + { + char hardsoft_irq; + char need_resched; ++ char need_resched_lazy; + char irqs_off; + int hardirq; + int softirq; +@@ -478,6 +479,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) + break; + } + ++ need_resched_lazy = ++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; ++ + hardsoft_irq = + (nmi && hardirq) ? 'Z' : + nmi ? 'z' : +@@ -486,14 +490,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) + softirq ? 's' : + '.' ; + +- trace_seq_printf(s, "%c%c%c", +- irqs_off, need_resched, hardsoft_irq); ++ trace_seq_printf(s, "%c%c%c%c", ++ irqs_off, need_resched, need_resched_lazy, ++ hardsoft_irq); + + if (entry->preempt_count) + trace_seq_printf(s, "%x", entry->preempt_count); + else + trace_seq_putc(s, '.'); + ++ if (entry->preempt_lazy_count) ++ trace_seq_printf(s, "%x", entry->preempt_lazy_count); ++ else ++ trace_seq_putc(s, '.'); ++ ++ if (entry->migrate_disable) ++ trace_seq_printf(s, "%x", entry->migrate_disable); ++ else ++ trace_seq_putc(s, '.'); ++ + return !trace_seq_has_overflowed(s); + } + +diff --git a/kernel/watchdog.c b/kernel/watchdog.c +index 463c4c11c..26b8a86cf 100644 +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -500,7 +500,7 @@ static void watchdog_enable(unsigned int cpu) + * Start the timer first to prevent the NMI watchdog triggering + * before the timer has a chance to fire. + */ +- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + hrtimer->function = watchdog_timer_fn; + hrtimer_start(hrtimer, ns_to_ktime(sample_period), + HRTIMER_MODE_REL_PINNED); +diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c +index 43832b102..e07b84dab 100644 +--- a/kernel/watchdog_hld.c ++++ b/kernel/watchdog_hld.c +@@ -23,6 +23,7 @@ + + static DEFINE_PER_CPU(bool, hard_watchdog_warn); + static DEFINE_PER_CPU(bool, watchdog_nmi_touch); ++static DEFINE_RAW_SPINLOCK(watchdog_output_lock); + + static unsigned long hardlockup_allcpu_dumped; + +@@ -331,6 +332,13 @@ void watchdog_hardlockup_check(struct pt_regs *regs) + /* only print hardlockups once */ + if (__this_cpu_read(hard_watchdog_warn) == true) + return; ++ /* ++ * If early-printk is enabled then make sure we do not ++ * lock up in printk() and kill console logging: ++ */ ++ printk_kill(); ++ ++ raw_spin_lock(&watchdog_output_lock); + + pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); + print_modules(); +@@ -348,6 +356,7 @@ void watchdog_hardlockup_check(struct pt_regs *regs) + !test_and_set_bit(0, &hardlockup_allcpu_dumped)) + trigger_allbutself_cpu_backtrace(); + ++ raw_spin_unlock(&watchdog_output_lock); + if (hardlockup_panic) + nmi_panic(regs, "Hard LOCKUP"); + +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 04b558a26..59be58862 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -129,7 +129,7 @@ enum { + * + * PL: wq_pool_mutex protected. + * +- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. ++ * PR: wq_pool_mutex protected for writes. RCU protected for reads. 
+ * + * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. + * +@@ -138,7 +138,7 @@ enum { + * + * WQ: wq->mutex protected. + * +- * WR: wq->mutex protected for writes. Sched-RCU protected for reads. ++ * WR: wq->mutex protected for writes. RCU protected for reads. + * + * MD: wq_mayday_lock protected. + */ +@@ -146,7 +146,7 @@ enum { + /* struct worker is defined in workqueue_internal.h */ + + struct worker_pool { +- spinlock_t lock; /* the pool lock */ ++ raw_spinlock_t lock; /* the pool lock */ + int cpu; /* I: the associated cpu */ + int node; /* I: the associated node ID */ + int id; /* I: pool ID */ +@@ -185,7 +185,7 @@ struct worker_pool { + atomic_t nr_running ____cacheline_aligned_in_smp; + + /* +- * Destruction of pool is sched-RCU protected to allow dereferences ++ * Destruction of pool is RCU protected to allow dereferences + * from get_work_pool(). + */ + struct rcu_head rcu; +@@ -214,7 +214,7 @@ struct pool_workqueue { + /* + * Release of unbound pwq is punted to system_wq. See put_pwq() + * and pwq_unbound_release_workfn() for details. pool_workqueue +- * itself is also sched-RCU protected so that the first pwq can be ++ * itself is also RCU protected so that the first pwq can be + * determined without grabbing wq->mutex. + */ + struct work_struct unbound_release_work; +@@ -299,8 +299,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf; + + static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ + static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */ +-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ +-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ ++static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ ++static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ + + static LIST_HEAD(workqueues); /* PR: list of all workqueues */ + static bool workqueue_freezing; /* PL: have wqs started freezing? */ +@@ -359,20 +359,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); + #include + + #define assert_rcu_or_pool_mutex() \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq_pool_mutex), \ +- "sched RCU or wq_pool_mutex should be held") ++ "RCU or wq_pool_mutex should be held") + + #define assert_rcu_or_wq_mutex(wq) \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq->mutex), \ +- "sched RCU or wq->mutex should be held") ++ "RCU or wq->mutex should be held") + + #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq->mutex) && \ + !lockdep_is_held(&wq_pool_mutex), \ +- "sched RCU, wq->mutex or wq_pool_mutex should be held") ++ "RCU, wq->mutex or wq_pool_mutex should be held") + + #define for_each_cpu_worker_pool(pool, cpu) \ + for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ +@@ -384,7 +384,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); + * @pool: iteration cursor + * @pi: integer used for iteration + * +- * This must be called either with wq_pool_mutex held or sched RCU read ++ * This must be called either with wq_pool_mutex held or RCU read + * locked. If the pool needs to be used beyond the locking in effect, the + * caller is responsible for guaranteeing that the pool stays online. 
+ * +@@ -416,7 +416,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); + * @pwq: iteration cursor + * @wq: the target workqueue + * +- * This must be called either with wq->mutex held or sched RCU read locked. ++ * This must be called either with wq->mutex held or RCU read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. + * +@@ -552,7 +552,7 @@ static int worker_pool_assign_id(struct worker_pool *pool) + * @wq: the target workqueue + * @node: the node ID + * +- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU ++ * This must be called with any of wq_pool_mutex, wq->mutex or RCU + * read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. +@@ -696,8 +696,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) + * @work: the work item of interest + * + * Pools are created and destroyed under wq_pool_mutex, and allows read +- * access under sched-RCU read lock. As such, this function should be +- * called under wq_pool_mutex or with preemption disabled. ++ * access under RCU read lock. As such, this function should be ++ * called under wq_pool_mutex or inside of a rcu_read_lock() region. + * + * All fields of the returned pool are accessible as long as the above + * mentioned locking is in effect. If the returned pool needs to be used +@@ -830,7 +830,7 @@ static struct worker *first_idle_worker(struct worker_pool *pool) + * Wake up the first idle worker of @pool. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void wake_up_worker(struct worker_pool *pool) + { +@@ -883,7 +883,7 @@ void wq_worker_sleeping(struct task_struct *task) + return; + + worker->sleeping = 1; +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + /* + * The counterpart of the following dec_and_test, implied mb, +@@ -902,7 +902,7 @@ void wq_worker_sleeping(struct task_struct *task) + if (next) + wake_up_process(next->task); + } +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + } + + /** +@@ -913,7 +913,7 @@ void wq_worker_sleeping(struct task_struct *task) + * Set @flags in @worker->flags and adjust nr_running accordingly. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) ++ * raw_spin_lock_irq(pool->lock) + */ + static inline void worker_set_flags(struct worker *worker, unsigned int flags) + { +@@ -938,7 +938,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags) + * Clear @flags in @worker->flags and adjust nr_running accordingly. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) ++ * raw_spin_lock_irq(pool->lock) + */ + static inline void worker_clr_flags(struct worker *worker, unsigned int flags) + { +@@ -986,7 +986,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) + * actually occurs, it should be easy to locate the culprit work function. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + * + * Return: + * Pointer to worker which is executing @work if found, %NULL +@@ -1021,7 +1021,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool, + * nested inside outer list_for_each_entry_safe(). + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). 
+ */ + static void move_linked_works(struct work_struct *work, struct list_head *head, + struct work_struct **nextp) +@@ -1096,12 +1096,12 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) + { + if (pwq) { + /* +- * As both pwqs and pools are sched-RCU protected, the ++ * As both pwqs and pools are RCU protected, the + * following lock operations are safe. + */ +- spin_lock_irq(&pwq->pool->lock); ++ raw_spin_lock_irq(&pwq->pool->lock); + put_pwq(pwq); +- spin_unlock_irq(&pwq->pool->lock); ++ raw_spin_unlock_irq(&pwq->pool->lock); + } + } + +@@ -1134,7 +1134,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq) + * decrement nr_in_flight of its pwq and handle workqueue flushing. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) + { +@@ -1224,6 +1224,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) + return 0; + ++ rcu_read_lock(); + /* + * The queueing is in progress, or it is already queued. Try to + * steal it from ->worklist without clearing WORK_STRUCT_PENDING. +@@ -1232,7 +1233,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, + if (!pool) + goto fail; + +- spin_lock(&pool->lock); ++ raw_spin_lock(&pool->lock); + /* + * work->data is guaranteed to point to pwq only while the work + * item is queued on pwq->wq, and both updating work->data to point +@@ -1261,11 +1262,13 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, + /* work->data points to pwq iff queued, point to pool */ + set_work_pool_and_keep_pending(work, pool->id); + +- spin_unlock(&pool->lock); ++ raw_spin_unlock(&pool->lock); ++ rcu_read_unlock(); + return 1; + } +- spin_unlock(&pool->lock); ++ raw_spin_unlock(&pool->lock); + fail: ++ rcu_read_unlock(); + local_irq_restore(*flags); + if (work_is_canceling(work)) + return -ENOENT; +@@ -1284,7 +1287,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, + * work_struct flags. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, + struct list_head *head, unsigned int extra_flags) +@@ -1378,6 +1381,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + if (unlikely(wq->flags & __WQ_DRAINING) && + WARN_ON_ONCE(!is_chained_work(wq))) + return; ++ rcu_read_lock(); + retry: + /* pwq which will be used unless @work is executing elsewhere */ + if (wq->flags & WQ_UNBOUND) { +@@ -1399,7 +1403,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + if (last_pool && last_pool != pwq->pool) { + struct worker *worker; + +- spin_lock(&last_pool->lock); ++ raw_spin_lock(&last_pool->lock); + + worker = find_worker_executing_work(last_pool, work); + +@@ -1407,11 +1411,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + pwq = worker->current_pwq; + } else { + /* meh... 
not running there, queue here */ +- spin_unlock(&last_pool->lock); +- spin_lock(&pwq->pool->lock); ++ raw_spin_unlock(&last_pool->lock); ++ raw_spin_lock(&pwq->pool->lock); + } + } else { +- spin_lock(&pwq->pool->lock); ++ raw_spin_lock(&pwq->pool->lock); + } + + /* +@@ -1424,7 +1428,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + */ + if (unlikely(!pwq->refcnt)) { + if (wq->flags & WQ_UNBOUND) { +- spin_unlock(&pwq->pool->lock); ++ raw_spin_unlock(&pwq->pool->lock); + cpu_relax(); + goto retry; + } +@@ -1436,10 +1440,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + /* pwq determined, queue */ + trace_workqueue_queue_work(req_cpu, pwq, work); + +- if (WARN_ON(!list_empty(&work->entry))) { +- spin_unlock(&pwq->pool->lock); +- return; +- } ++ if (WARN_ON(!list_empty(&work->entry))) ++ goto out; + + pwq->nr_in_flight[pwq->work_color]++; + work_flags = work_color_to_flags(pwq->work_color); +@@ -1458,7 +1460,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + debug_work_activate(work); + insert_work(pwq, work, worklist, work_flags); + +- spin_unlock(&pwq->pool->lock); ++out: ++ raw_spin_unlock(&pwq->pool->lock); ++ rcu_read_unlock(); + } + + /** +@@ -1577,9 +1581,11 @@ EXPORT_SYMBOL_GPL(queue_work_node); + void delayed_work_timer_fn(struct timer_list *t) + { + struct delayed_work *dwork = from_timer(dwork, t, timer); ++ unsigned long flags; + +- /* should have been called from irqsafe timer with irq already off */ ++ local_irq_save(flags); + __queue_work(dwork->cpu, dwork->wq, &dwork->work); ++ local_irq_restore(flags); + } + EXPORT_SYMBOL(delayed_work_timer_fn); + +@@ -1726,7 +1732,7 @@ EXPORT_SYMBOL(queue_rcu_work); + * necessary. + * + * LOCKING: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void worker_enter_idle(struct worker *worker) + { +@@ -1766,7 +1772,7 @@ static void worker_enter_idle(struct worker *worker) + * @worker is leaving idle state. Update stats. + * + * LOCKING: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void worker_leave_idle(struct worker *worker) + { +@@ -1901,11 +1907,11 @@ static struct worker *create_worker(struct worker_pool *pool) + worker_attach_to_pool(worker, pool); + + /* start the newly created worker */ +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + worker->pool->nr_workers++; + worker_enter_idle(worker); + wake_up_process(worker->task); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + return worker; + +@@ -1924,7 +1930,7 @@ static struct worker *create_worker(struct worker_pool *pool) + * be idle. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). 
+ */ + static void destroy_worker(struct worker *worker) + { +@@ -1950,7 +1956,7 @@ static void idle_worker_timeout(struct timer_list *t) + { + struct worker_pool *pool = from_timer(pool, t, idle_timer); + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + while (too_many_workers(pool)) { + struct worker *worker; +@@ -1968,7 +1974,7 @@ static void idle_worker_timeout(struct timer_list *t) + destroy_worker(worker); + } + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + } + + static void send_mayday(struct work_struct *work) +@@ -1999,8 +2005,8 @@ static void pool_mayday_timeout(struct timer_list *t) + struct worker_pool *pool = from_timer(pool, t, mayday_timer); + struct work_struct *work; + +- spin_lock_irq(&pool->lock); +- spin_lock(&wq_mayday_lock); /* for wq->maydays */ ++ raw_spin_lock_irq(&pool->lock); ++ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ + + if (need_to_create_worker(pool)) { + /* +@@ -2013,8 +2019,8 @@ static void pool_mayday_timeout(struct timer_list *t) + send_mayday(work); + } + +- spin_unlock(&wq_mayday_lock); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock(&wq_mayday_lock); ++ raw_spin_unlock_irq(&pool->lock); + + mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); + } +@@ -2033,7 +2039,7 @@ static void pool_mayday_timeout(struct timer_list *t) + * may_start_working() %true. + * + * LOCKING: +- * spin_lock_irq(pool->lock) which may be released and regrabbed ++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. Called only from + * manager. + */ +@@ -2042,7 +2048,7 @@ __releases(&pool->lock) + __acquires(&pool->lock) + { + restart: +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ + mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); +@@ -2058,7 +2064,7 @@ __acquires(&pool->lock) + } + + del_timer_sync(&pool->mayday_timer); +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + /* + * This is necessary even after a new worker was just successfully + * created as @pool->lock was dropped and the new worker might have +@@ -2081,7 +2087,7 @@ __acquires(&pool->lock) + * and may_start_working() is true. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) which may be released and regrabbed ++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. + * + * Return: +@@ -2104,7 +2110,7 @@ static bool manage_workers(struct worker *worker) + + pool->manager = NULL; + pool->flags &= ~POOL_MANAGER_ACTIVE; +- wake_up(&wq_manager_wait); ++ swake_up_one(&wq_manager_wait); + return true; + } + +@@ -2120,7 +2126,7 @@ static bool manage_workers(struct worker *worker) + * call this function to process a work. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) which is released and regrabbed. ++ * raw_spin_lock_irq(pool->lock) which is released and regrabbed. 
+ */ + static void process_one_work(struct worker *worker, struct work_struct *work) + __releases(&pool->lock) +@@ -2202,7 +2208,7 @@ __acquires(&pool->lock) + */ + set_work_pool_and_clear_pending(work, pool->id); + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + lock_map_acquire(&pwq->wq->lockdep_map); + lock_map_acquire(&lockdep_map); +@@ -2257,7 +2263,7 @@ __acquires(&pool->lock) + */ + cond_resched(); + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + /* clear cpu intensive status */ + if (unlikely(cpu_intensive)) +@@ -2292,7 +2298,7 @@ __acquires(&pool->lock) + * fetches a work from the top and executes it. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) which may be released and regrabbed ++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed + * multiple times. + */ + static void process_scheduled_works(struct worker *worker) +@@ -2334,11 +2340,11 @@ static int worker_thread(void *__worker) + /* tell the scheduler that this is a workqueue worker */ + set_pf_worker(true); + woke_up: +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + /* am I supposed to die? */ + if (unlikely(worker->flags & WORKER_DIE)) { +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + WARN_ON_ONCE(!list_empty(&worker->entry)); + set_pf_worker(false); + +@@ -2404,7 +2410,7 @@ static int worker_thread(void *__worker) + */ + worker_enter_idle(worker); + __set_current_state(TASK_IDLE); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + schedule(); + goto woke_up; + } +@@ -2458,7 +2464,7 @@ static int rescuer_thread(void *__rescuer) + should_stop = kthread_should_stop(); + + /* see whether any pwq is asking for help */ +- spin_lock_irq(&wq_mayday_lock); ++ raw_spin_lock_irq(&wq_mayday_lock); + + while (!list_empty(&wq->maydays)) { + struct pool_workqueue *pwq = list_first_entry(&wq->maydays, +@@ -2470,11 +2476,11 @@ static int rescuer_thread(void *__rescuer) + __set_current_state(TASK_RUNNING); + list_del_init(&pwq->mayday_node); + +- spin_unlock_irq(&wq_mayday_lock); ++ raw_spin_unlock_irq(&wq_mayday_lock); + + worker_attach_to_pool(rescuer, pool); + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + /* + * Slurp in all works issued via this workqueue and +@@ -2503,7 +2509,7 @@ static int rescuer_thread(void *__rescuer) + * incur MAYDAY_INTERVAL delay inbetween. + */ + if (need_to_create_worker(pool)) { +- spin_lock(&wq_mayday_lock); ++ raw_spin_lock(&wq_mayday_lock); + /* + * Queue iff we aren't racing destruction + * and somebody else hasn't queued it already. +@@ -2512,7 +2518,7 @@ static int rescuer_thread(void *__rescuer) + get_pwq(pwq); + list_add_tail(&pwq->mayday_node, &wq->maydays); + } +- spin_unlock(&wq_mayday_lock); ++ raw_spin_unlock(&wq_mayday_lock); + } + } + +@@ -2530,14 +2536,14 @@ static int rescuer_thread(void *__rescuer) + if (need_more_worker(pool)) + wake_up_worker(pool); + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + worker_detach_from_pool(rescuer); + +- spin_lock_irq(&wq_mayday_lock); ++ raw_spin_lock_irq(&wq_mayday_lock); + } + +- spin_unlock_irq(&wq_mayday_lock); ++ raw_spin_unlock_irq(&wq_mayday_lock); + + if (should_stop) { + __set_current_state(TASK_RUNNING); +@@ -2617,7 +2623,7 @@ static void wq_barrier_func(struct work_struct *work) + * underneath us, so we can't reliably determine pwq from @target. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). 
+ */ + static void insert_wq_barrier(struct pool_workqueue *pwq, + struct wq_barrier *barr, +@@ -2704,7 +2710,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, + for_each_pwq(pwq, wq) { + struct worker_pool *pool = pwq->pool; + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + if (flush_color >= 0) { + WARN_ON_ONCE(pwq->flush_color != -1); +@@ -2721,7 +2727,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, + pwq->work_color = work_color; + } + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + } + + if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) +@@ -2921,9 +2927,9 @@ void drain_workqueue(struct workqueue_struct *wq) + for_each_pwq(pwq, wq) { + bool drained; + +- spin_lock_irq(&pwq->pool->lock); ++ raw_spin_lock_irq(&pwq->pool->lock); + drained = !pwq->nr_active && list_empty(&pwq->delayed_works); +- spin_unlock_irq(&pwq->pool->lock); ++ raw_spin_unlock_irq(&pwq->pool->lock); + + if (drained) + continue; +@@ -2997,14 +3003,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, + + might_sleep(); + +- local_irq_disable(); ++ rcu_read_lock(); + pool = get_work_pool(work); + if (!pool) { +- local_irq_enable(); ++ rcu_read_unlock(); + return false; + } + +- spin_lock(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + /* see the comment in try_to_grab_pending() with the same code */ + pwq = get_work_pwq(work); + if (pwq) { +@@ -3028,7 +3034,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, + check_flush_dependency(pwq->wq, work); + + insert_wq_barrier(pwq, barr, work, worker); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + /* + * Force a lock recursion deadlock when using flush_work() inside a +@@ -3044,10 +3050,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, + lock_map_acquire(&pwq->wq->lockdep_map); + lock_map_release(&pwq->wq->lockdep_map); + } +- ++ rcu_read_unlock(); + return true; + already_gone: +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); ++ rcu_read_unlock(); + return false; + } + +@@ -3385,7 +3392,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context); + * + * Undo alloc_workqueue_attrs(). + */ +-void free_workqueue_attrs(struct workqueue_attrs *attrs) ++static void free_workqueue_attrs(struct workqueue_attrs *attrs) + { + if (attrs) { + free_cpumask_var(attrs->cpumask); +@@ -3395,21 +3402,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs) + + /** + * alloc_workqueue_attrs - allocate a workqueue_attrs +- * @gfp_mask: allocation mask to use + * + * Allocate a new workqueue_attrs, initialize with default settings and + * return it. + * + * Return: The allocated new workqueue_attr on success. %NULL on failure. 
+ */ +-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) ++static struct workqueue_attrs *alloc_workqueue_attrs(void) + { + struct workqueue_attrs *attrs; + +- attrs = kzalloc(sizeof(*attrs), gfp_mask); ++ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); + if (!attrs) + goto fail; +- if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) ++ if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) + goto fail; + + cpumask_copy(attrs->cpumask, cpu_possible_mask); +@@ -3466,7 +3472,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a, + */ + static int init_worker_pool(struct worker_pool *pool) + { +- spin_lock_init(&pool->lock); ++ raw_spin_lock_init(&pool->lock); + pool->id = -1; + pool->cpu = -1; + pool->node = NUMA_NO_NODE; +@@ -3487,7 +3493,7 @@ static int init_worker_pool(struct worker_pool *pool) + pool->refcnt = 1; + + /* shouldn't fail above this point */ +- pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); ++ pool->attrs = alloc_workqueue_attrs(); + if (!pool->attrs) + return -ENOMEM; + return 0; +@@ -3520,7 +3526,7 @@ static void rcu_free_pool(struct rcu_head *rcu) + * put_unbound_pool - put a worker_pool + * @pool: worker_pool to put + * +- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU ++ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU + * safe manner. get_unbound_pool() calls this function on its failure path + * and this function should be able to release pools which went through, + * successfully or not, init_worker_pool(). +@@ -3552,15 +3558,15 @@ static void put_unbound_pool(struct worker_pool *pool) + * @pool's workers from blocking on attach_mutex. We're the last + * manager and @pool gets freed with the flag set. + */ +- spin_lock_irq(&pool->lock); +- wait_event_lock_irq(wq_manager_wait, ++ raw_spin_lock_irq(&pool->lock); ++ swait_event_lock_irq(wq_manager_wait, + !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock); + pool->flags |= POOL_MANAGER_ACTIVE; + + while ((worker = first_idle_worker(pool))) + destroy_worker(worker); + WARN_ON(pool->nr_workers || pool->nr_idle); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + mutex_lock(&wq_pool_attach_mutex); + if (!list_empty(&pool->workers)) +@@ -3574,8 +3580,8 @@ static void put_unbound_pool(struct worker_pool *pool) + del_timer_sync(&pool->idle_timer); + del_timer_sync(&pool->mayday_timer); + +- /* sched-RCU protected to allow dereferences from get_work_pool() */ +- call_rcu_sched(&pool->rcu, rcu_free_pool); ++ /* RCU protected to allow dereferences from get_work_pool() */ ++ call_rcu(&pool->rcu, rcu_free_pool); + } + + /** +@@ -3688,14 +3694,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work) + put_unbound_pool(pool); + mutex_unlock(&wq_pool_mutex); + +- call_rcu_sched(&pwq->rcu, rcu_free_pwq); ++ call_rcu(&pwq->rcu, rcu_free_pwq); + + /* + * If we're the last pwq going away, @wq is already dead and no one + * is gonna access it anymore. Schedule RCU free. 
+ */ + if (is_last) +- call_rcu_sched(&wq->rcu, rcu_free_wq); ++ call_rcu(&wq->rcu, rcu_free_wq); + } + + /** +@@ -3720,7 +3726,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) + return; + + /* this function can be called during early boot w/ irq disabled */ +- spin_lock_irqsave(&pwq->pool->lock, flags); ++ raw_spin_lock_irqsave(&pwq->pool->lock, flags); + + /* + * During [un]freezing, the caller is responsible for ensuring that +@@ -3750,7 +3756,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) + pwq->max_active = 0; + } + +- spin_unlock_irqrestore(&pwq->pool->lock, flags); ++ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + } + + /* initialize newly alloced @pwq which is associated with @wq and @pool */ +@@ -3923,8 +3929,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, + + ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL); + +- new_attrs = alloc_workqueue_attrs(GFP_KERNEL); +- tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); ++ new_attrs = alloc_workqueue_attrs(); ++ tmp_attrs = alloc_workqueue_attrs(); + if (!ctx || !new_attrs || !tmp_attrs) + goto out_free; + +@@ -4063,7 +4069,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, + * + * Return: 0 on success and -errno on failure. + */ +-int apply_workqueue_attrs(struct workqueue_struct *wq, ++static int apply_workqueue_attrs(struct workqueue_struct *wq, + const struct workqueue_attrs *attrs) + { + int ret; +@@ -4074,7 +4080,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, + + return ret; + } +-EXPORT_SYMBOL_GPL(apply_workqueue_attrs); + + /** + * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug +@@ -4152,9 +4157,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, + + use_dfl_pwq: + mutex_lock(&wq->mutex); +- spin_lock_irq(&wq->dfl_pwq->pool->lock); ++ raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); + get_pwq(wq->dfl_pwq); +- spin_unlock_irq(&wq->dfl_pwq->pool->lock); ++ raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); + old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); + out_unlock: + mutex_unlock(&wq->mutex); +@@ -4275,7 +4280,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, + return NULL; + + if (flags & WQ_UNBOUND) { +- wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); ++ wq->unbound_attrs = alloc_workqueue_attrs(); + if (!wq->unbound_attrs) + goto err_free_wq; + } +@@ -4362,9 +4367,9 @@ void destroy_workqueue(struct workqueue_struct *wq) + struct worker *rescuer = wq->rescuer; + + /* this prevents new queueing */ +- spin_lock_irq(&wq_mayday_lock); ++ raw_spin_lock_irq(&wq_mayday_lock); + wq->rescuer = NULL; +- spin_unlock_irq(&wq_mayday_lock); ++ raw_spin_unlock_irq(&wq_mayday_lock); + + /* rescuer will empty maydays list before exiting */ + kthread_stop(rescuer->task); +@@ -4407,7 +4412,7 @@ void destroy_workqueue(struct workqueue_struct *wq) + * The base ref is never dropped on per-cpu pwqs. Directly + * schedule RCU free. + */ +- call_rcu_sched(&wq->rcu, rcu_free_wq); ++ call_rcu(&wq->rcu, rcu_free_wq); + } else { + /* + * We're the sole accessor of @wq at this point. 
Directly +@@ -4517,7 +4522,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) + struct pool_workqueue *pwq; + bool ret; + +- rcu_read_lock_sched(); ++ rcu_read_lock(); ++ preempt_disable(); + + if (cpu == WORK_CPU_UNBOUND) + cpu = smp_processor_id(); +@@ -4528,7 +4534,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) + pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); + + ret = !list_empty(&pwq->delayed_works); +- rcu_read_unlock_sched(); ++ preempt_enable(); ++ rcu_read_unlock(); + + return ret; + } +@@ -4554,15 +4561,15 @@ unsigned int work_busy(struct work_struct *work) + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + +- local_irq_save(flags); ++ rcu_read_lock(); + pool = get_work_pool(work); + if (pool) { +- spin_lock(&pool->lock); ++ raw_spin_lock_irqsave(&pool->lock, flags); + if (find_worker_executing_work(pool, work)) + ret |= WORK_BUSY_RUNNING; +- spin_unlock(&pool->lock); ++ raw_spin_unlock_irqrestore(&pool->lock, flags); + } +- local_irq_restore(flags); ++ rcu_read_unlock(); + + return ret; + } +@@ -4747,7 +4754,7 @@ void show_workqueue_state(void) + unsigned long flags; + int pi; + +- rcu_read_lock_sched(); ++ rcu_read_lock(); + + pr_info("Showing busy workqueues and worker pools:\n"); + +@@ -4767,10 +4774,10 @@ void show_workqueue_state(void) + pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); + + for_each_pwq(pwq, wq) { +- spin_lock_irqsave(&pwq->pool->lock, flags); ++ raw_spin_lock_irqsave(&pwq->pool->lock, flags); + if (pwq->nr_active || !list_empty(&pwq->delayed_works)) + show_pwq(pwq); +- spin_unlock_irqrestore(&pwq->pool->lock, flags); ++ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + /* + * We could be printing a lot from atomic context, e.g. + * sysrq-t -> show_workqueue_state(). Avoid triggering +@@ -4784,7 +4791,7 @@ void show_workqueue_state(void) + struct worker *worker; + bool first = true; + +- spin_lock_irqsave(&pool->lock, flags); ++ raw_spin_lock_irqsave(&pool->lock, flags); + if (pool->nr_workers == pool->nr_idle) + goto next_pool; + +@@ -4803,7 +4810,7 @@ void show_workqueue_state(void) + } + pr_cont("\n"); + next_pool: +- spin_unlock_irqrestore(&pool->lock, flags); ++ raw_spin_unlock_irqrestore(&pool->lock, flags); + /* + * We could be printing a lot from atomic context, e.g. + * sysrq-t -> show_workqueue_state(). Avoid triggering +@@ -4812,7 +4819,7 @@ void show_workqueue_state(void) + touch_nmi_watchdog(); + } + +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + } + + /* used to show worker information through /proc/PID/{comm,stat,status} */ +@@ -4833,7 +4840,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task) + struct worker_pool *pool = worker->pool; + + if (pool) { +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + /* + * ->desc tracks information (wq name or + * set_worker_desc()) for the latest execution. If +@@ -4847,7 +4854,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task) + scnprintf(buf + off, size - off, "-%s", + worker->desc); + } +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + } + } + +@@ -4878,7 +4885,7 @@ static void unbind_workers(int cpu) + + for_each_cpu_worker_pool(pool, cpu) { + mutex_lock(&wq_pool_attach_mutex); +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + /* + * We've blocked all attach/detach operations. 
Make all workers +@@ -4892,7 +4899,7 @@ static void unbind_workers(int cpu) + + pool->flags |= POOL_DISASSOCIATED; + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + mutex_unlock(&wq_pool_attach_mutex); + + /* +@@ -4918,9 +4925,9 @@ static void unbind_workers(int cpu) + * worker blocking could lead to lengthy stalls. Kick off + * unbound chain execution of currently pending work items. + */ +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + wake_up_worker(pool); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + } + } + +@@ -4947,7 +4954,7 @@ static void rebind_workers(struct worker_pool *pool) + WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, + pool->attrs->cpumask) < 0); + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + pool->flags &= ~POOL_DISASSOCIATED; + +@@ -4986,7 +4993,7 @@ static void rebind_workers(struct worker_pool *pool) + WRITE_ONCE(worker->flags, worker_flags); + } + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + } + + /** +@@ -5199,16 +5206,16 @@ bool freeze_workqueues_busy(void) + * nr_active is monotonically decreasing. It's safe + * to peek without lock. + */ +- rcu_read_lock_sched(); ++ rcu_read_lock(); + for_each_pwq(pwq, wq) { + WARN_ON_ONCE(pwq->nr_active < 0); + if (pwq->nr_active) { + busy = true; +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + goto out_unlock; + } + } +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + } + out_unlock: + mutex_unlock(&wq_pool_mutex); +@@ -5403,7 +5410,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, + const char *delim = ""; + int node, written = 0; + +- rcu_read_lock_sched(); ++ get_online_cpus(); ++ rcu_read_lock(); + for_each_node(node) { + written += scnprintf(buf + written, PAGE_SIZE - written, + "%s%d:%d", delim, node, +@@ -5411,7 +5419,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, + delim = " "; + } + written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); ++ put_online_cpus(); + + return written; + } +@@ -5436,7 +5445,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) + + lockdep_assert_held(&wq_pool_mutex); + +- attrs = alloc_workqueue_attrs(GFP_KERNEL); ++ attrs = alloc_workqueue_attrs(); + if (!attrs) + return NULL; + +@@ -5865,7 +5874,7 @@ static void __init wq_numa_init(void) + return; + } + +- wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL); ++ wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(); + BUG_ON(!wq_update_unbound_numa_attrs_buf); + + /* +@@ -5940,7 +5949,7 @@ int __init workqueue_init_early(void) + for (i = 0; i < NR_STD_WORKER_POOLS; i++) { + struct workqueue_attrs *attrs; + +- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); ++ BUG_ON(!(attrs = alloc_workqueue_attrs())); + attrs->nice = std_nice[i]; + unbound_std_wq_attrs[i] = attrs; + +@@ -5949,7 +5958,7 @@ int __init workqueue_init_early(void) + * guaranteed by max_active which is enforced by pwqs. + * Turn off NUMA so that dfl_pwq is used for all nodes. 
+ */ +- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); ++ BUG_ON(!(attrs = alloc_workqueue_attrs())); + attrs->nice = std_nice[i]; + attrs->no_numa = true; + ordered_wq_attrs[i] = attrs; +diff --git a/lib/Kconfig b/lib/Kconfig +index edb7d40d1..5bf23108d 100644 +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -441,6 +441,7 @@ config CHECK_SIGNATURE + + config CPUMASK_OFFSTACK + bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS ++ depends on !PREEMPT_RT_FULL + help + Use dynamic allocation for cpumask_var_t, instead of putting + them on the stack. This is a bit more expensive, but avoids +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 4a78bacd4..58d68b6cd 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -1233,7 +1233,7 @@ config DEBUG_ATOMIC_SLEEP + + config DEBUG_LOCKING_API_SELFTESTS + bool "Locking API boot-time self-tests" +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && !PREEMPT_RT_FULL + help + Say Y here if you want the kernel to run a short self-test during + bootup. The self-test checks whether common types of locking bugs +diff --git a/lib/debugobjects.c b/lib/debugobjects.c +index 14afeeb7d..e28481c40 100644 +--- a/lib/debugobjects.c ++++ b/lib/debugobjects.c +@@ -376,7 +376,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) + struct debug_obj *obj; + unsigned long flags; + +- fill_pool(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (preempt_count() == 0 && !irqs_disabled()) ++#endif ++ fill_pool(); + + db = get_bucket((unsigned long) addr); + +diff --git a/lib/irq_poll.c b/lib/irq_poll.c +index 86a709954..9c069ef83 100644 +--- a/lib/irq_poll.c ++++ b/lib/irq_poll.c +@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop) + list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(irq_poll_sched); + +@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop) + local_irq_save(flags); + __irq_poll_complete(iop); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(irq_poll_complete); + +@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h) + } + + local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Even though interrupts have been re-enabled, this + * access is safe because interrupts can only add new +@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h) + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + /** +@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu) + this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + + return 0; + } +diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c +index 1e1bbf171..32db9532d 100644 +--- a/lib/locking-selftest.c ++++ b/lib/locking-selftest.c +@@ -742,6 +742,8 @@ GENERATE_TESTCASE(init_held_rtmutex); + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) + +@@ -757,9 +759,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock) + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Enabling 
hardirqs with a softirq-safe lock held: + */ +@@ -792,6 +797,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) + #undef E1 + #undef E2 + ++#endif ++ + /* + * Enabling irqs with an irq-safe lock held: + */ +@@ -815,6 +822,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) + +@@ -830,6 +839,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock) + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + +@@ -861,6 +872,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) + +@@ -876,6 +889,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock) + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + #undef E3 +@@ -909,6 +924,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) + +@@ -924,10 +941,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock) + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + #undef E3 + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * read-lock / write-lock irq inversion. + * +@@ -990,6 +1011,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) + #undef E2 + #undef E3 + ++#endif ++ ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * read-lock / write-lock recursion that is actually safe. + */ +@@ -1028,6 +1053,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) + #undef E2 + #undef E3 + ++#endif ++ + /* + * read-lock / write-lock recursion that is unsafe. 
+ */ +@@ -2057,6 +2084,7 @@ void locking_selftest(void) + + printk(" --------------------------------------------------------------------------\n"); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * irq-context testcases: + */ +@@ -2069,6 +2097,28 @@ void locking_selftest(void) + + DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); + // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); ++#else ++ /* On -rt, we only do hardirq context test for raw spinlock */ ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); ++#endif + + ww_tests(); + +diff --git a/lib/radix-tree.c b/lib/radix-tree.c +index e5cab5c4e..9309e813b 100644 +--- a/lib/radix-tree.c ++++ b/lib/radix-tree.c +@@ -38,7 +38,7 @@ + #include + #include + #include +- ++#include <linux/locallock.h> + + /* Number of nodes in fully populated tree of given height */ + static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; +@@ -87,6 +87,7 @@ struct radix_tree_preload { + struct radix_tree_node *nodes; + }; + static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; ++static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock); + + static inline struct radix_tree_node *entry_to_node(void *ptr) + { +@@ -405,12 +406,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, + * succeed in getting a node here (and never reach + * kmem_cache_alloc) + */ +- rtp = this_cpu_ptr(&radix_tree_preloads); ++ rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads); + if (rtp->nr) { + ret = rtp->nodes; + rtp->nodes = ret->parent; + rtp->nr--; + } ++ put_locked_var(radix_tree_preloads_lock, radix_tree_preloads); + /* + * Update the allocation stack trace as this is more useful + * for debugging. 
+@@ -476,14 +478,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) + */ + gfp_mask &= ~__GFP_ACCOUNT; + +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + rtp = this_cpu_ptr(&radix_tree_preloads); + while (rtp->nr < nr) { +- preempt_enable(); ++ local_unlock(radix_tree_preloads_lock); + node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); + if (node == NULL) + goto out; +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + rtp = this_cpu_ptr(&radix_tree_preloads); + if (rtp->nr < nr) { + node->parent = rtp->nodes; +@@ -525,7 +527,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) + if (gfpflags_allow_blocking(gfp_mask)) + return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); + /* Preloading doesn't help anything with this gfp mask, skip it */ +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + return 0; + } + EXPORT_SYMBOL(radix_tree_maybe_preload); +@@ -563,7 +565,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) + + /* Preloading doesn't help anything with this gfp mask, skip it */ + if (!gfpflags_allow_blocking(gfp_mask)) { +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + return 0; + } + +@@ -597,6 +599,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) + return __radix_tree_preload(gfp_mask, nr_nodes); + } + ++void radix_tree_preload_end(void) ++{ ++ local_unlock(radix_tree_preloads_lock); ++} ++EXPORT_SYMBOL(radix_tree_preload_end); ++ + static unsigned radix_tree_load_root(const struct radix_tree_root *root, + struct radix_tree_node **nodep, unsigned long *maxindex) + { +@@ -2102,10 +2110,16 @@ EXPORT_SYMBOL(radix_tree_tagged); + void idr_preload(gfp_t gfp_mask) + { + if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + } + EXPORT_SYMBOL(idr_preload); + ++void idr_preload_end(void) ++{ ++ local_unlock(radix_tree_preloads_lock); ++} ++EXPORT_SYMBOL(idr_preload_end); ++ + int ida_pre_get(struct ida *ida, gfp_t gfp) + { + /* +@@ -2114,7 +2128,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) + * to return to the ida_pre_get() step. 
+ */ + if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE)) +- preempt_enable(); ++ local_unlock(radix_tree_preloads_lock); + + if (!this_cpu_read(ida_bitmap)) { + struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp); +diff --git a/lib/scatterlist.c b/lib/scatterlist.c +index cf18373f4..388b25a20 100644 +--- a/lib/scatterlist.c ++++ b/lib/scatterlist.c +@@ -777,7 +777,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) + flush_kernel_dcache_page(miter->page); + + if (miter->__flags & SG_MITER_ATOMIC) { +- WARN_ON_ONCE(preemptible()); ++ WARN_ON_ONCE(!pagefault_disabled()); + kunmap_atomic(miter->addr); + } else + kunmap(miter->page); +diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c +index 85925aaa4..2e7398534 100644 +--- a/lib/smp_processor_id.c ++++ b/lib/smp_processor_id.c +@@ -22,7 +22,12 @@ notrace static unsigned int check_preemption_disabled(const char *what1, + * Kernel threads bound to a single CPU can safely use + * smp_processor_id(): + */ +- if (cpumask_equal(¤t->cpus_allowed, cpumask_of(this_cpu))) ++#if defined(CONFIG_PREEMPT_RT_BASE) && (defined(CONFIG_SMP) || defined(CONFIG_SCHED_DEBUG)) ++ if (current->migrate_disable) ++ goto out; ++#endif ++ ++ if (current->nr_cpus_allowed == 1) + goto out; + + /* +diff --git a/lib/ubsan.c b/lib/ubsan.c +index 1e9e2ab25..199c75e03 100644 +--- a/lib/ubsan.c ++++ b/lib/ubsan.c +@@ -143,25 +143,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, + } + } + +-static DEFINE_SPINLOCK(report_lock); +- +-static void ubsan_prologue(struct source_location *location, +- unsigned long *flags) ++static void ubsan_prologue(struct source_location *location) + { + current->in_ubsan++; +- spin_lock_irqsave(&report_lock, *flags); + + pr_err("========================================" + "========================================\n"); + print_source_location("UBSAN: Undefined behaviour in", location); + } + +-static void ubsan_epilogue(unsigned long *flags) ++static void ubsan_epilogue(void) + { + dump_stack(); + pr_err("========================================" + "========================================\n"); +- spin_unlock_irqrestore(&report_lock, *flags); ++ + current->in_ubsan--; + } + +@@ -170,14 +166,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs, + { + + struct type_descriptor *type = data->type; +- unsigned long flags; + char lhs_val_str[VALUE_LENGTH]; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); + val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); +@@ -189,7 +184,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, + rhs_val_str, + type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + void __ubsan_handle_add_overflow(struct overflow_data *data, +@@ -217,20 +212,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow); + void __ubsan_handle_negate_overflow(struct overflow_data *data, + void *old_val) + { +- unsigned long flags; + char old_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); + + pr_err("negation of %s cannot be represented in type %s:\n", + old_val_str, data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + 
EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + +@@ -238,13 +232,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + void __ubsan_handle_divrem_overflow(struct overflow_data *data, + void *lhs, void *rhs) + { +- unsigned long flags; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); + +@@ -254,58 +247,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, + else + pr_err("division by zero\n"); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); + + static void handle_null_ptr_deref(struct type_mismatch_data_common *data) + { +- unsigned long flags; +- + if (suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + + pr_err("%s null pointer of type %s\n", + type_check_kinds[data->type_check_kind], + data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void handle_misaligned_access(struct type_mismatch_data_common *data, + unsigned long ptr) + { +- unsigned long flags; +- + if (suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + + pr_err("%s misaligned address %p for type %s\n", + type_check_kinds[data->type_check_kind], + (void *)ptr, data->type->type_name); + pr_err("which requires %ld byte alignment\n", data->alignment); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void handle_object_size_mismatch(struct type_mismatch_data_common *data, + unsigned long ptr) + { +- unsigned long flags; +- + if (suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + pr_err("%s address %p with insufficient space\n", + type_check_kinds[data->type_check_kind], + (void *) ptr); + pr_err("for an object of type %s\n", data->type->type_name); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, +@@ -352,42 +339,39 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); + void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, + void *bound) + { +- unsigned long flags; + char bound_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(bound_str, sizeof(bound_str), data->type, bound); + pr_err("variable length array bound value %s <= 0\n", bound_str); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); + + void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) + { +- unsigned long flags; + char index_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(index_str, sizeof(index_str), data->index_type, index); + pr_err("index %s is out of range for type %s\n", index_str, + data->array_type->type_name); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); + + void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + void *lhs, void *rhs) + { +- unsigned long flags; + struct type_descriptor *rhs_type = data->rhs_type; + struct 
type_descriptor *lhs_type = data->lhs_type; + char rhs_str[VALUE_LENGTH]; +@@ -396,7 +380,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); + val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); +@@ -419,18 +403,16 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + lhs_str, rhs_str, + lhs_type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); + + + void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) + { +- unsigned long flags; +- +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + pr_err("calling __builtin_unreachable()\n"); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + panic("can't return from __builtin_unreachable()"); + } + EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); +@@ -438,19 +420,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); + void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, + void *val) + { +- unsigned long flags; + char val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(val_str, sizeof(val_str), data->type, val); + + pr_err("load of value %s is not a valid value for type %s\n", + val_str, data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); +diff --git a/localversion-rt b/localversion-rt +new file mode 100644 +index 000000000..e0a0b11b0 +--- /dev/null ++++ b/localversion-rt +@@ -0,0 +1 @@ ++-rt103 +diff --git a/mm/Kconfig b/mm/Kconfig +index e607d1576..cef749b67 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -392,7 +392,7 @@ config NOMMU_INITIAL_TRIM_EXCESS + + config TRANSPARENT_HUGEPAGE + bool "Transparent Hugepage Support" +- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE ++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL + select COMPACTION + select RADIX_TREE_MULTIORDER + help +diff --git a/mm/compaction.c b/mm/compaction.c +index 1d991e443..6d726b900 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -1672,10 +1672,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro + block_start_pfn(cc->migrate_pfn, cc->order); + + if (cc->last_migrated_pfn < current_block_start) { +- cpu = get_cpu(); ++ cpu = get_cpu_light(); ++ local_lock_irq(swapvec_lock); + lru_add_drain_cpu(cpu); ++ local_unlock_irq(swapvec_lock); + drain_local_pages(zone); +- put_cpu(); ++ put_cpu_light(); + /* No more flushing until we migrate again */ + cc->last_migrated_pfn = 0; + } +diff --git a/mm/highmem.c b/mm/highmem.c +index 59db3223a..22aa3ddbd 100644 +--- a/mm/highmem.c ++++ b/mm/highmem.c +@@ -30,10 +30,11 @@ + #include + #include + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + DEFINE_PER_CPU(int, __kmap_atomic_idx); + #endif ++#endif + + /* + * Virtual_count is not a pure "count". 
+@@ -108,8 +109,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) + unsigned long totalhigh_pages __read_mostly; + EXPORT_SYMBOL(totalhigh_pages); + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); ++#endif + + unsigned int nr_free_highpages (void) + { +diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c +index 3a8ddf8ba..b209dbaef 100644 +--- a/mm/kasan/quarantine.c ++++ b/mm/kasan/quarantine.c +@@ -103,7 +103,7 @@ static int quarantine_head; + static int quarantine_tail; + /* Total size of all objects in global_quarantine across all batches. */ + static unsigned long quarantine_size; +-static DEFINE_SPINLOCK(quarantine_lock); ++static DEFINE_RAW_SPINLOCK(quarantine_lock); + DEFINE_STATIC_SRCU(remove_cache_srcu); + + /* Maximum size of the global queue. */ +@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) + if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { + qlist_move_all(q, &temp); + +- spin_lock(&quarantine_lock); ++ raw_spin_lock(&quarantine_lock); + WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); + qlist_move_all(&temp, &global_quarantine[quarantine_tail]); + if (global_quarantine[quarantine_tail].bytes >= +@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) + if (new_tail != quarantine_head) + quarantine_tail = new_tail; + } +- spin_unlock(&quarantine_lock); ++ raw_spin_unlock(&quarantine_lock); + } + + local_irq_restore(flags); +@@ -230,7 +230,7 @@ void quarantine_reduce(void) + * expected case). + */ + srcu_idx = srcu_read_lock(&remove_cache_srcu); +- spin_lock_irqsave(&quarantine_lock, flags); ++ raw_spin_lock_irqsave(&quarantine_lock, flags); + + /* + * Update quarantine size in case of hotplug. Allocate a fraction of +@@ -254,7 +254,7 @@ void quarantine_reduce(void) + quarantine_head = 0; + } + +- spin_unlock_irqrestore(&quarantine_lock, flags); ++ raw_spin_unlock_irqrestore(&quarantine_lock, flags); + + qlist_free_all(&to_free, NULL); + srcu_read_unlock(&remove_cache_srcu, srcu_idx); +@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache) + */ + on_each_cpu(per_cpu_remove_cache, cache, 1); + +- spin_lock_irqsave(&quarantine_lock, flags); ++ raw_spin_lock_irqsave(&quarantine_lock, flags); + for (i = 0; i < QUARANTINE_BATCHES; i++) { + if (qlist_empty(&global_quarantine[i])) + continue; + qlist_move_cache(&global_quarantine[i], &to_free, cache); + /* Scanning whole quarantine can take a while. */ +- spin_unlock_irqrestore(&quarantine_lock, flags); ++ raw_spin_unlock_irqrestore(&quarantine_lock, flags); + cond_resched(); +- spin_lock_irqsave(&quarantine_lock, flags); ++ raw_spin_lock_irqsave(&quarantine_lock, flags); + } +- spin_unlock_irqrestore(&quarantine_lock, flags); ++ raw_spin_unlock_irqrestore(&quarantine_lock, flags); + + qlist_free_all(&to_free, cache); + +diff --git a/mm/kmemleak.c b/mm/kmemleak.c +index f54734abf..e8a7eebc2 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -26,7 +26,7 @@ + * + * The following locks and mutexes are used by kmemleak: + * +- * - kmemleak_lock (rwlock): protects the object_list modifications and ++ * - kmemleak_lock (raw spinlock): protects the object_list modifications and + * accesses to the object_tree_root. The object_list is the main list + * holding the metadata (struct kmemleak_object) for the allocated memory + * blocks. 
The object_tree_root is a red black tree used to look-up +@@ -147,7 +147,7 @@ struct kmemleak_scan_area { + * (use_count) and freed using the RCU mechanism. + */ + struct kmemleak_object { +- spinlock_t lock; ++ raw_spinlock_t lock; + unsigned int flags; /* object status flags */ + struct list_head object_list; + struct list_head gray_list; +@@ -197,7 +197,7 @@ static LIST_HEAD(gray_list); + /* search tree for object boundaries */ + static struct rb_root object_tree_root = RB_ROOT; + /* rw_lock protecting the access to object_list and object_tree_root */ +-static DEFINE_RWLOCK(kmemleak_lock); ++static DEFINE_RAW_SPINLOCK(kmemleak_lock); + + /* allocation caches for kmemleak internal data */ + static struct kmem_cache *object_cache; +@@ -491,9 +491,9 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias) + struct kmemleak_object *object; + + rcu_read_lock(); +- read_lock_irqsave(&kmemleak_lock, flags); ++ raw_spin_lock_irqsave(&kmemleak_lock, flags); + object = lookup_object(ptr, alias); +- read_unlock_irqrestore(&kmemleak_lock, flags); ++ raw_spin_unlock_irqrestore(&kmemleak_lock, flags); + + /* check whether the object is still available */ + if (object && !get_object(object)) +@@ -513,13 +513,13 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali + unsigned long flags; + struct kmemleak_object *object; + +- write_lock_irqsave(&kmemleak_lock, flags); ++ raw_spin_lock_irqsave(&kmemleak_lock, flags); + object = lookup_object(ptr, alias); + if (object) { + rb_erase(&object->rb_node, &object_tree_root); + list_del_rcu(&object->object_list); + } +- write_unlock_irqrestore(&kmemleak_lock, flags); ++ raw_spin_unlock_irqrestore(&kmemleak_lock, flags); + + return object; + } +@@ -561,7 +561,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, + INIT_LIST_HEAD(&object->object_list); + INIT_LIST_HEAD(&object->gray_list); + INIT_HLIST_HEAD(&object->area_list); +- spin_lock_init(&object->lock); ++ raw_spin_lock_init(&object->lock); + atomic_set(&object->use_count, 1); + object->flags = OBJECT_ALLOCATED; + object->pointer = ptr; +@@ -593,7 +593,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, + /* kernel backtrace */ + object->trace_len = __save_stack_trace(object->trace); + +- write_lock_irqsave(&kmemleak_lock, flags); ++ raw_spin_lock_irqsave(&kmemleak_lock, flags); + + min_addr = min(min_addr, ptr); + max_addr = max(max_addr, ptr + size); +@@ -624,7 +624,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, + + list_add_tail_rcu(&object->object_list, &object_list); + out: +- write_unlock_irqrestore(&kmemleak_lock, flags); ++ raw_spin_unlock_irqrestore(&kmemleak_lock, flags); + return object; + } + +@@ -642,9 +642,9 @@ static void __delete_object(struct kmemleak_object *object) + * Locking here also ensures that the corresponding memory block + * cannot be freed when it is being scanned. 
+ */ +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + object->flags &= ~OBJECT_ALLOCATED; +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + put_object(object); + } + +@@ -716,9 +716,9 @@ static void paint_it(struct kmemleak_object *object, int color) + { + unsigned long flags; + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + __paint_it(object, color); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + + static void paint_ptr(unsigned long ptr, int color) +@@ -778,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) + goto out; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if (size == SIZE_MAX) { + size = object->pointer + object->size - ptr; + } else if (ptr + size > object->pointer + object->size) { +@@ -794,7 +794,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) + + hlist_add_head(&area->node, &object->area_list); + out_unlock: +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + out: + put_object(object); + } +@@ -817,9 +817,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref) + return; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + object->excess_ref = excess_ref; +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + put_object(object); + } + +@@ -839,9 +839,9 @@ static void object_no_scan(unsigned long ptr) + return; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + object->flags |= OBJECT_NO_SCAN; +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + put_object(object); + } + +@@ -902,11 +902,11 @@ static void early_alloc(struct early_log *log) + log->min_count, GFP_ATOMIC); + if (!object) + goto out; +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + for (i = 0; i < log->trace_len; i++) + object->trace[i] = log->trace[i]; + object->trace_len = log->trace_len; +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + out: + rcu_read_unlock(); + } +@@ -1096,9 +1096,9 @@ void __ref kmemleak_update_trace(const void *ptr) + return; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + object->trace_len = __save_stack_trace(object->trace); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + + put_object(object); + } +@@ -1310,7 +1310,7 @@ static void scan_block(void *_start, void *_end, + unsigned long *end = _end - (BYTES_PER_POINTER - 1); + unsigned long flags; + +- read_lock_irqsave(&kmemleak_lock, flags); ++ raw_spin_lock_irqsave(&kmemleak_lock, flags); + for (ptr = start; ptr < end; ptr++) { + struct kmemleak_object *object; + unsigned long pointer; +@@ -1344,7 +1344,7 @@ static void scan_block(void *_start, void *_end, + * previously acquired in scan_object(). These locks are + * enclosed by scan_mutex. 
+ */ +- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); ++ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); + /* only pass surplus references (object already gray) */ + if (color_gray(object)) { + excess_ref = object->excess_ref; +@@ -1353,7 +1353,7 @@ static void scan_block(void *_start, void *_end, + excess_ref = 0; + update_refs(object); + } +- spin_unlock(&object->lock); ++ raw_spin_unlock(&object->lock); + + if (excess_ref) { + object = lookup_object(excess_ref, 0); +@@ -1362,12 +1362,12 @@ static void scan_block(void *_start, void *_end, + if (object == scanned) + /* circular reference, ignore */ + continue; +- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); ++ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); + update_refs(object); +- spin_unlock(&object->lock); ++ raw_spin_unlock(&object->lock); + } + } +- read_unlock_irqrestore(&kmemleak_lock, flags); ++ raw_spin_unlock_irqrestore(&kmemleak_lock, flags); + } + + /* +@@ -1400,7 +1400,7 @@ static void scan_object(struct kmemleak_object *object) + * Once the object->lock is acquired, the corresponding memory block + * cannot be freed (the same lock is acquired in delete_object). + */ +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if (object->flags & OBJECT_NO_SCAN) + goto out; + if (!(object->flags & OBJECT_ALLOCATED)) +@@ -1419,9 +1419,9 @@ static void scan_object(struct kmemleak_object *object) + if (start >= end) + break; + +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + cond_resched(); +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + } while (object->flags & OBJECT_ALLOCATED); + } else + hlist_for_each_entry(area, &object->area_list, node) +@@ -1429,7 +1429,7 @@ static void scan_object(struct kmemleak_object *object) + (void *)(area->start + area->size), + object); + out: +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + + /* +@@ -1482,7 +1482,7 @@ static void kmemleak_scan(void) + /* prepare the kmemleak_object's */ + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + #ifdef DEBUG + /* + * With a few exceptions there should be a maximum of +@@ -1499,7 +1499,7 @@ static void kmemleak_scan(void) + if (color_gray(object) && get_object(object)) + list_add_tail(&object->gray_list, &gray_list); + +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + +@@ -1564,14 +1564,14 @@ static void kmemleak_scan(void) + */ + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if (color_white(object) && (object->flags & OBJECT_ALLOCATED) + && update_checksum(object) && get_object(object)) { + /* color it gray temporarily */ + object->count = object->min_count; + list_add_tail(&object->gray_list, &gray_list); + } +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + +@@ -1591,13 +1591,13 @@ static void kmemleak_scan(void) + */ + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if 
(unreferenced_object(object) && + !(object->flags & OBJECT_REPORTED)) { + object->flags |= OBJECT_REPORTED; + new_leaks++; + } +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + +@@ -1749,10 +1749,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v) + struct kmemleak_object *object = v; + unsigned long flags; + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) + print_unreferenced(seq, object); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + return 0; + } + +@@ -1782,9 +1782,9 @@ static int dump_str_object_info(const char *str) + return -EINVAL; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + dump_object_info(object); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + + put_object(object); + return 0; +@@ -1803,11 +1803,11 @@ static void kmemleak_clear(void) + + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if ((object->flags & OBJECT_REPORTED) && + unreferenced_object(object)) + __paint_it(object, KMEMLEAK_GREY); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 18b5660dc..6ded3d9c7 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -71,6 +71,7 @@ + #include + #include + #include "slab.h" ++#include + + #include + +@@ -96,6 +97,8 @@ int do_swap_account __read_mostly; + #define do_swap_account 0 + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(event_lock); ++ + /* Whether legacy memory+swap accounting is active */ + static bool do_memsw_account(void) + { +@@ -2210,7 +2213,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) + * as well as workers from this path always operate on the local + * per-cpu data. CPU up doesn't touch memcg_stock at all. 
+ */ +- curcpu = get_cpu(); ++ curcpu = get_cpu_light(); + for_each_online_cpu(cpu) { + struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); + struct mem_cgroup *memcg; +@@ -2230,7 +2233,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) + } + css_put(&memcg->css); + } +- put_cpu(); ++ put_cpu_light(); + mutex_unlock(&percpu_charge_mutex); + } + +@@ -5386,12 +5389,12 @@ static int mem_cgroup_move_account(struct page *page, + + ret = 0; + +- local_irq_disable(); ++ local_lock_irq(event_lock); + mem_cgroup_charge_statistics(to, page, compound, nr_pages); + memcg_check_events(to, page); + mem_cgroup_charge_statistics(from, page, compound, -nr_pages); + memcg_check_events(from, page); +- local_irq_enable(); ++ local_unlock_irq(event_lock); + out_unlock: + unlock_page(page); + out: +@@ -6530,10 +6533,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, + + commit_charge(page, memcg, lrucare); + +- local_irq_disable(); ++ local_lock_irq(event_lock); + mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); + memcg_check_events(memcg, page); +- local_irq_enable(); ++ local_unlock_irq(event_lock); + + if (do_memsw_account() && PageSwapCache(page)) { + swp_entry_t entry = { .val = page_private(page) }; +@@ -6602,7 +6605,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) + memcg_oom_recover(ug->memcg); + } + +- local_irq_save(flags); ++ local_lock_irqsave(event_lock, flags); + __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); + __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); + __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); +@@ -6610,7 +6613,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) + __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); + __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); + memcg_check_events(ug->memcg, ug->dummy_page); +- local_irq_restore(flags); ++ local_unlock_irqrestore(event_lock, flags); + + if (!mem_cgroup_is_root(ug->memcg)) + css_put_many(&ug->memcg->css, nr_pages); +@@ -6773,10 +6776,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) + + commit_charge(newpage, memcg, false); + +- local_irq_save(flags); ++ local_lock_irqsave(event_lock, flags); + mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); + memcg_check_events(memcg, newpage); +- local_irq_restore(flags); ++ local_unlock_irqrestore(event_lock, flags); + } + + DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); +@@ -6962,6 +6965,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) + struct mem_cgroup *memcg, *swap_memcg; + unsigned int nr_entries; + unsigned short oldid; ++ unsigned long flags; + + VM_BUG_ON_PAGE(PageLRU(page), page); + VM_BUG_ON_PAGE(page_count(page), page); +@@ -7007,10 +7011,14 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) + * important here to have the interrupts disabled because it is the + * only synchronisation we have for updating the per-CPU variables. 
+ */ ++ local_lock_irqsave(event_lock, flags); ++#ifndef CONFIG_PREEMPT_RT_BASE + VM_BUG_ON(!irqs_disabled()); ++#endif + mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page), + -nr_entries); + memcg_check_events(memcg, page); ++ local_unlock_irqrestore(event_lock, flags); + + if (!mem_cgroup_is_root(memcg)) + css_put_many(&memcg->css, nr_entries); +diff --git a/mm/mmu_context.c b/mm/mmu_context.c +index 3e612ae74..d0ccc0709 100644 +--- a/mm/mmu_context.c ++++ b/mm/mmu_context.c +@@ -25,6 +25,7 @@ void use_mm(struct mm_struct *mm) + struct task_struct *tsk = current; + + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + if (active_mm != mm) { + mmgrab(mm); +@@ -32,6 +33,7 @@ void use_mm(struct mm_struct *mm) + } + tsk->mm = mm; + switch_mm(active_mm, mm, tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + #ifdef finish_arch_post_lock_switch + finish_arch_post_lock_switch(); +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index f4d4716b9..5ef3490be 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -60,6 +60,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -333,6 +334,18 @@ EXPORT_SYMBOL(nr_node_ids); + EXPORT_SYMBOL(nr_online_nodes); + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(pa_lock); ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define cpu_lock_irqsave(cpu, flags) \ ++ local_lock_irqsave_on(pa_lock, flags, cpu) ++# define cpu_unlock_irqrestore(cpu, flags) \ ++ local_unlock_irqrestore_on(pa_lock, flags, cpu) ++#else ++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) ++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) ++#endif ++ + int page_group_by_mobility_disabled __read_mostly; + + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT +@@ -1153,7 +1166,7 @@ static inline void prefetch_buddy(struct page *page) + } + + /* +- * Frees a number of pages from the PCP lists ++ * Frees a number of pages which have been collected from the pcp lists. + * Assumes all pages on list are in same zone, and of same order. + * count is the number of pages to free. + * +@@ -1163,15 +1176,57 @@ static inline void prefetch_buddy(struct page *page) + * And clear the zone's pages_scanned counter, to hold off the "all pages are + * pinned" detection logic. + */ +-static void free_pcppages_bulk(struct zone *zone, int count, +- struct per_cpu_pages *pcp) ++static void free_pcppages_bulk(struct zone *zone, struct list_head *head, ++ bool zone_retry) ++{ ++ bool isolated_pageblocks; ++ struct page *page, *tmp; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&zone->lock, flags); ++ isolated_pageblocks = has_isolate_pageblock(zone); ++ ++ /* ++ * Use safe version since after __free_one_page(), ++ * page->lru.next will not point to original list. ++ */ ++ list_for_each_entry_safe(page, tmp, head, lru) { ++ int mt = get_pcppage_migratetype(page); ++ ++ if (page_zone(page) != zone) { ++ /* ++ * free_unref_page_list() sorts pages by zone. If we end ++ * up with pages from a different NUMA nodes belonging ++ * to the same ZONE index then we need to redo with the ++ * correct ZONE pointer. Skip the page for now, redo it ++ * on the next iteration. 
++ */ ++ WARN_ON_ONCE(zone_retry == false); ++ if (zone_retry) ++ continue; ++ } ++ ++ /* MIGRATE_ISOLATE page should not go to pcplists */ ++ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); ++ /* Pageblock could have been isolated meanwhile */ ++ if (unlikely(isolated_pageblocks)) ++ mt = get_pageblock_migratetype(page); ++ ++ list_del(&page->lru); ++ __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); ++ trace_mm_page_pcpu_drain(page, 0, mt); ++ } ++ spin_unlock_irqrestore(&zone->lock, flags); ++} ++ ++static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp, ++ struct list_head *dst) ++ + { + int migratetype = 0; + int batch_free = 0; + int prefetch_nr = 0; +- bool isolated_pageblocks; +- struct page *page, *tmp; +- LIST_HEAD(head); ++ struct page *page; + + /* + * Ensure proper count is passed which otherwise would stuck in the +@@ -1208,7 +1263,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, + if (bulkfree_pcp_prepare(page)) + continue; + +- list_add_tail(&page->lru, &head); ++ list_add_tail(&page->lru, dst); + + /* + * We are going to put the page back to the global +@@ -1223,26 +1278,6 @@ static void free_pcppages_bulk(struct zone *zone, int count, + prefetch_buddy(page); + } while (--count && --batch_free && !list_empty(list)); + } +- +- spin_lock(&zone->lock); +- isolated_pageblocks = has_isolate_pageblock(zone); +- +- /* +- * Use safe version since after __free_one_page(), +- * page->lru.next will not point to original list. +- */ +- list_for_each_entry_safe(page, tmp, &head, lru) { +- int mt = get_pcppage_migratetype(page); +- /* MIGRATE_ISOLATE page should not go to pcplists */ +- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); +- /* Pageblock could have been isolated meanwhile */ +- if (unlikely(isolated_pageblocks)) +- mt = get_pageblock_migratetype(page); +- +- __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); +- trace_mm_page_pcpu_drain(page, 0, mt); +- } +- spin_unlock(&zone->lock); + } + + static void free_one_page(struct zone *zone, +@@ -1338,12 +1373,12 @@ static void __free_pages_ok(struct page *page, unsigned int order, + return; + + migratetype = get_pfnblock_migratetype(page, pfn); +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + __count_vm_events(PGFREE, 1 << order); + mem_reliable_buddy_counter(page, 1 << order); + free_one_page(page_zone(page), page, pfn, order, migratetype, + fpi_flags); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + void __free_pages_core(struct page *page, unsigned int order) +@@ -2690,13 +2725,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) + { + unsigned long flags; + int to_drain, batch; ++ LIST_HEAD(dst); + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + batch = READ_ONCE(pcp->batch); + to_drain = min(pcp->count, batch); + if (to_drain > 0) +- free_pcppages_bulk(zone, to_drain, pcp); +- local_irq_restore(flags); ++ isolate_pcp_pages(to_drain, pcp, &dst); ++ ++ local_unlock_irqrestore(pa_lock, flags); ++ ++ if (to_drain > 0) ++ free_pcppages_bulk(zone, &dst, false); + } + #endif + +@@ -2712,14 +2752,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) + unsigned long flags; + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; ++ LIST_HEAD(dst); ++ int count; + +- local_irq_save(flags); ++ cpu_lock_irqsave(cpu, flags); + pset = per_cpu_ptr(zone->pageset, cpu); + + pcp = &pset->pcp; +- if (pcp->count) +- free_pcppages_bulk(zone, pcp->count, pcp); +- 
local_irq_restore(flags); ++ count = pcp->count; ++ if (count) ++ isolate_pcp_pages(count, pcp, &dst); ++ ++ cpu_unlock_irqrestore(cpu, flags); ++ ++ if (count) ++ free_pcppages_bulk(zone, &dst, false); + } + + /* +@@ -2754,6 +2801,7 @@ void drain_local_pages(struct zone *zone) + drain_pages(cpu); + } + ++#ifndef CONFIG_PREEMPT_RT_BASE + static void drain_local_pages_wq(struct work_struct *work) + { + /* +@@ -2767,6 +2815,7 @@ static void drain_local_pages_wq(struct work_struct *work) + drain_local_pages(NULL); + preempt_enable(); + } ++#endif + + /* + * Spill all the per-cpu pages from all CPUs back into the buddy allocator. +@@ -2833,7 +2882,14 @@ void drain_all_pages(struct zone *zone) + else + cpumask_clear_cpu(cpu, &cpus_with_pcps); + } +- ++#ifdef CONFIG_PREEMPT_RT_BASE ++ for_each_cpu(cpu, &cpus_with_pcps) { ++ if (zone) ++ drain_pages_zone(cpu, zone); ++ else ++ drain_pages(cpu); ++ } ++#else + for_each_cpu(cpu, &cpus_with_pcps) { + struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); + INIT_WORK(work, drain_local_pages_wq); +@@ -2841,6 +2897,7 @@ void drain_all_pages(struct zone *zone) + } + for_each_cpu(cpu, &cpus_with_pcps) + flush_work(per_cpu_ptr(&pcpu_drain, cpu)); ++#endif + + mutex_unlock(&pcpu_drain_mutex); + } +@@ -2912,7 +2969,8 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn) + return true; + } + +-static void free_unref_page_commit(struct page *page, unsigned long pfn) ++static void free_unref_page_commit(struct page *page, unsigned long pfn, ++ struct list_head *dst) + { + struct zone *zone = page_zone(page); + struct per_cpu_pages *pcp; +@@ -2943,7 +3001,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) + pcp->count++; + if (pcp->count >= pcp->high) { + unsigned long batch = READ_ONCE(pcp->batch); +- free_pcppages_bulk(zone, batch, pcp); ++ ++ isolate_pcp_pages(batch, pcp, dst); + } + } + +@@ -2954,13 +3013,17 @@ void free_unref_page(struct page *page) + { + unsigned long flags; + unsigned long pfn = page_to_pfn(page); ++ struct zone *zone = page_zone(page); ++ LIST_HEAD(dst); + + if (!free_unref_page_prepare(page, pfn)) + return; + +- local_irq_save(flags); +- free_unref_page_commit(page, pfn); +- local_irq_restore(flags); ++ local_lock_irqsave(pa_lock, flags); ++ free_unref_page_commit(page, pfn, &dst); ++ local_unlock_irqrestore(pa_lock, flags); ++ if (!list_empty(&dst)) ++ free_pcppages_bulk(zone, &dst, false); + } + + /* +@@ -2971,6 +3034,11 @@ void free_unref_page_list(struct list_head *list) + struct page *page, *next; + unsigned long flags, pfn; + int batch_count = 0; ++ struct list_head dsts[__MAX_NR_ZONES]; ++ int i; ++ ++ for (i = 0; i < __MAX_NR_ZONES; i++) ++ INIT_LIST_HEAD(&dsts[i]); + + /* Prepare pages for freeing */ + list_for_each_entry_safe(page, next, list, lru) { +@@ -2980,25 +3048,42 @@ void free_unref_page_list(struct list_head *list) + set_page_private(page, pfn); + } + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + list_for_each_entry_safe(page, next, list, lru) { + unsigned long pfn = page_private(page); ++ enum zone_type type; + + set_page_private(page, 0); + trace_mm_page_free_batched(page); +- free_unref_page_commit(page, pfn); ++ type = page_zonenum(page); ++ free_unref_page_commit(page, pfn, &dsts[type]); + + /* + * Guard against excessive IRQ disabled times when we get + * a large list of pages to free. 
+ */ + if (++batch_count == SWAP_CLUSTER_MAX) { +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + batch_count = 0; +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + } + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); ++ ++ for (i = 0; i < __MAX_NR_ZONES; ) { ++ struct page *page; ++ struct zone *zone; ++ ++ if (list_empty(&dsts[i])) { ++ i++; ++ continue; ++ } ++ ++ page = list_first_entry(&dsts[i], struct page, lru); ++ zone = page_zone(page); ++ ++ free_pcppages_bulk(zone, &dsts[i], true); ++ } + } + + /* +@@ -3152,7 +3237,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, + struct page *page; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + pcp = &this_cpu_ptr(zone->pageset)->pcp; + list = &pcp->lists[migratetype]; + page = __rmqueue_pcplist(zone, migratetype, pcp, list); +@@ -3161,7 +3246,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, + mem_reliable_buddy_counter(page, -(1 << order)); + zone_statistics(preferred_zone, zone); + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + return page; + } + +@@ -3188,7 +3273,7 @@ struct page *rmqueue(struct zone *preferred_zone, + * allocate greater than order-1 page units with __GFP_NOFAIL. + */ + WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); +- spin_lock_irqsave(&zone->lock, flags); ++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); + + do { + page = NULL; +@@ -3209,14 +3294,14 @@ struct page *rmqueue(struct zone *preferred_zone, + __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); + mem_reliable_buddy_counter(page, -(1 << order)); + zone_statistics(preferred_zone, zone); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + + out: + VM_BUG_ON_PAGE(page && bad_range(zone, page), page); + return page; + + failed: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + return NULL; + } + +@@ -7564,8 +7649,9 @@ void __init free_area_init(unsigned long *zones_size) + + static int page_alloc_cpu_dead(unsigned int cpu) + { +- ++ local_lock_irq_on(swapvec_lock, cpu); + lru_add_drain_cpu(cpu); ++ local_unlock_irq_on(swapvec_lock, cpu); + drain_pages(cpu); + + /* +@@ -8482,7 +8568,7 @@ void zone_pcp_reset(struct zone *zone) + struct per_cpu_pageset *pset; + + /* avoid races with drain_pages() */ +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (zone->pageset != &boot_pageset) { + for_each_online_cpu(cpu) { + pset = per_cpu_ptr(zone->pageset, cpu); +@@ -8491,7 +8577,7 @@ void zone_pcp_reset(struct zone *zone) + free_percpu(zone->pageset); + zone->pageset = &boot_pageset; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + #ifdef CONFIG_MEMORY_HOTREMOVE +diff --git a/mm/slab.c b/mm/slab.c +index 0dcce6822..f5398a95b 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) + parent->shared = NULL; + parent->alien = NULL; + parent->colour_next = 0; +- spin_lock_init(&parent->list_lock); ++ raw_spin_lock_init(&parent->list_lock); + parent->free_objects = 0; + parent->free_touched = 0; + } +@@ -600,9 +600,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, + page_node = page_to_nid(page); + n = get_node(cachep, page_node); + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + free_block(cachep, &objp, 1, page_node, &list); +- spin_unlock(&n->list_lock); ++ 
raw_spin_unlock(&n->list_lock); + + slabs_destroy(cachep, &list); + } +@@ -741,7 +741,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep, + struct kmem_cache_node *n = get_node(cachep, node); + + if (ac->avail) { +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + /* + * Stuff objects into the remote nodes shared array first. + * That way we could avoid the overhead of putting the objects +@@ -752,7 +752,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep, + + free_block(cachep, ac->entry, ac->avail, node, list); + ac->avail = 0; +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + } + } + +@@ -825,9 +825,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp, + slabs_destroy(cachep, &list); + } else { + n = get_node(cachep, page_node); +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + free_block(cachep, &objp, 1, page_node, &list); +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + slabs_destroy(cachep, &list); + } + return 1; +@@ -868,10 +868,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) + */ + n = get_node(cachep, node); + if (n) { +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + + cachep->num; +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + + return 0; + } +@@ -950,7 +950,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep, + goto fail; + + n = get_node(cachep, node); +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + if (n->shared && force_change) { + free_block(cachep, n->shared->entry, + n->shared->avail, node, &list); +@@ -968,7 +968,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep, + new_alien = NULL; + } + +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + slabs_destroy(cachep, &list); + + /* +@@ -1007,7 +1007,7 @@ static void cpuup_canceled(long cpu) + if (!n) + continue; + +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + + /* Free limit for this kmem_cache_node */ + n->free_limit -= cachep->batchcount; +@@ -1020,7 +1020,7 @@ static void cpuup_canceled(long cpu) + } + + if (!cpumask_empty(mask)) { +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + goto free_slab; + } + +@@ -1034,7 +1034,7 @@ static void cpuup_canceled(long cpu) + alien = n->alien; + n->alien = NULL; + +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + + kfree(shared); + if (alien) { +@@ -1218,7 +1218,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node * + /* + * Do not assume that spinlocks can be initialized via memcpy: + */ +- spin_lock_init(&ptr->list_lock); ++ raw_spin_lock_init(&ptr->list_lock); + + MAKE_ALL_LISTS(cachep, ptr, nodeid); + cachep->node[nodeid] = ptr; +@@ -1389,11 +1389,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) + for_each_kmem_cache_node(cachep, node, n) { + unsigned long total_slabs, free_slabs, free_objs; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + total_slabs = n->total_slabs; + free_slabs = n->free_slabs; + free_objs = n->free_objects; +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + + pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n", + node, total_slabs - free_slabs, total_slabs, +@@ -2188,7 +2188,7 @@ static void 
check_spinlock_acquired(struct kmem_cache *cachep) + { + #ifdef CONFIG_SMP + check_irq_off(); +- assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); ++ assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); + #endif + } + +@@ -2196,7 +2196,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) + { + #ifdef CONFIG_SMP + check_irq_off(); +- assert_spin_locked(&get_node(cachep, node)->list_lock); ++ assert_raw_spin_locked(&get_node(cachep, node)->list_lock); + #endif + } + +@@ -2236,9 +2236,9 @@ static void do_drain(void *arg) + check_irq_off(); + ac = cpu_cache_get(cachep); + n = get_node(cachep, node); +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + free_block(cachep, ac->entry, ac->avail, node, &list); +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + slabs_destroy(cachep, &list); + ac->avail = 0; + } +@@ -2256,9 +2256,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep) + drain_alien_cache(cachep, n->alien); + + for_each_kmem_cache_node(cachep, node, n) { +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + drain_array_locked(cachep, n->shared, node, true, &list); +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + + slabs_destroy(cachep, &list); + } +@@ -2280,10 +2280,10 @@ static int drain_freelist(struct kmem_cache *cache, + nr_freed = 0; + while (nr_freed < tofree && !list_empty(&n->slabs_free)) { + +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + p = n->slabs_free.prev; + if (p == &n->slabs_free) { +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + goto out; + } + +@@ -2296,7 +2296,7 @@ static int drain_freelist(struct kmem_cache *cache, + * to the cache. + */ + n->free_objects -= cache->num; +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + slab_destroy(cache, page); + nr_freed++; + } +@@ -2744,7 +2744,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page) + INIT_LIST_HEAD(&page->lru); + n = get_node(cachep, page_to_nid(page)); + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + n->total_slabs++; + if (!page->active) { + list_add_tail(&page->lru, &(n->slabs_free)); +@@ -2754,7 +2754,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page) + + STATS_INC_GROWN(cachep); + n->free_objects += cachep->num - page->active; +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + fixup_objfreelist_debug(cachep, &list); + } +@@ -2922,7 +2922,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) + { + struct page *page; + +- assert_spin_locked(&n->list_lock); ++ assert_raw_spin_locked(&n->list_lock); + page = list_first_entry_or_null(&n->slabs_partial, struct page, lru); + if (!page) { + n->free_touched = 1; +@@ -2948,10 +2948,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, + if (!gfp_pfmemalloc_allowed(flags)) + return NULL; + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + page = get_first_slab(n, true); + if (!page) { +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + return NULL; + } + +@@ -2960,7 +2960,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, + + fixup_slab_list(cachep, n, page, &list); + +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + fixup_objfreelist_debug(cachep, &list); + + return obj; +@@ -3019,7 +3019,7 @@ static void *cache_alloc_refill(struct kmem_cache 
*cachep, gfp_t flags) + if (!n->free_objects && (!shared || !shared->avail)) + goto direct_grow; + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + shared = READ_ONCE(n->shared); + + /* See if we can refill from the shared array */ +@@ -3043,7 +3043,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) + must_grow: + n->free_objects -= ac->avail; + alloc_done: +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + fixup_objfreelist_debug(cachep, &list); + + direct_grow: +@@ -3268,7 +3268,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, + BUG_ON(!n); + + check_irq_off(); +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + page = get_first_slab(n, false); + if (!page) + goto must_grow; +@@ -3286,12 +3286,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, + + fixup_slab_list(cachep, n, page, &list); + +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + fixup_objfreelist_debug(cachep, &list); + return obj; + + must_grow: +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); + if (page) { + /* This slab isn't counted yet so don't update free_objects */ +@@ -3467,7 +3467,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) + + check_irq_off(); + n = get_node(cachep, node); +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + if (n->shared) { + struct array_cache *shared_array = n->shared; + int max = shared_array->limit - shared_array->avail; +@@ -3496,7 +3496,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) + STATS_SET_FREEABLE(cachep, i); + } + #endif +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + slabs_destroy(cachep, &list); + ac->avail -= batchcount; + memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); +@@ -3904,9 +3904,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, + + node = cpu_to_mem(cpu); + n = get_node(cachep, node); +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + free_block(cachep, ac->entry, ac->avail, node, &list); +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + slabs_destroy(cachep, &list); + } + free_percpu(prev); +@@ -4031,9 +4031,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, + return; + } + +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + drain_array_locked(cachep, ac, node, false, &list); +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + + slabs_destroy(cachep, &list); + } +@@ -4117,7 +4117,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) + + for_each_kmem_cache_node(cachep, node, n) { + check_irq_on(); +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + + total_slabs += n->total_slabs; + free_slabs += n->free_slabs; +@@ -4126,7 +4126,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) + if (n->shared) + shared_avail += n->shared->avail; + +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + } + num_objs = total_slabs * cachep->num; + active_slabs = total_slabs - free_slabs; +@@ -4346,13 +4346,13 @@ static int leaks_show(struct seq_file *m, void *p) + for_each_kmem_cache_node(cachep, node, n) { + + check_irq_on(); +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + + 
list_for_each_entry(page, &n->slabs_full, lru) + handle_slab(x, cachep, page); + list_for_each_entry(page, &n->slabs_partial, lru) + handle_slab(x, cachep, page); +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + } + } while (!is_store_user_clean(cachep)); + +diff --git a/mm/slab.h b/mm/slab.h +index c683b07ff..e45fc3a81 100644 +--- a/mm/slab.h ++++ b/mm/slab.h +@@ -450,7 +450,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, + * The slab lists for all objects. + */ + struct kmem_cache_node { +- spinlock_t list_lock; ++ raw_spinlock_t list_lock; + + #ifdef CONFIG_SLAB + struct list_head slabs_partial; /* partial list first, better asm code */ +diff --git a/mm/slub.c b/mm/slub.c +index 7b5630ca9..8c9ba6a1a 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1188,7 +1188,7 @@ static noinline int free_debug_processing( + unsigned long uninitialized_var(flags); + int ret = 0; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + slab_lock(page); + + if (s->flags & SLAB_CONSISTENCY_CHECKS) { +@@ -1223,7 +1223,7 @@ static noinline int free_debug_processing( + bulk_cnt, cnt); + + slab_unlock(page); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + if (!ret) + slab_fix(s, "Object at 0x%p not freed", object); + return ret; +@@ -1356,6 +1356,12 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page, + } + #endif /* CONFIG_SLUB_DEBUG */ + ++struct slub_free_list { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); ++ + /* + * Hooks for other subsystems that check memory allocations. In a typical + * production configuration these hooks all should produce no code at all. 
+@@ -1597,10 +1603,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) + void *start, *p; + int idx, order; + bool shuffle; ++ bool enableirqs = false; + + flags &= gfp_allowed_mask; + + if (gfpflags_allow_blocking(flags)) ++ enableirqs = true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (system_state > SYSTEM_BOOTING) ++ enableirqs = true; ++#endif ++ if (enableirqs) + local_irq_enable(); + + flags |= s->allocflags; +@@ -1659,7 +1672,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) + page->frozen = 1; + + out: +- if (gfpflags_allow_blocking(flags)) ++ if (enableirqs) + local_irq_disable(); + if (!page) + return NULL; +@@ -1717,6 +1730,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page) + __free_pages(page, order); + } + ++static void free_delayed(struct list_head *h) ++{ ++ while (!list_empty(h)) { ++ struct page *page = list_first_entry(h, struct page, lru); ++ ++ list_del(&page->lru); ++ __free_slab(page->slab_cache, page); ++ } ++} ++ + static void rcu_free_slab(struct rcu_head *h) + { + struct page *page = container_of(h, struct page, rcu_head); +@@ -1728,6 +1751,12 @@ static void free_slab(struct kmem_cache *s, struct page *page) + { + if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { + call_rcu(&page->rcu_head, rcu_free_slab); ++ } else if (irqs_disabled()) { ++ struct slub_free_list *f = this_cpu_ptr(&slub_free_list); ++ ++ raw_spin_lock(&f->lock); ++ list_add(&page->lru, &f->list); ++ raw_spin_unlock(&f->lock); + } else + __free_slab(s, page); + } +@@ -1835,7 +1864,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, + if (!n || !n->nr_partial) + return NULL; + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + list_for_each_entry_safe(page, page2, &n->partial, lru) { + void *t; + +@@ -1860,7 +1889,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, + break; + + } +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + return object; + } + +@@ -2112,7 +2141,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, + * that acquire_slab() will see a slab page that + * is frozen + */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } else { + m = M_FULL; +@@ -2123,7 +2152,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, + * slabs from diagnostic functions will not see + * any frozen slabs. 
+ */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } + +@@ -2158,7 +2187,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, + goto redo; + + if (lock) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + if (m == M_FREE) { + stat(s, DEACTIVATE_EMPTY); +@@ -2193,10 +2222,10 @@ static void unfreeze_partials(struct kmem_cache *s, + n2 = get_node(s, page_to_nid(page)); + if (n != n2) { + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + n = n2; +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + + do { +@@ -2225,7 +2254,7 @@ static void unfreeze_partials(struct kmem_cache *s, + } + + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + while (discard_page) { + page = discard_page; +@@ -2262,14 +2291,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) + pobjects = oldpage->pobjects; + pages = oldpage->pages; + if (drain && pobjects > s->cpu_partial) { ++ struct slub_free_list *f; + unsigned long flags; ++ LIST_HEAD(tofree); + /* + * partial array is full. Move the existing + * set to the per node partial list. + */ + local_irq_save(flags); + unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); ++ f = this_cpu_ptr(&slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock(&f->lock); + local_irq_restore(flags); ++ free_delayed(&tofree); + oldpage = NULL; + pobjects = 0; + pages = 0; +@@ -2339,7 +2375,19 @@ static bool has_cpu_slab(int cpu, void *info) + + static void flush_all(struct kmem_cache *s) + { ++ LIST_HEAD(tofree); ++ int cpu; ++ + on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); ++ for_each_online_cpu(cpu) { ++ struct slub_free_list *f; ++ ++ f = &per_cpu(slub_free_list, cpu); ++ raw_spin_lock_irq(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock_irq(&f->lock); ++ free_delayed(&tofree); ++ } + } + + /* +@@ -2394,10 +2442,10 @@ static unsigned long count_partial(struct kmem_cache_node *n, + unsigned long x = 0; + struct page *page; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + x += get_count(page); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return x; + } + #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ +@@ -2537,8 +2585,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) + * already disabled (which is the case for bulk allocation). 
+ */ + static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, +- unsigned long addr, struct kmem_cache_cpu *c) ++ unsigned long addr, struct kmem_cache_cpu *c, ++ struct list_head *to_free) + { ++ struct slub_free_list *f; + void *freelist; + struct page *page; + +@@ -2604,6 +2654,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + VM_BUG_ON(!c->page->frozen); + c->freelist = get_freepointer(s, freelist); + c->tid = next_tid(c->tid); ++ ++out: ++ f = this_cpu_ptr(&slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, to_free); ++ raw_spin_unlock(&f->lock); ++ + return freelist; + + new_slab: +@@ -2619,7 +2676,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + + if (unlikely(!freelist)) { + slab_out_of_memory(s, gfpflags, node); +- return NULL; ++ goto out; + } + + page = c->page; +@@ -2632,7 +2689,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + goto new_slab; /* Slab failed checks. Next slab needed */ + + deactivate_slab(s, page, get_freepointer(s, freelist), c); +- return freelist; ++ goto out; + } + + /* +@@ -2644,6 +2701,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + { + void *p; + unsigned long flags; ++ LIST_HEAD(tofree); + + local_irq_save(flags); + #ifdef CONFIG_PREEMPT +@@ -2655,8 +2713,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + c = this_cpu_ptr(s->cpu_slab); + #endif + +- p = ___slab_alloc(s, gfpflags, node, addr, c); ++ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree); + local_irq_restore(flags); ++ free_delayed(&tofree); + return p; + } + +@@ -2842,7 +2901,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, + + do { + if (unlikely(n)) { +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + n = NULL; + } + prior = page->freelist; +@@ -2874,7 +2933,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, + * Otherwise the list_lock will synchronize with + * other processors updating the list of slabs. 
+ */ +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + } + } +@@ -2916,7 +2975,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, + add_partial(n, page, DEACTIVATE_TO_TAIL); + stat(s, FREE_ADD_PARTIAL); + } +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return; + + slab_empty: +@@ -2931,7 +2990,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, + remove_full(s, n, page); + } + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + stat(s, FREE_SLAB); + discard_slab(s, page); + } +@@ -3136,6 +3195,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, + void **p) + { + struct kmem_cache_cpu *c; ++ LIST_HEAD(to_free); + int i; + + /* memcg and kmem_cache debug support */ +@@ -3168,7 +3228,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, + * of re-populating per CPU c->freelist + */ + p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, +- _RET_IP_, c); ++ _RET_IP_, c, &to_free); + if (unlikely(!p[i])) + goto error; + +@@ -3180,6 +3240,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, + } + c->tid = next_tid(c->tid); + local_irq_enable(); ++ free_delayed(&to_free); + + /* Clear memory outside IRQ disabled fastpath loop */ + if (unlikely(flags & __GFP_ZERO)) { +@@ -3194,6 +3255,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, + return i; + error: + local_irq_enable(); ++ free_delayed(&to_free); + slab_post_alloc_hook(s, flags, i, p); + __kmem_cache_free_bulk(s, i, p); + return 0; +@@ -3329,7 +3391,7 @@ static void + init_kmem_cache_node(struct kmem_cache_node *n) + { + n->nr_partial = 0; +- spin_lock_init(&n->list_lock); ++ raw_spin_lock_init(&n->list_lock); + INIT_LIST_HEAD(&n->partial); + #ifdef CONFIG_SLUB_DEBUG + atomic_long_set(&n->nr_slabs, 0); +@@ -3678,6 +3740,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, + const char *text) + { + #ifdef CONFIG_SLUB_DEBUG ++#ifdef CONFIG_PREEMPT_RT_BASE ++ /* XXX move out of irq-off section */ ++ slab_err(s, page, text, s->name); ++#else ++ + void *addr = page_address(page); + void *p; + unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects), +@@ -3699,6 +3766,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, + slab_unlock(page); + kfree(map); + #endif ++#endif + } + + /* +@@ -3712,7 +3780,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) + struct page *page, *h; + + BUG_ON(irqs_disabled()); +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + list_for_each_entry_safe(page, h, &n->partial, lru) { + if (!page->inuse) { + remove_partial(n, page); +@@ -3722,7 +3790,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) + "Objects remaining in %s on __kmem_cache_shutdown()"); + } + } +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + + list_for_each_entry_safe(page, h, &discard, lru) + discard_slab(s, page); +@@ -3995,7 +4063,7 @@ int __kmem_cache_shrink(struct kmem_cache *s) + for (i = 0; i < SHRINK_PROMOTE_MAX; i++) + INIT_LIST_HEAD(promote + i); + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + /* + * Build lists of slabs to discard or promote. 
+@@ -4026,7 +4094,7 @@ int __kmem_cache_shrink(struct kmem_cache *s) + for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) + list_splice(promote + i, &n->partial); + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + + /* Release empty slabs */ + list_for_each_entry_safe(page, t, &discard, lru) +@@ -4240,6 +4308,12 @@ void __init kmem_cache_init(void) + static __initdata struct kmem_cache boot_kmem_cache, + boot_kmem_cache_node; + int node; ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); ++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); ++ } + + if (debug_guardpage_minorder()) + slub_max_order = 0; +@@ -4448,7 +4522,7 @@ static int validate_slab_node(struct kmem_cache *s, + struct page *page; + unsigned long flags; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + list_for_each_entry(page, &n->partial, lru) { + validate_slab_slab(s, page, map); +@@ -4470,7 +4544,7 @@ static int validate_slab_node(struct kmem_cache *s, + s->name, count, atomic_long_read(&n->nr_slabs)); + + out: +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return count; + } + +@@ -4527,6 +4601,9 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) + struct location *l; + int order; + ++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC) ++ return 0; ++ + order = get_order(sizeof(struct location) * max); + + l = (void *)__get_free_pages(flags, order); +@@ -4660,12 +4737,12 @@ static int list_locations(struct kmem_cache *s, char *buf, + if (!atomic_long_read(&n->nr_slabs)) + continue; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + process_slab(&t, s, page, alloc, map); + list_for_each_entry(page, &n->full, lru) + process_slab(&t, s, page, alloc, map); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + } + + for (i = 0; i < t.count; i++) { +diff --git a/mm/swap.c b/mm/swap.c +index 002c98a81..7d99561c4 100644 +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -51,6 +52,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs); + #ifdef CONFIG_SMP + static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); + #endif ++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); ++DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); + + /* + * This path almost never happens for VM activity - pages are normally +@@ -253,11 +256,11 @@ void rotate_reclaimable_page(struct page *page) + unsigned long flags; + + get_page(page); +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pvec = this_cpu_ptr(&lru_rotate_pvecs); + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + } + +@@ -310,12 +313,13 @@ void activate_page(struct page *page) + { + page = compound_head(page); + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { +- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ activate_page_pvecs); + + get_page(page); + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_lru_move_fn(pvec, __activate_page, NULL); +- put_cpu_var(activate_page_pvecs); ++ put_locked_var(swapvec_lock, 
activate_page_pvecs); + } + } + +@@ -337,7 +341,7 @@ void activate_page(struct page *page) + + static void __lru_cache_activate_page(struct page *page) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); + int i; + + /* +@@ -359,7 +363,7 @@ static void __lru_cache_activate_page(struct page *page) + } + } + +- put_cpu_var(lru_add_pvec); ++ put_locked_var(swapvec_lock, lru_add_pvec); + } + + /* +@@ -403,12 +407,12 @@ EXPORT_SYMBOL(mark_page_accessed); + + static void __lru_cache_add(struct page *page) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); + + get_page(page); + if (!pagevec_add(pvec, page) || PageCompound(page)) + __pagevec_lru_add(pvec); +- put_cpu_var(lru_add_pvec); ++ put_locked_var(swapvec_lock, lru_add_pvec); + } + + /** +@@ -591,9 +595,15 @@ void lru_add_drain_cpu(int cpu) + unsigned long flags; + + /* No harm done if a racing interrupt already did this */ +- local_irq_save(flags); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ local_lock_irqsave_on(rotate_lock, flags, cpu); + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore_on(rotate_lock, flags, cpu); ++#else ++ local_lock_irqsave(rotate_lock, flags); ++ pagevec_move_tail(pvec); ++ local_unlock_irqrestore(rotate_lock, flags); ++#endif + } + + pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); +@@ -625,11 +635,12 @@ void deactivate_file_page(struct page *page) + return; + + if (likely(get_page_unless_zero(page))) { +- struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ lru_deactivate_file_pvecs); + + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); +- put_cpu_var(lru_deactivate_file_pvecs); ++ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); + } + } + +@@ -644,23 +655,34 @@ void mark_page_lazyfree(struct page *page) + { + if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && + !PageSwapCache(page) && !PageUnevictable(page)) { +- struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ lru_lazyfree_pvecs); + + get_page(page); + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); +- put_cpu_var(lru_lazyfree_pvecs); ++ put_locked_var(swapvec_lock, lru_lazyfree_pvecs); + } + } + + void lru_add_drain(void) + { +- lru_add_drain_cpu(get_cpu()); +- put_cpu(); ++ lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); ++ local_unlock_cpu(swapvec_lock); + } + + #ifdef CONFIG_SMP + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) ++{ ++ local_lock_on(swapvec_lock, cpu); ++ lru_add_drain_cpu(cpu); ++ local_unlock_on(swapvec_lock, cpu); ++} ++ ++#else ++ + static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); + + static void lru_add_drain_per_cpu(struct work_struct *dummy) +@@ -668,6 +690,16 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) + lru_add_drain(); + } + ++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) ++{ ++ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); ++ ++ INIT_WORK(work, lru_add_drain_per_cpu); ++ queue_work_on(cpu, mm_percpu_wq, work); ++ cpumask_set_cpu(cpu, has_work); ++} ++#endif ++ + /* + * Doesn't need any cpu hotplug locking because we do rely on per-cpu + * kworkers 
being shut down before our page_alloc_cpu_dead callback is +@@ -692,21 +724,19 @@ void lru_add_drain_all(void) + cpumask_clear(&has_work); + + for_each_online_cpu(cpu) { +- struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); + + if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || + pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || + pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || + pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) || +- need_activate_page_drain(cpu)) { +- INIT_WORK(work, lru_add_drain_per_cpu); +- queue_work_on(cpu, mm_percpu_wq, work); +- cpumask_set_cpu(cpu, &has_work); +- } ++ need_activate_page_drain(cpu)) ++ remote_lru_add_drain(cpu, &has_work); + } + ++#ifndef CONFIG_PREEMPT_RT_BASE + for_each_cpu(cpu, &has_work) + flush_work(&per_cpu(lru_add_drain_work, cpu)); ++#endif + + mutex_unlock(&lock); + } +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index c3df82a8e..70012c329 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -1717,7 +1717,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) + struct vmap_block *vb; + struct vmap_area *va; + unsigned long vb_idx; +- int node, err; ++ int node, err, cpu; + void *vaddr; + + node = numa_node_id(); +@@ -1760,11 +1760,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) + BUG_ON(err); + radix_tree_preload_end(); + +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = this_cpu_ptr(&vmap_block_queue); + spin_lock(&vbq->lock); + list_add_tail_rcu(&vb->free_list, &vbq->free); + spin_unlock(&vbq->lock); +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + + return vaddr; + } +@@ -1833,6 +1834,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) + struct vmap_block *vb; + void *vaddr = NULL; + unsigned int order; ++ int cpu; + + BUG_ON(offset_in_page(size)); + BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); +@@ -1847,7 +1849,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) + order = get_order(size); + + rcu_read_lock(); +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = this_cpu_ptr(&vmap_block_queue); + list_for_each_entry_rcu(vb, &vbq->free, free_list) { + unsigned long pages_off; + +@@ -1870,7 +1873,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) + break; + } + +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + rcu_read_unlock(); + + /* Allocate new block if nothing was found */ +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 96028cc96..f2f7d9345 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -320,6 +320,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, + long x; + long t; + ++ preempt_disable_rt(); + x = delta + __this_cpu_read(*p); + + t = __this_cpu_read(pcp->stat_threshold); +@@ -329,6 +330,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, + x = 0; + } + __this_cpu_write(*p, x); ++ preempt_enable_rt(); + } + EXPORT_SYMBOL(__mod_zone_page_state); + +@@ -340,6 +342,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, + long x; + long t; + ++ preempt_disable_rt(); + x = delta + __this_cpu_read(*p); + + t = __this_cpu_read(pcp->stat_threshold); +@@ -349,6 +352,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, + x = 0; + } + __this_cpu_write(*p, x); ++ preempt_enable_rt(); + } + EXPORT_SYMBOL(__mod_node_page_state); + +@@ -381,6 +385,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ 
preempt_disable_rt(); + v = __this_cpu_inc_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v > t)) { +@@ -389,6 +394,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) + zone_page_state_add(v + overstep, zone, item); + __this_cpu_write(*p, -overstep); + } ++ preempt_enable_rt(); + } + + void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) +@@ -397,6 +403,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) + s8 __percpu *p = pcp->vm_node_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_inc_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v > t)) { +@@ -405,6 +412,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) + node_page_state_add(v + overstep, pgdat, item); + __this_cpu_write(*p, -overstep); + } ++ preempt_enable_rt(); + } + + void __inc_zone_page_state(struct page *page, enum zone_stat_item item) +@@ -425,6 +433,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_dec_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v < - t)) { +@@ -433,6 +442,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) + zone_page_state_add(v - overstep, zone, item); + __this_cpu_write(*p, overstep); + } ++ preempt_enable_rt(); + } + + void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) +@@ -441,6 +451,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) + s8 __percpu *p = pcp->vm_node_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_dec_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v < - t)) { +@@ -449,6 +460,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) + node_page_state_add(v - overstep, pgdat, item); + __this_cpu_write(*p, overstep); + } ++ preempt_enable_rt(); + } + + void __dec_zone_page_state(struct page *page, enum zone_stat_item item) +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c +index 05d3d5872..ccac527c7 100644 +--- a/mm/zsmalloc.c ++++ b/mm/zsmalloc.c +@@ -56,6 +56,7 @@ + #include + #include + #include ++#include + + #define ZSPAGE_MAGIC 0x58 + +@@ -73,9 +74,22 @@ + */ + #define ZS_MAX_ZSPAGE_ORDER 2 + #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) +- + #define ZS_HANDLE_SIZE (sizeof(unsigned long)) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++struct zsmalloc_handle { ++ unsigned long addr; ++ struct mutex lock; ++}; ++ ++#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle)) ++ ++#else ++ ++#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long)) ++#endif ++ + /* + * Object location (, ) is encoded as + * as single (unsigned long) handle value. 
+@@ -325,7 +339,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} + + static int create_cache(struct zs_pool *pool) + { +- pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, ++ pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE, + 0, 0, NULL); + if (!pool->handle_cachep) + return 1; +@@ -349,10 +363,27 @@ static void destroy_cache(struct zs_pool *pool) + + static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) + { +- return (unsigned long)kmem_cache_alloc(pool->handle_cachep, +- gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); ++ void *p; ++ ++ p = kmem_cache_alloc(pool->handle_cachep, ++ gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (p) { ++ struct zsmalloc_handle *zh = p; ++ ++ mutex_init(&zh->lock); ++ } ++#endif ++ return (unsigned long)p; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle) ++{ ++ return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1)); ++} ++#endif ++ + static void cache_free_handle(struct zs_pool *pool, unsigned long handle) + { + kmem_cache_free(pool->handle_cachep, (void *)handle); +@@ -371,12 +402,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) + + static void record_obj(unsigned long handle, unsigned long obj) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ WRITE_ONCE(zh->addr, obj); ++#else + /* + * lsb of @obj represents handle lock while other bits + * represent object value the handle is pointing so + * updating shouldn't do store tearing. + */ + WRITE_ONCE(*(unsigned long *)handle, obj); ++#endif + } + + /* zpool driver */ +@@ -458,6 +495,7 @@ MODULE_ALIAS("zpool-zsmalloc"); + + /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ + static DEFINE_PER_CPU(struct mapping_area, zs_map_area); ++static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock); + + static bool is_zspage_isolated(struct zspage *zspage) + { +@@ -887,7 +925,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) + + static unsigned long handle_to_obj(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return zh->addr; ++#else + return *(unsigned long *)handle; ++#endif + } + + static unsigned long obj_to_head(struct page *page, void *obj) +@@ -901,22 +945,46 @@ static unsigned long obj_to_head(struct page *page, void *obj) + + static inline int testpin_tag(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return mutex_is_locked(&zh->lock); ++#else + return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle); ++#endif + } + + static inline int trypin_tag(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return mutex_trylock(&zh->lock); ++#else + return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle); ++#endif + } + + static void pin_tag(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return mutex_lock(&zh->lock); ++#else + bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle); ++#endif + } + + static void unpin_tag(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return mutex_unlock(&zh->lock); ++#else + 
bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle); ++#endif + } + + static void reset_page(struct page *page) +@@ -1344,7 +1412,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, + class = pool->size_class[class_idx]; + off = (class->size * obj_idx) & ~PAGE_MASK; + +- area = &get_cpu_var(zs_map_area); ++ area = &get_locked_var(zs_map_area_lock, zs_map_area); + area->vm_mm = mm; + if (off + class->size <= PAGE_SIZE) { + /* this object is contained entirely within a page */ +@@ -1398,7 +1466,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) + + __zs_unmap_object(area, pages, off, class->size); + } +- put_cpu_var(zs_map_area); ++ put_locked_var(zs_map_area_lock, zs_map_area); + + migrate_read_unlock(zspage); + unpin_tag(handle); +diff --git a/mm/zswap.c b/mm/zswap.c +index 6c686888d..ed79b94fa 100644 +--- a/mm/zswap.c ++++ b/mm/zswap.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -990,6 +991,8 @@ static void zswap_fill_page(void *ptr, unsigned long value) + memset_l(page, value, PAGE_SIZE / sizeof(unsigned long)); + } + ++/* protect zswap_dstmem from concurrency */ ++static DEFINE_LOCAL_IRQ_LOCK(zswap_dstmem_lock); + /********************************* + * frontswap hooks + **********************************/ +@@ -1066,12 +1069,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, + } + + /* compress */ +- dst = get_cpu_var(zswap_dstmem); +- tfm = *get_cpu_ptr(entry->pool->tfm); ++ dst = get_locked_var(zswap_dstmem_lock, zswap_dstmem); ++ tfm = *this_cpu_ptr(entry->pool->tfm); + src = kmap_atomic(page); + ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen); + kunmap_atomic(src); +- put_cpu_ptr(entry->pool->tfm); + if (ret) { + ret = -EINVAL; + goto put_dstmem; +@@ -1094,7 +1096,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, + memcpy(buf, &zhdr, hlen); + memcpy(buf + hlen, dst, dlen); + zpool_unmap_handle(entry->pool->zpool, handle); +- put_cpu_var(zswap_dstmem); ++ put_locked_var(zswap_dstmem_lock, zswap_dstmem); + + /* populate entry */ + entry->offset = offset; +@@ -1122,7 +1124,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, + return 0; + + put_dstmem: +- put_cpu_var(zswap_dstmem); ++ put_locked_var(zswap_dstmem_lock, zswap_dstmem); + zswap_pool_put(entry->pool); + freepage: + zswap_entry_cache_free(entry); +diff --git a/net/Kconfig b/net/Kconfig +index 228dfa382..bc8d01996 100644 +--- a/net/Kconfig ++++ b/net/Kconfig +@@ -275,7 +275,7 @@ config CGROUP_NET_CLASSID + + config NET_RX_BUSY_POLL + bool +- default y ++ default y if !PREEMPT_RT_FULL + + config BQL + bool +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c +index c044ff2f7..75bc8102c 100644 +--- a/net/bluetooth/rfcomm/sock.c ++++ b/net/bluetooth/rfcomm/sock.c +@@ -64,15 +64,13 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) + static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) + { + struct sock *sk = d->owner, *parent; +- unsigned long flags; + + if (!sk) + return; + + BT_DBG("dlc %p state %ld err %d", d, d->state, err); + +- local_irq_save(flags); +- bh_lock_sock(sk); ++ spin_lock_bh(&sk->sk_lock.slock); + + if (err) + sk->sk_err = err; +@@ -93,8 +91,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) + sk->sk_state_change(sk); + } + +- bh_unlock_sock(sk); +- local_irq_restore(flags); ++ spin_unlock_bh(&sk->sk_lock.slock); + + if (parent && sock_flag(sk, SOCK_ZAPPED)) { + /* We have 
to drop DLC lock here, otherwise +diff --git a/net/core/dev.c b/net/core/dev.c +index 5bc0e5621..01e8d2453 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -219,14 +219,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) + static inline void rps_lock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_lock(&sd->input_pkt_queue.lock); ++ raw_spin_lock(&sd->input_pkt_queue.raw_lock); + #endif + } + + static inline void rps_unlock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_unlock(&sd->input_pkt_queue.lock); ++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); + #endif + } + +@@ -2723,6 +2723,7 @@ static void __netif_reschedule(struct Qdisc *q) + sd->output_queue_tailp = &q->next_sched; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + void __netif_schedule(struct Qdisc *q) +@@ -2785,6 +2786,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) + __this_cpu_write(softnet_data.completion_queue, skb); + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__dev_kfree_skb_irq); + +@@ -3468,7 +3470,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, + * This permits qdisc->running owner to get the lock more + * often and dequeue packets faster. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ contended = true; ++#else + contended = qdisc_is_running(q); ++#endif + if (unlikely(contended)) + spin_lock(&q->busylock); + +@@ -3829,10 +3835,14 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) + if (dev->flags & IFF_UP) { + int cpu = smp_processor_id(); /* ok because BHs are off */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (READ_ONCE(txq->xmit_lock_owner) != current) { ++#else + /* Other cpus might concurrently change txq->xmit_lock_owner + * to -1 or to their cpu id, but not to our id. + */ + if (READ_ONCE(txq->xmit_lock_owner) != cpu) { ++#endif + if (dev_xmit_recursion()) + goto recursion_alert; + +@@ -4267,6 +4277,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, + rps_unlock(sd); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + + atomic_long_inc(&skb->dev->rx_dropped); + kfree_skb(skb); +@@ -4481,7 +4492,7 @@ static int netif_rx_internal(struct sk_buff *skb) + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu; + +- preempt_disable(); ++ migrate_disable(); + rcu_read_lock(); + + cpu = get_rps_cpu(skb->dev, skb, &rflow); +@@ -4491,14 +4502,14 @@ static int netif_rx_internal(struct sk_buff *skb) + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + + rcu_read_unlock(); +- preempt_enable(); ++ migrate_enable(); + } else + #endif + { + unsigned int qtail; + +- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); +- put_cpu(); ++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); ++ put_cpu_light(); + } + return ret; + } +@@ -4532,11 +4543,9 @@ int netif_rx_ni(struct sk_buff *skb) + + trace_netif_rx_ni_entry(skb); + +- preempt_disable(); ++ local_bh_disable(); + err = netif_rx_internal(skb); +- if (local_softirq_pending()) +- do_softirq(); +- preempt_enable(); ++ local_bh_enable(); + + return err; + } +@@ -5817,12 +5826,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) + sd->rps_ipi_list = NULL; + + local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Send pending IPI's to kick RPS processing on remote cpus. 
*/ + net_rps_send_ipi(remsd); + } else + #endif + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) +@@ -5852,7 +5863,9 @@ static int process_backlog(struct napi_struct *napi, int quota) + while (again) { + struct sk_buff *skb; + ++ local_irq_disable(); + while ((skb = __skb_dequeue(&sd->process_queue))) { ++ local_irq_enable(); + rcu_read_lock(); + __netif_receive_skb(skb); + rcu_read_unlock(); +@@ -5860,9 +5873,9 @@ static int process_backlog(struct napi_struct *napi, int quota) + if (++work >= quota) + return work; + ++ local_irq_disable(); + } + +- local_irq_disable(); + rps_lock(sd); + if (skb_queue_empty(&sd->input_pkt_queue)) { + /* +@@ -5900,6 +5913,7 @@ void __napi_schedule(struct napi_struct *n) + local_irq_save(flags); + ____napi_schedule(this_cpu_ptr(&softnet_data), n); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__napi_schedule); + +@@ -5936,6 +5950,7 @@ bool napi_schedule_prep(struct napi_struct *n) + } + EXPORT_SYMBOL(napi_schedule_prep); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * __napi_schedule_irqoff - schedule for receive + * @n: entry to schedule +@@ -5947,6 +5962,7 @@ void __napi_schedule_irqoff(struct napi_struct *n) + ____napi_schedule(this_cpu_ptr(&softnet_data), n); + } + EXPORT_SYMBOL(__napi_schedule_irqoff); ++#endif + + bool napi_complete_done(struct napi_struct *n, int work_done) + { +@@ -6327,13 +6343,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) + unsigned long time_limit = jiffies + + usecs_to_jiffies(netdev_budget_usecs); + int budget = netdev_budget; ++ struct sk_buff_head tofree_q; ++ struct sk_buff *skb; + LIST_HEAD(list); + LIST_HEAD(repoll); + ++ __skb_queue_head_init(&tofree_q); ++ + local_irq_disable(); ++ skb_queue_splice_init(&sd->tofree_queue, &tofree_q); + list_splice_init(&sd->poll_list, &list); + local_irq_enable(); + ++ while ((skb = __skb_dequeue(&tofree_q))) ++ kfree_skb(skb); ++ + for (;;) { + struct napi_struct *n; + +@@ -6363,7 +6387,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) + list_splice_tail(&repoll, &list); + list_splice(&list, &sd->poll_list); + if (!list_empty(&sd->poll_list)) +- __raise_softirq_irqoff(NET_RX_SOFTIRQ); ++ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ); + + net_rps_action_and_irq_enable(sd); + out: +@@ -8570,7 +8594,7 @@ static void netdev_init_one_queue(struct net_device *dev, + /* Initialize queue lock */ + spin_lock_init(&queue->_xmit_lock); + netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); +- queue->xmit_lock_owner = -1; ++ netdev_queue_clear_owner(queue); + netdev_queue_numa_node_write(queue, NUMA_NO_NODE); + queue->dev = dev; + #ifdef CONFIG_BQL +@@ -9517,6 +9541,7 @@ static int dev_cpu_dead(unsigned int oldcpu) + + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + + #ifdef CONFIG_RPS + remsd = oldsd->rps_ipi_list; +@@ -9530,10 +9555,13 @@ static int dev_cpu_dead(unsigned int oldcpu) + netif_rx_ni(skb); + input_queue_head_incr(oldsd); + } +- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { ++ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { + netif_rx_ni(skb); + input_queue_head_incr(oldsd); + } ++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { ++ kfree_skb(skb); ++ } + + return 0; + } +@@ -9844,8 +9872,9 @@ static int __init net_dev_init(void) + + INIT_WORK(flush, flush_backlog); + +- skb_queue_head_init(&sd->input_pkt_queue); +- skb_queue_head_init(&sd->process_queue); ++ 
skb_queue_head_init_raw(&sd->input_pkt_queue); ++ skb_queue_head_init_raw(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->tofree_queue); + #ifdef CONFIG_XFRM_OFFLOAD + skb_queue_head_init(&sd->xfrm_backlog); + #endif +diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c +index 752744db1..7112e28b4 100644 +--- a/net/core/gen_estimator.c ++++ b/net/core/gen_estimator.c +@@ -46,7 +46,7 @@ + struct net_rate_estimator { + struct gnet_stats_basic_packed *bstats; + spinlock_t *stats_lock; +- seqcount_t *running; ++ net_seqlock_t *running; + struct gnet_stats_basic_cpu __percpu *cpu_bstats; + u8 ewma_log; + u8 intvl_log; /* period : (250ms << intvl_log) */ +@@ -129,7 +129,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, + struct net_rate_estimator __rcu **rate_est, + spinlock_t *lock, +- seqcount_t *running, ++ net_seqlock_t *running, + struct nlattr *opt) + { + struct gnet_estimator *parm = nla_data(opt); +@@ -230,7 +230,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, + struct net_rate_estimator __rcu **rate_est, + spinlock_t *lock, +- seqcount_t *running, struct nlattr *opt) ++ net_seqlock_t *running, struct nlattr *opt) + { + return gen_new_estimator(bstats, cpu_bstats, rate_est, + lock, running, opt); +diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c +index e2fd8baec..8bab88738 100644 +--- a/net/core/gen_stats.c ++++ b/net/core/gen_stats.c +@@ -142,7 +142,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, + } + + void +-__gnet_stats_copy_basic(const seqcount_t *running, ++__gnet_stats_copy_basic(net_seqlock_t *running, + struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b) +@@ -155,10 +155,10 @@ __gnet_stats_copy_basic(const seqcount_t *running, + } + do { + if (running) +- seq = read_seqcount_begin(running); ++ seq = net_seq_begin(running); + bstats->bytes = b->bytes; + bstats->packets = b->packets; +- } while (running && read_seqcount_retry(running, seq)); ++ } while (running && net_seq_retry(running, seq)); + } + EXPORT_SYMBOL(__gnet_stats_copy_basic); + +@@ -176,7 +176,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic); + * if the room in the socket buffer was not sufficient. 
+ */ + int +-gnet_stats_copy_basic(const seqcount_t *running, ++gnet_stats_copy_basic(net_seqlock_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b) +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index 092fa3d75..9d472d626 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -2160,7 +2160,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) + s64 remaining; + struct hrtimer_sleeper t; + +- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS, ++ current); + hrtimer_set_expires(&t.timer, spin_until); + + remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); +@@ -2175,7 +2176,6 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) + } while (ktime_compare(end_time, spin_until) < 0); + } else { + /* see do_nanosleep */ +- hrtimer_init_sleeper(&t, current); + do { + set_current_state(TASK_INTERRUPTIBLE); + hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index e1daab49b..dc637cefc 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -63,6 +63,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -330,6 +331,8 @@ struct napi_alloc_cache { + + static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); + static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); ++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); ++static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock); + + static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +@@ -337,10 +340,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + unsigned long flags; + void *data; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + nc = this_cpu_ptr(&netdev_alloc_cache); + data = page_frag_alloc(nc, fragsz, gfp_mask); +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + return data; + } + +@@ -361,9 +364,13 @@ EXPORT_SYMBOL(netdev_alloc_frag); + + static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; ++ void *data; + +- return page_frag_alloc(&nc->page, fragsz, gfp_mask); ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); ++ data = page_frag_alloc(&nc->page, fragsz, gfp_mask); ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); ++ return data; + } + + void *napi_alloc_frag(unsigned int fragsz) +@@ -416,13 +423,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, + if (sk_memalloc_socks()) + gfp_mask |= __GFP_MEMALLOC; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + + nc = this_cpu_ptr(&netdev_alloc_cache); + data = page_frag_alloc(nc, len, gfp_mask); + pfmemalloc = nc->pfmemalloc; + +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + + if (unlikely(!data)) + return NULL; +@@ -466,6 +473,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + struct napi_alloc_cache *nc; + struct sk_buff *skb; + void *data; ++ bool pfmemalloc; + + len += NET_SKB_PAD + NET_IP_ALIGN; + +@@ -488,7 +496,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + if (sk_memalloc_socks()) + gfp_mask |= __GFP_MEMALLOC; + ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + data 
= page_frag_alloc(&nc->page, len, gfp_mask); ++ pfmemalloc = nc->page.pfmemalloc; ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + if (unlikely(!data)) + return NULL; + +@@ -499,7 +510,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + } + + /* use OR instead of assignment to avoid clearing of bits in mask */ +- if (nc->page.pfmemalloc) ++ if (pfmemalloc) + skb->pfmemalloc = 1; + skb->head_frag = 1; + +@@ -731,23 +742,26 @@ void __consume_stateless_skb(struct sk_buff *skb) + + void __kfree_skb_flush(void) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; + ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + /* flush skb_cache if containing objects */ + if (nc->skb_count) { + kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count, + nc->skb_cache); + nc->skb_count = 0; + } ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + } + + static inline void _kfree_skb_defer(struct sk_buff *skb) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; + + /* drop skb->head and call any destructors for packet */ + skb_release_all(skb); + ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + /* record skb to CPU local list */ + nc->skb_cache[nc->skb_count++] = skb; + +@@ -762,6 +776,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb) + nc->skb_cache); + nc->skb_count = 0; + } ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + } + void __kfree_skb_defer(struct sk_buff *skb) + { +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index 8d68298b0..f1b82db62 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -77,6 +77,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -204,6 +205,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; + * + * On SMP we have one ICMP socket per-cpu. + */ ++static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock); ++ + static struct sock *icmp_sk(struct net *net) + { + return *this_cpu_ptr(net->ipv4.icmp_sk); +@@ -214,12 +217,16 @@ static inline struct sock *icmp_xmit_lock(struct net *net) + { + struct sock *sk; + ++ if (!local_trylock(icmp_sk_lock)) ++ return NULL; ++ + sk = icmp_sk(net); + + if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { + /* This can happen if the output path signals a + * dst_link_failure() for an outgoing ICMP packet. + */ ++ local_unlock(icmp_sk_lock); + return NULL; + } + return sk; +@@ -228,6 +235,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net) + static inline void icmp_xmit_unlock(struct sock *sk) + { + spin_unlock(&sk->sk_lock.slock); ++ local_unlock(icmp_sk_lock); + } + + int sysctl_icmp_msgs_per_sec __read_mostly = 1000; +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index f1ad62ad5..11ec432ed 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -62,6 +62,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -637,6 +638,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) + } + EXPORT_SYMBOL(tcp_v4_send_check); + ++static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock); + /* + * This routine will send an RST to the other tcp. + * +@@ -771,6 +773,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) + arg.tos = ip_hdr(skb)->tos; + arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? 
sk : NULL); + local_bh_disable(); ++ local_lock(tcp_sk_lock); + ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); + if (sk) + ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? +@@ -783,6 +786,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) + ctl_sk->sk_mark = 0; + __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); + __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); ++ local_unlock(tcp_sk_lock); + local_bh_enable(); + + #ifdef CONFIG_TCP_MD5SIG +@@ -863,6 +867,7 @@ static void tcp_v4_send_ack(const struct sock *sk, + arg.tos = tos; + arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); + local_bh_disable(); ++ local_lock(tcp_sk_lock); + ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); + if (sk) + ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? +@@ -874,6 +879,7 @@ static void tcp_v4_send_ack(const struct sock *sk, + + ctl_sk->sk_mark = 0; + __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); ++ local_unlock(tcp_sk_lock); + local_bh_enable(); + } + +diff --git a/net/netfilter/core.c b/net/netfilter/core.c +index 3f0bdc728..2c6978d88 100644 +--- a/net/netfilter/core.c ++++ b/net/netfilter/core.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -27,6 +28,11 @@ + + #include "nf_internals.h" + ++#ifdef CONFIG_PREEMPT_RT_BASE ++DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); ++EXPORT_PER_CPU_SYMBOL(xt_write_lock); ++#endif ++ + const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly; + EXPORT_SYMBOL_GPL(nf_ipv6_ops); + +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index fbf7d5ef8..b22fc95e0 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -63,6 +63,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -668,7 +669,7 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t) + if (BLOCK_NUM_PKTS(pbd)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... */ +- cpu_relax(); ++ cpu_chill(); + } + } + +@@ -930,7 +931,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, + if (!(status & TP_STATUS_BLK_TMO)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... 
*/ +- cpu_relax(); ++ cpu_chill(); + } + } + prb_close_block(pkc, pbd, po, status); +diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c +index 0b347f46b..f395f0603 100644 +--- a/net/rds/ib_rdma.c ++++ b/net/rds/ib_rdma.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #include "rds_single_path.h" + #include "ib_mr.h" +@@ -222,7 +223,7 @@ static inline void wait_clean_list_grace(void) + for_each_online_cpu(cpu) { + flag = &per_cpu(clean_list_grace, cpu); + while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) +- cpu_relax(); ++ cpu_chill(); + } + } + +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index 2c9aa5507..0e6f03129 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -1173,7 +1173,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, + rcu_assign_pointer(sch->stab, stab); + } + if (tca[TCA_RATE]) { +- seqcount_t *running; ++ net_seqlock_t *running; + + err = -EOPNOTSUPP; + if (sch->flags & TCQ_F_MQROOT) { +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index c2276a3c9..bcb82fa3c 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -571,7 +571,11 @@ struct Qdisc noop_qdisc = { + .ops = &noop_qdisc_ops, + .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), + .dev_queue = &noop_netdev_queue, ++#ifdef CONFIG_PREEMPT_RT_BASE ++ .running = __SEQLOCK_UNLOCKED(noop_qdisc.running), ++#else + .running = SEQCNT_ZERO(noop_qdisc.running), ++#endif + .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), + .gso_skb = { + .next = (struct sk_buff *)&noop_qdisc.gso_skb, +@@ -872,9 +876,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, + lockdep_set_class(&sch->busylock, + dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++ seqlock_init(&sch->running); ++ lockdep_set_class(&sch->running.seqcount, ++ dev->qdisc_running_key ?: &qdisc_running_key); ++ lockdep_set_class(&sch->running.lock, ++ dev->qdisc_running_key ?: &qdisc_running_key); ++#else + seqcount_init(&sch->running); + lockdep_set_class(&sch->running, + dev->qdisc_running_key ?: &qdisc_running_key); ++#endif + + sch->ops = ops; + sch->flags = ops->static_flags; +@@ -1220,7 +1232,7 @@ void dev_deactivate_many(struct list_head *head) + /* Wait for outstanding qdisc_run calls. 
*/ + list_for_each_entry(dev, head, close_list) { + while (some_qdisc_is_busy(dev)) +- yield(); ++ msleep(1); + /* The new qdisc is assigned at this point so we can safely + * unwind stale skb lists and qdisc statistics + */ +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c +index 0ab649e02..a6b8ba031 100644 +--- a/net/sunrpc/svc_xprt.c ++++ b/net/sunrpc/svc_xprt.c +@@ -406,7 +406,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) + if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) + return; + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = svc_pool_for_cpu(xprt->xpt_server, cpu); + + atomic_long_inc(&pool->sp_stats.packets); +@@ -430,7 +430,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) + rqstp = NULL; + out_unlock: + rcu_read_unlock(); +- put_cpu(); ++ put_cpu_light(); + trace_svc_xprt_do_enqueue(xprt, rqstp); + } + EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); +diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c +index a00ec715a..a97997385 100644 +--- a/net/xfrm/xfrm_ipcomp.c ++++ b/net/xfrm/xfrm_ipcomp.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -36,6 +37,7 @@ struct ipcomp_tfms { + + static DEFINE_MUTEX(ipcomp_resource_mutex); + static void * __percpu *ipcomp_scratches; ++static DEFINE_LOCAL_IRQ_LOCK(ipcomp_scratches_lock); + static int ipcomp_scratch_users; + static LIST_HEAD(ipcomp_tfms_list); + +@@ -45,12 +47,15 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) + const int plen = skb->len; + int dlen = IPCOMP_SCRATCH_SIZE; + const u8 *start = skb->data; +- const int cpu = get_cpu(); +- u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); +- struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); +- int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); +- int len; ++ u8 *scratch; ++ struct crypto_comp *tfm; ++ int err, len; ++ ++ local_lock(ipcomp_scratches_lock); + ++ scratch = *this_cpu_ptr(ipcomp_scratches); ++ tfm = *this_cpu_ptr(ipcd->tfms); ++ err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); + if (err) + goto out; + +@@ -103,7 +108,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) + err = 0; + + out: +- put_cpu(); ++ local_unlock(ipcomp_scratches_lock); + return err; + } + +@@ -146,6 +151,8 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) + int err; + + local_bh_disable(); ++ local_lock(ipcomp_scratches_lock); ++ + scratch = *this_cpu_ptr(ipcomp_scratches); + tfm = *this_cpu_ptr(ipcd->tfms); + err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); +@@ -158,12 +165,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) + } + + memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); ++ local_unlock(ipcomp_scratches_lock); + local_bh_enable(); + + pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); + return 0; + + out: ++ local_unlock(ipcomp_scratches_lock); + local_bh_enable(); + return err; + } +diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c +index 552269210..8b4be8e18 100644 +--- a/samples/trace_events/trace-events-sample.c ++++ b/samples/trace_events/trace-events-sample.c +@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt) + + /* Silly tracepoints */ + trace_foo_bar("hello", cnt, array, random_strings[len], +- ¤t->cpus_allowed); ++ current->cpus_ptr); + + trace_foo_with_template_simple("HELLO", cnt); + +diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h +index 87f1fc980..f67b15236 100755 +--- 
a/scripts/mkcompile_h ++++ b/scripts/mkcompile_h +@@ -5,7 +5,8 @@ TARGET=$1 + ARCH=$2 + SMP=$3 + PREEMPT=$4 +-CC=$5 ++RT=$5 ++CC=$6 + + vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } + +@@ -53,6 +54,7 @@ UTS_VERSION="#$VERSION" + CONFIG_FLAGS="" + if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi + if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi ++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi + UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" + + # Truncate to maximum length +diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h +index b6380c5f0..12abfddb1 100644 +--- a/security/apparmor/include/path.h ++++ b/security/apparmor/include/path.h +@@ -40,8 +40,10 @@ struct aa_buffers { + + #include + #include ++#include + + DECLARE_PER_CPU(struct aa_buffers, aa_buffers); ++DECLARE_LOCAL_IRQ_LOCK(aa_buffers_lock); + + #define ASSIGN(FN, A, X, N) ((X) = FN(A, N)) + #define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/ +@@ -51,7 +53,17 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers); + + #define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++) + +-#ifdef CONFIG_DEBUG_PREEMPT ++#ifdef CONFIG_PREEMPT_RT_BASE ++static inline void AA_BUG_PREEMPT_ENABLED(const char *s) ++{ ++ struct local_irq_lock *lv; ++ ++ lv = this_cpu_ptr(&aa_buffers_lock); ++ WARN_ONCE(lv->owner != current, ++ "__get_buffer without aa_buffers_lock\n"); ++} ++ ++#elif defined(CONFIG_DEBUG_PREEMPT) + #define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X) + #else + #define AA_BUG_PREEMPT_ENABLED(X) /* nop */ +@@ -67,14 +79,15 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers); + + #define get_buffers(X...) \ + do { \ +- struct aa_buffers *__cpu_var = get_cpu_ptr(&aa_buffers); \ ++ struct aa_buffers *__cpu_var; \ ++ __cpu_var = get_locked_ptr(aa_buffers_lock, &aa_buffers); \ + __get_buffers(__cpu_var, X); \ + } while (0) + + #define put_buffers(X, Y...) 
\ + do { \ + __put_buffers(X, Y); \ +- put_cpu_ptr(&aa_buffers); \ ++ put_locked_ptr(aa_buffers_lock, &aa_buffers); \ + } while (0) + + #endif /* __AA_PATH_H */ +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c +index 8b8b70620..8330ef57a 100644 +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -45,7 +45,7 @@ + int apparmor_initialized; + + DEFINE_PER_CPU(struct aa_buffers, aa_buffers); +- ++DEFINE_LOCAL_IRQ_LOCK(aa_buffers_lock); + + /* + * LSM hook functions +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c +index 0f8cfc95a..767894d24 100644 +--- a/virt/kvm/arm/arch_timer.c ++++ b/virt/kvm/arm/arch_timer.c +@@ -67,7 +67,7 @@ static inline bool userspace_irqchip(struct kvm *kvm) + static void soft_timer_start(struct hrtimer *hrt, u64 ns) + { + hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), +- HRTIMER_MODE_ABS); ++ HRTIMER_MODE_ABS_HARD); + } + + static void soft_timer_cancel(struct hrtimer *hrt) +@@ -611,10 +611,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) + update_vtimer_cntvoff(vcpu, kvm_phys_timer_read()); + vcpu_ptimer(vcpu)->cntvoff = 0; + +- hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); + timer->bg_timer.function = kvm_bg_timer_expire; + +- hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); + timer->phys_timer.function = kvm_phys_timer_expire; + + vtimer->irq.irq = default_vtimer_irq.irq; +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c +index 127177987..0948a646e 100644 +--- a/virt/kvm/arm/arm.c ++++ b/virt/kvm/arm/arm.c +@@ -809,7 +809,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) + * involves poking the GIC, which must be done in a + * non-preemptible context. + */ +- preempt_disable(); ++ migrate_disable(); + + kvm_pmu_flush_hwstate(vcpu); + +@@ -858,7 +858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) + kvm_timer_sync_hwstate(vcpu); + kvm_vgic_sync_hwstate(vcpu); + local_irq_enable(); +- preempt_enable(); ++ migrate_enable(); + continue; + } + +@@ -936,7 +936,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) + /* Exit types that need handling before we can be preempted */ + handle_exit_early(vcpu, run, ret); + +- preempt_enable(); ++ migrate_enable(); + + ret = handle_exit(vcpu, run, ret); + update_vcpu_stat_time(&vcpu->stat);