From cd778763fc94f0b2c0c6c9bb4dce22e67dd459a2 Mon Sep 17 00:00:00 2001
From: zhangyu <zhangyu4@kylinos.cn>
Date: Fri, 3 Nov 2023 17:06:46 +0800
Subject: [PATCH] patch-4.19.90-2203.3.0-rt103

---
 arch/Kconfig | 1 +
 arch/alpha/include/asm/spinlock_types.h | 4 -
 arch/arm/Kconfig | 5 +-
 arch/arm/configs/at91_dt_defconfig | 2 +-
 arch/arm/configs/sama5_defconfig | 2 +-
 arch/arm/include/asm/irq.h | 2 +
 arch/arm/include/asm/spinlock_types.h | 4 -
 arch/arm/include/asm/switch_to.h | 8 +
 arch/arm/include/asm/thread_info.h | 8 +-
 arch/arm/kernel/asm-offsets.c | 1 +
 arch/arm/kernel/entry-armv.S | 19 +-
 arch/arm/kernel/entry-common.S | 9 +-
 arch/arm/kernel/signal.c | 3 +-
 arch/arm/mach-at91/Kconfig | 25 +
 arch/arm/mach-exynos/platsmp.c | 12 +-
 arch/arm/mach-hisi/platmcpm.c | 22 +-
 arch/arm/mach-imx/cpuidle-imx6q.c | 10 +-
 arch/arm/mach-omap2/omap-smp.c | 10 +-
 arch/arm/mach-prima2/platsmp.c | 10 +-
 arch/arm/mach-qcom/platsmp.c | 10 +-
 arch/arm/mach-spear/platsmp.c | 10 +-
 arch/arm/mach-sti/platsmp.c | 10 +-
 arch/arm/mm/fault.c | 6 +
 arch/arm/mm/highmem.c | 58 +-
 arch/arm/plat-versatile/platsmp.c | 10 +-
 arch/arm64/Kconfig | 1 +
 arch/arm64/configs/openeuler_defconfig | 5 +-
 arch/arm64/crypto/Kconfig | 28 +-
 arch/arm64/include/asm/alternative.h | 6 +
 arch/arm64/include/asm/spinlock_types.h | 4 -
 arch/arm64/include/asm/thread_info.h | 17 +-
 arch/arm64/kernel/alternative.c | 1 +
 arch/arm64/kernel/asm-offsets.c | 1 +
 arch/arm64/kernel/entry.S | 12 +-
 arch/arm64/kernel/fpsimd.c | 31 +-
 arch/arm64/kernel/signal.c | 2 +-
 arch/arm64/kvm/va_layout.c | 7 +-
 arch/hexagon/include/asm/spinlock_types.h | 4 -
 arch/ia64/include/asm/spinlock_types.h | 4 -
 arch/ia64/kernel/mca.c | 2 +-
 arch/mips/Kconfig | 2 +-
 arch/mips/include/asm/switch_to.h | 4 +-
 arch/mips/kernel/mips-mt-fpaff.c | 2 +-
 arch/mips/kernel/traps.c | 6 +-
 arch/powerpc/Kconfig | 6 +-
 arch/powerpc/include/asm/spinlock_types.h | 4 -
 arch/powerpc/include/asm/thread_info.h | 18 +-
 arch/powerpc/kernel/asm-offsets.c | 1 +
 arch/powerpc/kernel/entry_32.S | 29 +-
 arch/powerpc/kernel/entry_64.S | 28 +-
 arch/powerpc/kernel/irq.c | 2 +
 arch/powerpc/kernel/misc_32.S | 2 +
 arch/powerpc/kernel/misc_64.S | 2 +
 arch/powerpc/kvm/Kconfig | 1 +
 arch/powerpc/platforms/cell/spufs/sched.c | 2 +-
 arch/powerpc/platforms/ps3/device-init.c | 4 +-
 arch/powerpc/platforms/pseries/iommu.c | 16 +-
 arch/s390/include/asm/spinlock_types.h | 4 -
 arch/sh/include/asm/spinlock_types.h | 4 -
 arch/sh/kernel/irq.c | 2 +
 arch/sparc/kernel/irq_64.c | 2 +
 arch/x86/Kconfig | 8 +-
 arch/x86/configs/openeuler_defconfig | 3 +-
 arch/x86/crypto/aesni-intel_glue.c | 22 +-
 arch/x86/crypto/cast5_avx_glue.c | 21 +-
 arch/x86/crypto/chacha20_glue.c | 9 +-
 arch/x86/crypto/glue_helper.c | 31 +-
 arch/x86/entry/common.c | 11 +-
 arch/x86/entry/entry_32.S | 17 +
 arch/x86/entry/entry_64.S | 18 +
 arch/x86/include/asm/fpu/api.h | 1 +
 arch/x86/include/asm/preempt.h | 33 +-
 arch/x86/include/asm/signal.h | 13 +
 arch/x86/include/asm/stackprotector.h | 8 +-
 arch/x86/include/asm/thread_info.h | 11 +
 arch/x86/kernel/apic/io_apic.c | 16 +-
 arch/x86/kernel/asm-offsets.c | 2 +
 arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 2 +-
 arch/x86/kernel/fpu/core.c | 12 +
 arch/x86/kernel/fpu/signal.c | 2 +
 arch/x86/kernel/irq_32.c | 2 +
 arch/x86/kernel/process_32.c | 32 +
 arch/x86/kvm/lapic.c | 2 +-
 arch/x86/kvm/x86.c | 7 +
 arch/x86/mm/highmem_32.c | 13 +-
 arch/x86/mm/iomap_32.c | 11 +-
 arch/x86/mm/pageattr.c | 8 +
 arch/x86/platform/efi/efi_64.c | 10 +-
 arch/xtensa/include/asm/spinlock_types.h | 4 -
 block/blk-core.c | 15 +-
 block/blk-ioc.c | 5 +-
 block/blk-mq.c | 36 +-
 block/blk-mq.h | 4 +-
 block/blk-softirq.c | 3 +
 crypto/cryptd.c | 10 +-
 crypto/scompress.c | 6 +-
 drivers/block/zram/zcomp.c | 13 +-
 drivers/block/zram/zcomp.h | 1 +
 drivers/block/zram/zram_drv.c | 43 +-
 drivers/block/zram/zram_drv.h | 3 +
 drivers/char/random.c | 11 +-
 drivers/char/tpm/tpm_tis.c | 29 +-
 drivers/clocksource/Kconfig | 13 +-
 drivers/clocksource/Makefile | 3 +-
 drivers/clocksource/tcb_clksrc.c | 69 +-
 drivers/clocksource/timer-atmel-tcb.c | 617 ++++++++++++
 drivers/connector/cn_proc.c | 6 +-
 drivers/cpufreq/Kconfig.x86 | 2 +-
 drivers/crypto/caam/qi.c | 43 +-
 drivers/crypto/caam/qi.h | 2 +-
 drivers/dma-buf/dma-buf.c | 8 +-
 drivers/dma-buf/reservation.c | 43 +-
 drivers/firmware/efi/efi.c | 5 +-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 +-
 drivers/gpu/drm/i915/i915_gem.c | 10 +-
 drivers/gpu/drm/i915/i915_irq.c | 2 +
 drivers/gpu/drm/i915/i915_request.c | 8 +-
 drivers/gpu/drm/i915/i915_trace.h | 6 +-
 drivers/gpu/drm/i915/intel_sprite.c | 13 +-
 drivers/gpu/drm/radeon/radeon_display.c | 2 +
 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 -
 drivers/hv/hv.c | 4 +-
 drivers/hv/hyperv_vmbus.h | 1 +
 drivers/hv/vmbus_drv.c | 4 +-
 drivers/i2c/busses/i2c-exynos5.c | 4 +-
 drivers/i2c/busses/i2c-hix5hd2.c | 3 +-
 drivers/infiniband/hw/hfi1/affinity.c | 6 +-
 drivers/infiniband/hw/hfi1/sdma.c | 3 +-
 drivers/infiniband/hw/qib/qib_file_ops.c | 7 +-
 drivers/leds/trigger/Kconfig | 1 +
 drivers/md/bcache/Kconfig | 1 +
 drivers/md/dm-rq.c | 1 -
 drivers/md/raid5.c | 8 +-
 drivers/md/raid5.h | 1 +
 drivers/misc/Kconfig | 12 +-
 drivers/net/phy/fixed_phy.c | 25 +-
 .../wireless/intersil/orinoco/orinoco_usb.c | 4 +-
 drivers/of/base.c | 19 +-
 drivers/pci/switch/switchtec.c | 24 +-
 drivers/scsi/fcoe/fcoe.c | 16 +-
 drivers/scsi/fcoe/fcoe_ctlr.c | 4 +-
 drivers/scsi/libfc/fc_exch.c | 4 +-
 drivers/staging/android/vsoc.c | 6 +-
 drivers/thermal/x86_pkg_temp_thermal.c | 24 +-
 drivers/tty/serial/8250/8250_core.c | 11 +-
 drivers/tty/serial/8250/8250_port.c | 5 +-
 drivers/tty/serial/amba-pl011.c | 17 +-
 drivers/tty/serial/omap-serial.c | 12 +-
 drivers/tty/sysrq.c | 6 +-
 drivers/usb/core/hcd.c | 3 -
 drivers/usb/gadget/function/f_fs.c | 2 +-
 drivers/usb/gadget/legacy/inode.c | 4 +-
 drivers/watchdog/watchdog_dev.c | 8 +-
 fs/aio.c | 13 +-
 fs/autofs/expire.c | 3 +-
 fs/buffer.c | 21 +-
 fs/cifs/readdir.c | 2 +-
 fs/dcache.c | 50 +-
 fs/eventpoll.c | 4 +-
 fs/exec.c | 2 +
 fs/ext4/page-io.c | 6 +-
 fs/fscache/cookie.c | 8 +
 fs/fscache/main.c | 1 +
 fs/fuse/dir.c | 2 +-
 fs/inode.c | 2 +-
 fs/locks.c | 32 +-
 fs/namei.c | 4 +-
 fs/namespace.c | 8 +-
 fs/nfs/delegation.c | 4 +-
 fs/nfs/dir.c | 12 +-
 fs/nfs/inode.c | 4 +
 fs/nfs/nfs4_fs.h | 2 +-
 fs/nfs/nfs4proc.c | 4 +-
 fs/nfs/nfs4state.c | 22 +-
 fs/nfs/unlink.c | 35 +-
 fs/ntfs/aops.c | 10 +-
 fs/proc/array.c | 4 +-
 fs/proc/base.c | 3 +-
 fs/proc/proc_sysctl.c | 2 +-
 fs/squashfs/decompressor_multi_percpu.c | 16 +-
 fs/timerfd.c | 6 +-
 fs/userfaultfd.c | 12 +-
 include/asm-generic/percpu.h | 1 +
 include/linux/blk-cgroup.h | 2 +-
 include/linux/blk-mq.h | 1 +
 include/linux/blkdev.h | 5 +
 include/linux/bottom_half.h | 34 +
 include/linux/buffer_head.h | 42 +
 include/linux/cgroup-defs.h | 2 +
 include/linux/completion.h | 8 +-
 include/linux/cpu.h | 1 +
 include/linux/dcache.h | 4 +-
 include/linux/delay.h | 6 +
 include/linux/fs.h | 2 +-
 include/linux/fscache.h | 1 +
 include/linux/highmem.h | 32 +-
 include/linux/hrtimer.h | 29 +-
 include/linux/idr.h | 5 +-
 include/linux/interrupt.h | 63 +-
 include/linux/irq.h | 4 +-
 include/linux/irq_work.h | 8 +
 include/linux/irqdesc.h | 1 +
 include/linux/irqflags.h | 23 +-
 include/linux/jbd2.h | 24 +
 include/linux/kdb.h | 2 +
 include/linux/kernel.h | 4 +
 include/linux/kthread.h | 4 +-
 include/linux/list_bl.h | 30 +-
 include/linux/locallock.h | 282 ++++++
 include/linux/mm_types.h | 4 +
 include/linux/mutex.h | 20 +-
 include/linux/mutex_rt.h | 130 +++
 include/linux/netdevice.h | 101 +-
 include/linux/netfilter/x_tables.h | 7 +
 include/linux/nfs_fs.h | 4 +
 include/linux/nfs_xdr.h | 2 +-
 include/linux/percpu-rwsem.h | 24 +-
 include/linux/percpu.h | 29 +
 include/linux/pid.h | 1 +
 include/linux/posix-timers.h | 3 +-
 include/linux/preempt.h | 107 +-
 include/linux/printk.h | 2 +
 include/linux/radix-tree.h | 7 +-
 include/linux/random.h | 2 +-
 include/linux/rbtree.h | 2 +-
 include/linux/rcu_assign_pointer.h | 54 +
 include/linux/rcupdate.h | 91 +-
 include/linux/rcutree.h | 8 +
 include/linux/reservation.h | 4 +-
 include/linux/rtmutex.h | 22 +-
 include/linux/rwlock_rt.h | 119 +++
 include/linux/rwlock_types.h | 4 +
 include/linux/rwlock_types_rt.h | 55 +
 include/linux/rwsem.h | 11 +
 include/linux/rwsem_rt.h | 69 ++
 include/linux/sched.h | 163 ++-
 include/linux/sched/mm.h | 11 +
 include/linux/sched/task.h | 16 +
 include/linux/sched/wake_q.h | 27 +-
 include/linux/seqlock.h | 66 +-
 include/linux/signal.h | 1 +
 include/linux/skbuff.h | 7 +
 include/linux/smp.h | 3 +
 include/linux/spinlock.h | 12 +-
 include/linux/spinlock_api_smp.h | 4 +-
 include/linux/spinlock_rt.h | 156 +++
 include/linux/spinlock_types.h | 76 +-
 include/linux/spinlock_types_nort.h | 33 +
 include/linux/spinlock_types_raw.h | 55 +
 include/linux/spinlock_types_rt.h | 48 +
 include/linux/spinlock_types_up.h | 4 -
 include/linux/stop_machine.h | 2 +
 include/linux/suspend.h | 6 +
 include/linux/swait.h | 16 +
 include/linux/swap.h | 2 +
 include/linux/swork.h | 24 +
 include/linux/thread_info.h | 12 +-
 include/linux/timer.h | 2 +-
 include/linux/trace_events.h | 2 +
 include/linux/uaccess.h | 2 +
 include/linux/vmstat.h | 4 +
 include/linux/wait.h | 5 +-
 include/linux/workqueue.h | 4 -
 include/net/gen_stats.h | 9 +-
 include/net/neighbour.h | 6 +-
 include/net/net_seq_lock.h | 15 +
 include/net/sch_generic.h | 19 +-
 include/soc/at91/atmel_tcb.h | 183 ++++
 init/Kconfig | 5 +-
 init/Makefile | 2 +-
 init/init_task.c | 14 +-
 init/main.c | 1 +
 kernel/Kconfig.locks | 4 +-
 kernel/Kconfig.preempt | 39 +-
 kernel/cgroup/cgroup.c | 9 +-
 kernel/cgroup/cpuset.c | 72 +-
 kernel/cgroup/rstat.c | 5 +-
 kernel/cpu.c | 44 +
 kernel/debug/kdb/kdb_io.c | 2 +
 kernel/events/core.c | 4 +-
 kernel/exit.c | 2 +-
 kernel/fork.c | 45 +-
 kernel/futex.c | 126 ++-
 kernel/irq/handle.c | 8 +-
 kernel/irq/manage.c | 13 +-
 kernel/irq/settings.h | 12 +
 kernel/irq/spurious.c | 8 +
 kernel/irq_work.c | 76 +-
 kernel/ksysfs.c | 12 +
 kernel/kthread.c | 44 +-
 kernel/locking/Makefile | 9 +-
 kernel/locking/lockdep.c | 2 +
 kernel/locking/locktorture.c | 1 -
 kernel/locking/mutex-rt.c | 223 +++++
 kernel/locking/rtmutex.c | 944 ++++++++++++++++--
 kernel/locking/rtmutex_common.h | 31 +-
 kernel/locking/rwlock-rt.c | 384 +++++++
 kernel/locking/rwsem-rt.c | 312 ++++++
 kernel/locking/spinlock.c | 7 +
 kernel/locking/spinlock_debug.c | 5 +
 kernel/panic.c | 2 +
 kernel/power/hibernate.c | 7 +
 kernel/power/suspend.c | 4 +
 kernel/printk/printk.c | 160 ++-
 kernel/printk/printk.c.rej | 23 +
 kernel/ptrace.c | 32 +-
 kernel/rcu/Kconfig | 6 +-
 kernel/rcu/rcu.h | 11 +-
 kernel/rcu/rcutorture.c | 7 +
 kernel/rcu/srcutree.c | 36 +-
 kernel/rcu/tree.c | 153 ++-
 kernel/rcu/tree.h | 6 +-
 kernel/rcu/tree_exp.h | 9 +-
 kernel/rcu/tree_plugin.h | 156 +--
 kernel/rcu/update.c | 6 +-
 kernel/sched/Makefile | 2 +-
 kernel/sched/completion.c | 34 +-
 kernel/sched/core.c | 431 +++++++-
 kernel/sched/cpudeadline.c | 4 +-
 kernel/sched/cpupri.c | 4 +-
 kernel/sched/deadline.c | 12 +-
 kernel/sched/debug.c | 4 +
 kernel/sched/fair.c | 62 +-
 kernel/sched/features.h | 8 +
 kernel/sched/rt.c | 8 +-
 kernel/sched/sched.h | 13 +
 kernel/sched/swait.c | 22 +-
 kernel/sched/swork.c | 173 ++++
 kernel/sched/topology.c | 1 +
 kernel/signal.c | 114 ++-
 kernel/softirq.c | 744 ++++++++++++--
 kernel/stop_machine.c | 7 +-
 kernel/time/alarmtimer.c | 2 +-
 kernel/time/hrtimer.c | 126 ++-
 kernel/time/itimer.c | 1 +
 kernel/time/jiffies.c | 7 +-
 kernel/time/posix-cpu-timers.c | 181 +++-
 kernel/time/posix-timers.c | 42 +-
 kernel/time/posix-timers.h | 2 +
 kernel/time/tick-broadcast-hrtimer.c | 2 +-
 kernel/time/tick-common.c | 10 +-
 kernel/time/tick-sched.c | 31 +-
 kernel/time/timekeeping.c | 6 +-
 kernel/time/timekeeping.h | 3 +-
 kernel/time/timer.c | 86 +-
 kernel/trace/trace.c | 37 +-
 kernel/trace/trace.h | 2 +
 kernel/trace/trace_events.c | 2 +
 kernel/trace/trace_hwlat.c | 2 +-
 kernel/trace/trace_output.c | 19 +-
 kernel/watchdog.c | 2 +-
 kernel/watchdog_hld.c | 9 +
 kernel/workqueue.c | 293 +++---
 lib/Kconfig | 1 +
 lib/Kconfig.debug | 2 +-
 lib/debugobjects.c | 5 +-
 lib/irq_poll.c | 5 +
 lib/locking-selftest.c | 50 +
 lib/radix-tree.c | 32 +-
 lib/scatterlist.c | 2 +-
 lib/smp_processor_id.c | 7 +-
 lib/ubsan.c | 69 +-
 localversion-rt | 1 +
 mm/Kconfig | 2 +-
 mm/compaction.c | 6 +-
 mm/highmem.c | 6 +-
 mm/kasan/quarantine.c | 18 +-
 mm/kmemleak.c | 92 +-
 mm/memcontrol.c | 28 +-
 mm/mmu_context.c | 2 +
 mm/page_alloc.c | 196 ++--
 mm/slab.c | 94 +-
 mm/slab.h | 2 +-
 mm/slub.c | 139 ++-
 mm/swap.c | 74 +-
 mm/vmalloc.c | 13 +-
 mm/vmstat.c | 12 +
 mm/zsmalloc.c | 80 +-
 mm/zswap.c | 12 +-
 net/Kconfig | 2 +-
 net/bluetooth/rfcomm/sock.c | 7 +-
 net/core/dev.c | 61 +-
 net/core/gen_estimator.c | 6 +-
 net/core/gen_stats.c | 8 +-
 net/core/pktgen.c | 4 +-
 net/core/skbuff.c | 33 +-
 net/ipv4/icmp.c | 8 +
 net/ipv4/tcp_ipv4.c | 6 +
 net/netfilter/core.c | 6 +
 net/packet/af_packet.c | 5 +-
 net/rds/ib_rdma.c | 3 +-
 net/sched/sch_api.c | 2 +-
 net/sched/sch_generic.c | 14 +-
 net/sunrpc/svc_xprt.c | 4 +-
 net/xfrm/xfrm_ipcomp.c | 21 +-
 samples/trace_events/trace-events-sample.c | 2 +-
 scripts/mkcompile_h | 4 +-
 security/apparmor/include/path.h | 19 +-
 security/apparmor/lsm.c | 2 +-
 virt/kvm/arm/arch_timer.c | 6 +-
 virt/kvm/arm/arm.c | 6 +-
 401 files changed, 9515 insertions(+), 2191 deletions(-)
 create mode 100644 drivers/clocksource/timer-atmel-tcb.c
 create mode 100644 include/linux/locallock.h
 create mode 100644 include/linux/mutex_rt.h
 create mode 100644 include/linux/rcu_assign_pointer.h
 create mode 100644 include/linux/rwlock_rt.h
 create mode 100644 include/linux/rwlock_types_rt.h
 create mode 100644 include/linux/rwsem_rt.h
 create mode 100644 include/linux/spinlock_rt.h
 create mode 100644 include/linux/spinlock_types_nort.h
 create mode 100644 include/linux/spinlock_types_raw.h
 create mode 100644 include/linux/spinlock_types_rt.h
 create mode 100644 include/linux/swork.h
 create mode 100644 include/net/net_seq_lock.h
 create mode 100644 include/soc/at91/atmel_tcb.h
 create mode 100644 kernel/locking/mutex-rt.c
 create mode 100644 kernel/locking/rwlock-rt.c
 create mode 100644 kernel/locking/rwsem-rt.c
 create mode 100644 kernel/printk/printk.c.rej
 create mode 100644 kernel/sched/swork.c
 create mode 100644 localversion-rt

diff --git a/arch/Kconfig b/arch/Kconfig
index 00f55932b..9fdf4a803 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -37,6 +37,7 @@ config OPROFILE
 	tristate "OProfile system profiling"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
+	depends on !PREEMPT_RT_FULL
 	select RING_BUFFER
 	select RING_BUFFER_ALLOW_SWAP
 	help
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 1d5716bc0..6883bc952 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -2,10 +2,6 @@
 #ifndef _ALPHA_SPINLOCK_TYPES_H
 #define _ALPHA_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
 typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b9455d212..5cfcb8a41 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -52,7 +52,7 @@ config ARM
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
@@ -91,6 +91,7 @@ config ARM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RSEQ
@@ -2165,7 +2166,7 @@ config NEON
 
 config KERNEL_MODE_NEON
 	bool "Support for NEON in kernel mode"
-	depends on NEON && AEABI
+	depends on NEON && AEABI && !PREEMPT_RT_BASE
 	help
 	  Say Y to include support for NEON in kernel mode.
 
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index e4b1be66b..f4b253bd0 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -19,6 +19,7 @@ CONFIG_ARCH_MULTI_V5=y
 CONFIG_ARCH_AT91=y
 CONFIG_SOC_AT91RM9200=y
 CONFIG_SOC_AT91SAM9=y
+# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set
 CONFIG_AEABI=y
 CONFIG_UACCESS_WITH_MEMCPY=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
@@ -64,7 +65,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=4
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_SSC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 208002555..be92871ab 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -20,6 +20,7 @@ CONFIG_ARCH_AT91=y
 CONFIG_SOC_SAMA5D2=y
 CONFIG_SOC_SAMA5D3=y
 CONFIG_SOC_SAMA5D4=y
+# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set
 CONFIG_AEABI=y
 CONFIG_UACCESS_WITH_MEMCPY=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
@@ -75,7 +76,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=4
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_SSC=y
 CONFIG_EEPROM_AT24=y
 CONFIG_SCSI=y
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 46d41140d..c421b5b81 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -23,6 +23,8 @@
 #endif
 
 #ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
 struct irqaction;
 struct pt_regs;
 
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 597695864..a37c08039 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -2,10 +2,6 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
 #define TICKET_SHIFT	16
 
 typedef struct {
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index d3e937dce..6ab96a2ce 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -4,6 +4,13 @@
 
 #include <linux/thread_info.h>
 
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 /*
  * For v7 SMP cores running a preemptible kernel we may be pre-empted
  * during a TLB maintenance operation, so execute an inner-shareable dsb
@@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 #define switch_to(prev,next,last) \
 do { \
 	__complete_pending_tlbi(); \
+	switch_kmaps(prev, next); \
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
 } while (0)
 
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 8f55dc520..4f834bfca 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -49,6 +49,7 @@ struct cpu_context_save {
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	int			preempt_lazy_count; /* 0 => preemptable, <0 => bug */
 	mm_segment_t		addr_limit;	/* address limit */
 	struct task_struct	*task;		/* main task structure */
 	__u32			cpu;		/* cpu */
@@ -139,7 +140,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
 #define TIF_SYSCALL_TRACE	4	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
-#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
+#define TIF_SECCOMP		8	/* seccomp syscall filtering active */
+#define TIF_NEED_RESCHED_LAZY	7
 
 #define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
@@ -149,6 +151,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
@@ -164,7 +167,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
  * Change these and you break ASM code in entry-common.S
  */
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+				 _TIF_NEED_RESCHED_LAZY)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 3968d6c22..b35d373fc 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -56,6 +56,7 @@ int main(void)
   BLANK();
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
+  DEFINE(TI_PREEMPT_LAZY,	offsetof(struct thread_info, preempt_lazy_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 8de1e1da9..796c667c4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -216,11 +216,18 @@ __irq_svc:
 
 #ifdef CONFIG_PREEMPT
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	teq	r8, #0				@ if preempt count != 0
+	bne	1f				@ return from exeption
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
+	tst	r0, #_TIF_NEED_RESCHED		@ if NEED_RESCHED is set
+	blne	svc_preempt			@ preempt!
+
+	ldr	r8, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	teq	r8, #0				@ if preempt lazy count != 0
 	movne	r0, #0				@ force flags to 0
-	tst	r0, #_TIF_NEED_RESCHED
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	blne	svc_preempt
+1:
 #endif
 
 	svc_exit r5, irq = 1			@ return from exception
@@ -235,8 +242,14 @@ svc_preempt:
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
+	bne	1b
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	reteq	r8				@ go again
-	b	1b
+	ldr	r0, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	teq	r0, #0				@ if preempt lazy count != 0
+	beq	1b
+	ret	r8				@ go again
+
 #endif
 
 __und_fault:
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index e27fc2df5..bc326519f 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -56,7 +56,9 @@ __ret_fast_syscall:
 	cmp	r2, #TASK_SIZE
 	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
+	bne	fast_work_pending
+	tst	r1, #_TIF_SECCOMP
 	bne	fast_work_pending
 
 
@@ -93,8 +95,11 @@ __ret_fast_syscall:
 	cmp	r2, #TASK_SIZE
 	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
+	bne	do_slower_path
+	tst	r1, #_TIF_SECCOMP
 	beq	no_work_pending
+do_slower_path:
 UNWIND(.fnend )
 ENDPROC(ret_fast_syscall)
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index dfe24883c..e1b59f33a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -652,7 +652,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 	 */
 	trace_hardirqs_off();
 	do {
-		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+		if (likely(thread_flags & (_TIF_NEED_RESCHED |
+					   _TIF_NEED_RESCHED_LAZY))) {
 			schedule();
 		} else {
 			if (unlikely(!user_mode(regs)))
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 903f23c30..fa493a86e 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -107,6 +107,31 @@ config SOC_AT91SAM9
 	    AT91SAM9X35
 	    AT91SAM9XE
 
+comment "Clocksource driver selection"
+
+config ATMEL_CLOCKSOURCE_PIT
+	bool "Periodic Interval Timer (PIT) support"
+	depends on SOC_AT91SAM9 || SOC_SAMA5
+	default SOC_AT91SAM9 || SOC_SAMA5
+	select ATMEL_PIT
+	help
+	  Select this to get a clocksource based on the Atmel Periodic Interval
+	  Timer. It has a relatively low resolution and the TC Block clocksource
+	  should be preferred.
+
+config ATMEL_CLOCKSOURCE_TCB
+	bool "Timer Counter Blocks (TCB) support"
+	depends on SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 || COMPILE_TEST
+	default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5
+	depends on !ATMEL_TCLIB
+	select ATMEL_ARM_TCB_CLKSRC
+	help
+	  Select this to get a high precision clocksource based on a
+	  TC block with a 5+ MHz base clock rate.
+	  On platforms with 16-bit counters, two timer channels are combined
+	  to make a single 32-bit timer.
+	  It can also be used as a clock event device supporting oneshot mode.
+
 config HAVE_AT91_UTMI
 	bool
 
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 6a1e68237..17dca0ff3 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -239,7 +239,7 @@ static void write_pen_release(int val)
 	sync_cache_w(&pen_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static void exynos_secondary_init(unsigned int cpu)
 {
@@ -252,8 +252,8 @@ static void exynos_secondary_init(unsigned int cpu)
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
@@ -317,7 +317,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@@ -344,7 +344,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 		if (timeout == 0) {
 			printk(KERN_ERR "cpu1 power enable failed");
-			spin_unlock(&boot_lock);
+			raw_spin_unlock(&boot_lock);
 			return -ETIMEDOUT;
 		}
 	}
@@ -390,7 +390,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * calibrations, then wait for it to finish
 	 */
 fail:
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? ret : 0;
 }
diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
index f66815c3d..00524abd9 100644
--- a/arch/arm/mach-hisi/platmcpm.c
+++ b/arch/arm/mach-hisi/platmcpm.c
@@ -61,7 +61,7 @@
 
 static void __iomem *sysctrl, *fabric;
 static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 static u32 fabric_phys_addr;
 /*
  * [0]: bootwrapper physical address
@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
 	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
 		return -EINVAL;
 
-	spin_lock_irq(&boot_lock);
+	raw_spin_lock_irq(&boot_lock);
 
 	if (hip04_cpu_table[cluster][cpu])
 		goto out;
@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
 
 out:
 	hip04_cpu_table[cluster][cpu]++;
-	spin_unlock_irq(&boot_lock);
+	raw_spin_unlock_irq(&boot_lock);
 
 	return 0;
 }
@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 	hip04_cpu_table[cluster][cpu]--;
 	if (hip04_cpu_table[cluster][cpu] == 1) {
 		/* A power_up request went ahead of us. */
-		spin_unlock(&boot_lock);
+		raw_spin_unlock(&boot_lock);
 		return;
 	} else if (hip04_cpu_table[cluster][cpu] > 1) {
 		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
 	}
 
 	last_man = hip04_cluster_is_down(cluster);
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 	if (last_man) {
 		/* Since it's Cortex A15, disable L2 prefetching. */
 		asm volatile(
@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
 	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
 
 	count = TIMEOUT_MSEC / POLL_MSEC;
-	spin_lock_irq(&boot_lock);
+	raw_spin_lock_irq(&boot_lock);
 	for (tries = 0; tries < count; tries++) {
 		if (hip04_cpu_table[cluster][cpu])
 			goto err;
@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
 		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
 		if (data & CORE_WFI_STATUS(cpu))
 			break;
-		spin_unlock_irq(&boot_lock);
+		raw_spin_unlock_irq(&boot_lock);
 		/* Wait for clean L2 when the whole cluster is down. */
 		msleep(POLL_MSEC);
-		spin_lock_irq(&boot_lock);
+		raw_spin_lock_irq(&boot_lock);
 	}
 	if (tries >= count)
 		goto err;
@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
 		goto err;
 	if (hip04_cluster_is_down(cluster))
 		hip04_set_snoop_filter(cluster, 0);
-	spin_unlock_irq(&boot_lock);
+	raw_spin_unlock_irq(&boot_lock);
 	return 1;
 err:
-	spin_unlock_irq(&boot_lock);
+	raw_spin_unlock_irq(&boot_lock);
 	return 0;
 }
 #endif
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 326e870d7..d9ac80aa1 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -17,22 +17,22 @@
 #include "hardware.h"
 
 static int num_idle_cpus = 0;
-static DEFINE_SPINLOCK(cpuidle_lock);
+static DEFINE_RAW_SPINLOCK(cpuidle_lock);
 
 static int imx6q_enter_wait(struct cpuidle_device *dev,
 			    struct cpuidle_driver *drv, int index)
 {
-	spin_lock(&cpuidle_lock);
+	raw_spin_lock(&cpuidle_lock);
 	if (++num_idle_cpus == num_online_cpus())
 		imx6_set_lpm(WAIT_UNCLOCKED);
-	spin_unlock(&cpuidle_lock);
+	raw_spin_unlock(&cpuidle_lock);
 
 	cpu_do_idle();
 
-	spin_lock(&cpuidle_lock);
+	raw_spin_lock(&cpuidle_lock);
 	if (num_idle_cpus-- == num_online_cpus())
 		imx6_set_lpm(WAIT_CLOCKED);
-	spin_unlock(&cpuidle_lock);
+	raw_spin_unlock(&cpuidle_lock);
 
 	return index;
 }
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 1c73694c8..ac4d2f030 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -69,7 +69,7 @@ static const struct omap_smp_config omap5_cfg __initconst = {
 	.startup_addr = omap5_secondary_startup,
 };
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 void __iomem *omap4_get_scu_base(void)
 {
@@ -177,8 +177,8 @@ static void omap4_secondary_init(unsigned int cpu)
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -191,7 +191,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * Update the AuxCoreBoot0 with boot state for secondary core.
@@ -270,7 +270,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return 0;
 }
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index 75ef5d4be..c17c86e5d 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -22,7 +22,7 @@
 
 static void __iomem *clk_base;
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static void sirfsoc_secondary_init(unsigned int cpu)
 {
@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static const struct of_device_id clk_ids[] = {
@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	/* make sure write buffer is drained */
 	mb();
 
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index 5494c9e0c..e8ce157d3 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -46,7 +46,7 @@
 
 extern void secondary_startup_arm(void);
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 #ifdef CONFIG_HOTPLUG_CPU
 static void qcom_cpu_die(unsigned int cpu)
@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int scss_release_secondary(unsigned int cpu)
@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
 	 * set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * Send the secondary CPU a soft interrupt, thereby causing
@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return ret;
 }
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index 39038a038..6da5c9387 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -32,7 +32,7 @@ static void write_pen_release(int val)
 	sync_cache_w(&pen_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
 
@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
index 231f19e17..a3419b700 100644
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -35,7 +35,7 @@ static void write_pen_release(int val)
 	sync_cache_w(&pen_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static void sti_secondary_init(unsigned int cpu)
 {
@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f49b996ae..726f08332 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -436,6 +436,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
 
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
 	if (user_mode(regs))
 		goto bad_area;
 
@@ -503,6 +506,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
 	do_bad_area(addr, fsr, regs);
 	return 0;
 }
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index d02f8187b..542692dbd 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
 	return *ptep;
 }
 
+static unsigned int fixmap_idx(int type)
+{
+	return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+}
+
 void *kmap(struct page *page)
 {
 	might_sleep();
@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
 
 void *kmap_atomic(struct page *page)
 {
+	pte_t pte = mk_pte(page, kmap_prot);
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
 	int type;
 
-	preempt_disable();
+	preempt_disable_nort();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
 
 	type = kmap_atomic_idx_push();
 
-	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+	idx = fixmap_idx(type);
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
 	 * in place, so the contained TLB flush ensures the TLB is updated
 	 * with the new mapping.
 	 */
-	set_fixmap_pte(idx, mk_pte(page, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_fixmap_pte(idx, pte);
 
 	return (void *)vaddr;
 }
@@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+		idx = fixmap_idx(type);
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
#ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(idx));
-		set_fixmap_pte(idx, __pte(0));
 #else
 		(void) idx;  /* to kill a warning */
 #endif
+		set_fixmap_pte(idx, __pte(0));
 		kmap_atomic_idx_pop();
 	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
 		/* this address was obtained through kmap_high_get() */
 		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
-	preempt_enable();
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
 void *kmap_atomic_pfn(unsigned long pfn)
 {
+	pte_t pte = pfn_pte(pfn, kmap_prot);
 	unsigned long vaddr;
 	int idx, type;
 	struct page *page = pfn_to_page(pfn);
 
-	preempt_disable();
+	preempt_disable_nort();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+	idx = fixmap_idx(type);
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
-	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_fixmap_pte(idx, pte);
 
 	return (void *)vaddr;
 }
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+	int i;
+
+	/*
+	 * Clear @prev's kmap_atomic mappings
+	 */
+	for (i = 0; i < prev_p->kmap_idx; i++) {
+		int idx = fixmap_idx(i);
+
+		set_fixmap_pte(idx, __pte(0));
+	}
+	/*
+	 * Restore @next_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < next_p->kmap_idx; i++) {
+		int idx = fixmap_idx(i);
+
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_fixmap_pte(idx, next_p->kmap_pte[i]);
+	}
+}
+#endif
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index c23665101..6b60f582b 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -32,7 +32,7 @@ static void write_pen_release(int val)
 	sync_cache_w(&pen_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 void versatile_secondary_init(unsigned int cpu)
 {
@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * This is really belt and braces; we hold unintended secondary
@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fa89f762c..70adc74a8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -143,6 +143,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_RCU_TABLE_FREE
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index b4d263c1f..222b1e19e 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -73,8 +73,9 @@ CONFIG_NO_HZ_FULL=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_PREEMPT_NONE is not set
-CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
 # CONFIG_PREEMPT is not set
+CONFIG_PREEMPT_RT_FULL=y
 
 #
 # CPU/Task time and stats accounting
@@ -991,7 +992,7 @@ CONFIG_FRAME_VECTOR=y
 # CONFIG_PERCPU_STATS is not set
 # CONFIG_GUP_BENCHMARK is not set
 CONFIG_ARCH_HAS_PTE_SPECIAL=y
-CONFIG_PIN_MEMORY=y
+# CONFIG_PIN_MEMORY is not set
 CONFIG_PID_RESERVE=y
 CONFIG_NET=y
 CONFIG_COMPAT_NETLINK_MESSAGES=y
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index a5606823e..1c2da1535 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -19,43 +19,43 @@ config CRYPTO_SHA512_ARM64
 
 config CRYPTO_SHA1_ARM64_CE
 	tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_HASH
 	select CRYPTO_SHA1
 
 config CRYPTO_SHA2_ARM64_CE
 	tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_HASH
 	select CRYPTO_SHA256_ARM64
 
 config CRYPTO_SHA512_ARM64_CE
 	tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_HASH
 	select CRYPTO_SHA512_ARM64
 
 config CRYPTO_SHA3_ARM64
 	tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_HASH
 	select CRYPTO_SHA3
 
 config CRYPTO_SM3_ARM64_CE
 	tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_HASH
 	select CRYPTO_SM3
 
 config CRYPTO_SM4_ARM64_CE
 	tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_ALGAPI
 	select CRYPTO_SM4
 
 config CRYPTO_GHASH_ARM64_CE
 	tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_HASH
 	select CRYPTO_GF128MUL
 	select CRYPTO_AES
@@ -63,7 +63,7 @@ config CRYPTO_GHASH_ARM64_CE
 
 config CRYPTO_CRCT10DIF_ARM64_CE
 	tristate "CRCT10DIF digest algorithm using PMULL instructions"
-	depends on KERNEL_MODE_NEON && CRC_T10DIF
+	depends on KERNEL_MODE_NEON && CRC_T10DIF && !PREEMPT_RT_BASE
 	select CRYPTO_HASH
 
 config CRYPTO_AES_ARM64
@@ -72,13 +72,13 @@ config CRYPTO_AES_ARM64
 
 config CRYPTO_AES_ARM64_CE
 	tristate "AES core cipher using ARMv8 Crypto Extensions"
-	depends on ARM64 && KERNEL_MODE_NEON
+	depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_ALGAPI
 	select CRYPTO_AES_ARM64
 
 config CRYPTO_AES_ARM64_CE_CCM
 	tristate "AES in CCM mode using ARMv8 Crypto Extensions"
-	depends on ARM64 && KERNEL_MODE_NEON
+	depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_ALGAPI
 	select CRYPTO_AES_ARM64_CE
 	select CRYPTO_AES_ARM64
@@ -86,7 +86,7 @@ config CRYPTO_AES_ARM64_CE_CCM
 
 config CRYPTO_AES_ARM64_CE_BLK
 	tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES_ARM64_CE
 	select CRYPTO_AES_ARM64
@@ -94,7 +94,7 @@ config CRYPTO_AES_ARM64_CE_BLK
 
 config CRYPTO_AES_ARM64_NEON_BLK
 	tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES_ARM64
 	select CRYPTO_AES
@@ -102,13 +102,13 @@ config CRYPTO_AES_ARM64_NEON_BLK
 
 config CRYPTO_CHACHA20_NEON
 	tristate "NEON accelerated ChaCha20 symmetric cipher"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_CHACHA20
 
 config CRYPTO_AES_ARM64_BS
 	tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
-	depends on KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES_ARM64_NEON_BLK
 	select CRYPTO_AES_ARM64
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c3e4273c1..61d3c4f93 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -35,6 +35,12 @@ void apply_alternatives_module(void *start, size_t length);
 static inline void apply_alternatives_module(void *start, size_t length) { }
 #endif
 
+#ifdef CONFIG_KVM_ARM_HOST
+void kvm_compute_layout(void);
+#else
+static inline void kvm_compute_layout(void) { }
+#endif
+
 #define ALTINSTR_ENTRY(feature) \
 	" .word 661b - .\n" /* label */ \
 	" .word 663f - .\n" /* new instruction */ \
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index a157ff465..f952fdda8 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -16,10 +16,6 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
-# error "please don't include this file directly"
-#endif
-
 #include <asm-generic/qspinlock_types.h>
 #include <asm-generic/qrwlock_types.h>
 
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 6bc5fe80f..c45726e21 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -43,6 +43,7 @@ struct thread_info {
 	u64			ttbr0;		/* saved TTBR0_EL1 */
 #endif
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	int			preempt_lazy_count; /* 0 => preemptable, <0 => bug */
 };
 
 #define thread_saved_pc(tsk)	\
@@ -78,11 +79,12 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
 #define TIF_FSCHECK		5	/* Check FS is USER_DS on return */
 #define TIF_SEA_NOTIFY		6	/* notify to do an error recovery */
-#define TIF_NOHZ		7
-#define TIF_SYSCALL_TRACE	8
-#define TIF_SYSCALL_AUDIT	9
-#define TIF_SYSCALL_TRACEPOINT	10
-#define TIF_SECCOMP		11
+#define TIF_NEED_RESCHED_LAZY	7
+#define TIF_NOHZ		8
+#define TIF_SYSCALL_TRACE	9
+#define TIF_SYSCALL_AUDIT	10
+#define TIF_SYSCALL_TRACEPOINT	11
+#define TIF_SECCOMP		12
 #define TIF_POLLING_NRFLAG	16
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_FREEZE		19
@@ -114,6 +116,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_SVE		(1 << TIF_SVE)
 #define _TIF_32BIT_AARCH64	(1 << TIF_32BIT_AARCH64)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 
 #ifdef CONFIG_UCE_KERNEL_RECOVERY
 #define _TIF_UCE_KERNEL_RECOVERY (1 << TIF_UCE_KERNEL_RECOVERY)
@@ -121,8 +124,10 @@ void arch_release_task_struct(struct task_struct *tsk);
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-				 _TIF_UPROBE | _TIF_FSCHECK | _TIF_SEA_NOTIFY)
+				 _TIF_UPROBE | _TIF_FSCHECK | _TIF_SEA_NOTIFY | \
+				 _TIF_NEED_RESCHED_LAZY)
 
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
 				 _TIF_NOHZ)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index ce5a26080..ddc263dee 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -242,6 +242,7 @@ static int __apply_alternatives_multi_stop(void *unused)
 void __init apply_alternatives_all(void)
 {
 	/* better not try code patching on a live SMP system */
+	kvm_compute_layout();
 	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 6e1847fb4..975b0f41e 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -41,6 +41,7 @@ int main(void)
   BLANK();
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
+  DEFINE(TSK_TI_PREEMPT_LAZY,	offsetof(struct task_struct, thread_info.preempt_lazy_count));
   DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
   DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7c231eb21..409c913aa 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -701,11 +701,16 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 	mrs	x0, daif
 	orr	w24, w24, w0
 alternative_else_nop_endif
-	cbnz	w24, 1f				// preempt count != 0 || NMI return path
+	cbnz	w24, 2f				// preempt count != 0 || NMI return path
 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
-	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
-	bl	el1_preempt
+	tbnz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
+
+	ldr	w24, [tsk, #TSK_TI_PREEMPT_LAZY]	// get preempt lazy count
+	cbnz	w24, 2f				// preempt lazy count != 0
+	tbz	x0, #TIF_NEED_RESCHED_LAZY, 2f	// needs rescheduling?
 1:
+	bl	el1_preempt
+2:
 #endif
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
@@ -738,6 +743,7 @@ el1_preempt:
 1:	bl	preempt_schedule_irq		// irq en/disable is done inside
 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
+	tbnz	x0, #TIF_NEED_RESCHED_LAZY, 1b	// needs rescheduling?
 	ret	x24
 #endif
 
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index bb048144c..1634b1795 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -160,6 +160,16 @@ static void sve_free(struct task_struct *task)
__sve_free(task);
}

+static void *sve_free_atomic(struct task_struct *task)
+{
+ void *sve_state = task->thread.sve_state;
+
+ WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+
+ task->thread.sve_state = NULL;
+ return sve_state;
+}
+
/*
* TIF_SVE controls whether a task can use SVE without trapping while
* in userspace, and also the way a task's FPSIMD/SVE state is stored
@@ -573,6 +583,7 @@ int sve_set_vector_length(struct task_struct *task,
* non-SVE thread.
*/
if (task == current) {
+ preempt_disable();
local_bh_disable();

fpsimd_save();
@@ -583,8 +594,10 @@ int sve_set_vector_length(struct task_struct *task,
if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
sve_to_fpsimd(task);

- if (task == current)
+ if (task == current) {
local_bh_enable();
+ preempt_enable();
+ }

/*
* Force reallocation of task SVE state to the correct size
@@ -839,6 +852,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)

sve_alloc(current);

+ preempt_disable();
local_bh_disable();

fpsimd_save();
@@ -852,6 +866,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
WARN_ON(1); /* SVE access shouldn't have trapped */

local_bh_enable();
+ preempt_enable();
}

/*
@@ -918,10 +933,12 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
int vl, supported_vl;
+ void *mem = NULL;

if (!system_supports_fpsimd())
return;

+ preempt_disable();
local_bh_disable();

memset(&current->thread.uw.fpsimd_state, 0,
@@ -930,7 +947,7 @@ void fpsimd_flush_thread(void)

if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
- sve_free(current);
+ mem = sve_free_atomic(current);

/*
* Reset the task vector length as required.
@@ -966,6 +983,8 @@ void fpsimd_flush_thread(void)
set_thread_flag(TIF_FOREIGN_FPSTATE);

local_bh_enable();
+ preempt_enable();
+ kfree(mem);
}

/*
@@ -977,9 +996,11 @@ void fpsimd_preserve_current_state(void)
if (!system_supports_fpsimd())
return;

+ preempt_disable();
local_bh_disable();
fpsimd_save();
local_bh_enable();
+ preempt_enable();
}

/*
@@ -1050,6 +1071,7 @@ void fpsimd_restore_current_state(void)
return;
}

+ preempt_disable();
local_bh_disable();

if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -1058,6 +1080,7 @@ void fpsimd_restore_current_state(void)
}

local_bh_enable();
+ preempt_enable();
}

/*
@@ -1070,6 +1093,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
if (WARN_ON(!system_supports_fpsimd()))
return;

+ preempt_disable();
local_bh_disable();

current->thread.uw.fpsimd_state = *state;
@@ -1082,6 +1106,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
clear_thread_flag(TIF_FOREIGN_FPSTATE);

local_bh_enable();
+ preempt_enable();
}

/*
@@ -1128,6 +1153,7 @@ void kernel_neon_begin(void)

BUG_ON(!may_use_simd());

+ preempt_disable();
local_bh_disable();

__this_cpu_write(kernel_neon_busy, true);
@@ -1141,6 +1167,7 @@ void kernel_neon_begin(void)
preempt_disable();

local_bh_enable();
+ preempt_enable();
}
EXPORT_SYMBOL(kernel_neon_begin);

diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 75009eba3..fd9c01b43 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -709,7 +709,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
/* Check valid user FS if needed */
addr_limit_user_check();

- if (thread_flags & _TIF_NEED_RESCHED) {
+ if (thread_flags & _TIF_NEED_RESCHED_MASK) {
/* Unmask Debug and SError for the next task */
local_daif_restore(DAIF_PROCCTX_NOIRQ);
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index c712a7376..792da0e12 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -33,7 +33,7 @@ static u8 tag_lsb;
static u64 tag_val;
static u64 va_mask;

-static void compute_layout(void)
+__init void kvm_compute_layout(void)
{
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
u64 hyp_va_msb;
@@ -121,8 +121,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,

BUG_ON(nr_inst != 5);

- if (!has_vhe() && !va_mask)
- compute_layout();

for (i = 0; i < nr_inst; i++) {
u32 rd, rn, insn, oinsn;
@@ -167,9 +165,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
return;
}

- if (!va_mask)
- compute_layout();
-
/*
* Compute HYP VA by using the same computation as kern_hyp_va()
*/
diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h
index 7a906b521..d8f596fec 100644
--- a/arch/hexagon/include/asm/spinlock_types.h
+++ b/arch/hexagon/include/asm/spinlock_types.h
@@ -21,10 +21,6 @@
#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H

-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 6e345fefc..681408d68 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -2,10 +2,6 @@
#ifndef _ASM_IA64_SPINLOCK_TYPES_H
#define _ASM_IA64_SPINLOCK_TYPES_H

-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6115464d5..f09e34c84 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
ti->cpu = cpu;
p->stack = ti;
p->state = TASK_UNINTERRUPTIBLE;
- cpumask_set_cpu(cpu, &p->cpus_allowed);
+ cpumask_set_cpu(cpu, &p->cpus_mask);
INIT_LIST_HEAD(&p->tasks);
p->parent = p->real_parent = p->group_leader = p;
INIT_LIST_HEAD(&p->children);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d2fefde97..09782e575 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2519,7 +2519,7 @@ config MIPS_CRC_SUPPORT
#
config HIGHMEM
bool "High Memory Support"
- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL

config CPU_SUPPORTS_HIGHMEM
bool
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index e610473d6..1428b4feb 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -42,7 +42,7 @@ extern struct task_struct *ll_task;
* inline to try to keep the overhead down. If we have been forced to run on
* a "CPU" with an FPU because of a previous high level of FP computation,
* but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
+ * isn't set), we undo the restriction on cpus_mask.
*
* We're not calling set_cpus_allowed() here, because we have no need to
* force prompt migration - we're already switching the current CPU to a
@@ -57,7 +57,7 @@ do { \
test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
(!(KSTK_STATUS(prev) & ST0_CU1))) { \
clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
- prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+ prev->cpus_mask = prev->thread.user_cpus_allowed; \
} \
next->thread.emulated_fp = 0; \
} while(0)
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index a7c0f97e4..1a08428ee 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;

- cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
cpumask_and(&mask, &allowed, cpu_active_mask);

out_unlock:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9dab0ed1b..3623cf32f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1174,12 +1174,12 @@ static void mt_ase_fp_affinity(void)
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
- if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
+ if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
cpumask_t tmask;

current->thread.user_cpus_allowed
- = current->cpus_allowed;
- cpumask_and(&tmask, &current->cpus_allowed,
+ = current->cpus_mask;
+ cpumask_and(&tmask, &current->cpus_mask,
&mt_fpu_cpumask);
set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5329ef16d..cda7db582 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -105,10 +105,11 @@ config LOCKDEP_SUPPORT

config RWSEM_GENERIC_SPINLOCK
bool
+ default y if PREEMPT_RT_FULL

config RWSEM_XCHGADD_ALGORITHM
bool
- default y
+ default y if !PREEMPT_RT_FULL

config GENERIC_LOCKBREAK
bool
@@ -216,6 +217,7 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN
@@ -398,7 +400,7 @@ menu "Kernel options"

config HIGHMEM
bool "High memory support"
- depends on PPC32
+ depends on PPC32 && !PREEMPT_RT_FULL

source kernel/Kconfig.hz

diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 87adaf13b..7305cb6a5 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -2,10 +2,6 @@
#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
#define _ASM_POWERPC_SPINLOCK_TYPES_H

-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 3c0002044..64c3d1a72 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -37,6 +37,8 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
+ int preempt_lazy_count; /* 0 => preemptable,
+ <0 => BUG */
unsigned long local_flags; /* private flags for thread */
#ifdef CONFIG_LIVEPATCH
unsigned long *livepatch_sp;
@@ -81,18 +83,18 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_FSCHECK 3 /* Check FS is USER_DS on return */
-#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_PATCH_PENDING 6 /* pending live patching update */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
-#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
-#define TIF_NOERROR 12 /* Force successful syscall return */
+
+#define TIF_NEED_RESCHED_LAZY 11 /* lazy rescheduling necessary */
+#define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */
+
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
#define TIF_UPROBE 14 /* breakpointed or single-stepping */
-#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
@@ -100,6 +102,10 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define TIF_ELF2ABI 18 /* function descriptors must die! */
#endif
#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_32BIT 20 /* 32 bit binary */
+#define TIF_RESTOREALL 21 /* Restore all regs (implies NOERROR) */
+#define TIF_NOERROR 22 /* Force successful syscall return */
+

/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -119,6 +125,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
+#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
#define _TIF_FSCHECK (1<<TIF_FSCHECK)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
@@ -127,8 +134,9 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
- _TIF_FSCHECK)
+ _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 50400f213..1bb82c5dc 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -156,6 +156,7 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
OFFSET(TI_PREEMPT, thread_info, preempt_count);
+ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_CPU, thread_info, cpu);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 26b3f853c..44bcf1585 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -393,7 +393,9 @@ ret_from_syscall:
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
li r8,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+ lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
+ ori r0,r0, (_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
+ and. r0,r9,r0
bne- syscall_exit_work
cmplw 0,r3,r8
blt+ syscall_exit_cont
@@ -511,13 +513,13 @@ syscall_dotrace:
b syscall_dotrace_cont

syscall_exit_work:
- andi. r0,r9,_TIF_RESTOREALL
+ andis. r0,r9,_TIF_RESTOREALL@h
beq+ 0f
REST_NVGPRS(r1)
b 2f
0: cmplw 0,r3,r8
blt+ 1f
- andi. r0,r9,_TIF_NOERROR
+ andis. r0,r9,_TIF_NOERROR@h
bne- 1f
lwz r11,_CCR(r1) /* Load CR */
neg r3,r3
@@ -526,12 +528,12 @@ syscall_exit_work:

1: stw r6,RESULT(r1) /* Save result */
stw r3,GPR3(r1) /* Update return value */
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
+2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
beq 4f

/* Clear per-syscall TIF flags if any are set. */

- li r11,_TIF_PERSYSCALL_MASK
+ lis r11,_TIF_PERSYSCALL_MASK@h
addi r12,r12,TI_FLAGS
3: lwarx r8,0,r12
andc r8,r8,r11
@@ -888,7 +890,14 @@ resume_kernel:
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
+ bne+ 1f
+ lwz r0,TI_PREEMPT_LAZY(r9)
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ lwz r0,TI_FLAGS(r9)
+ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
beq+ restore
+1:
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
@@ -899,11 +908,11 @@ resume_kernel:
*/
bl trace_hardirqs_off
#endif
-1: bl preempt_schedule_irq
+2: bl preempt_schedule_irq
CURRENT_THREAD_INFO(r9, r1)
lwz r3,TI_FLAGS(r9)
- andi. r0,r3,_TIF_NEED_RESCHED
- bne- 1b
+ andi. r0,r3,_TIF_NEED_RESCHED_MASK
+ bne- 2b
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
@@ -1232,7 +1241,7 @@ global_dbcr0:
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work: /* r10 contains MSR_KERNEL here */
- andi. r0,r9,_TIF_NEED_RESCHED
+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
beq do_user_signal

do_resched: /* r10 contains MSR_KERNEL here */
@@ -1253,7 +1262,7 @@ recheck:
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
- andi. r0,r9,_TIF_NEED_RESCHED
+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 58b50967b..229cdb04e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -176,7 +176,7 @@ system_call: /* label this so stack traces look sane */
* based on caller's run-mode / personality.
*/
ld r11,SYS_CALL_TABLE@toc(2)
- andi. r10,r10,_TIF_32BIT
+ andis. r10,r10,_TIF_32BIT@h
beq 15f
addi r11,r11,8 /* use 32-bit syscall entries */
clrldi r3,r3,32
@@ -250,7 +250,9 @@ system_call_exit:

ld r9,TI_FLAGS(r12)
li r11,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+ lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
+ ori r0,r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
+ and. r0,r9,r0
bne- .Lsyscall_exit_work

andi. r0,r8,MSR_FP
@@ -363,25 +365,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
If TIF_NOERROR is set, just save r3 as it is. */

- andi. r0,r9,_TIF_RESTOREALL
+ andis. r0,r9,_TIF_RESTOREALL@h
beq+ 0f
REST_NVGPRS(r1)
b 2f
0: cmpld r3,r11 /* r11 is -MAX_ERRNO */
blt+ 1f
- andi. r0,r9,_TIF_NOERROR
+ andis. r0,r9,_TIF_NOERROR@h
bne- 1f
ld r5,_CCR(r1)
neg r3,r3
oris r5,r5,0x1000 /* Set SO bit in CR */
std r5,_CCR(r1)
1: std r3,GPR3(r1)
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
+2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
beq 4f

/* Clear per-syscall TIF flags if any are set. */

- li r11,_TIF_PERSYSCALL_MASK
+ lis r11,(_TIF_PERSYSCALL_MASK)@h
addi r12,r12,TI_FLAGS
3: ldarx r10,0,r12
andc r10,r10,r11
@@ -774,7 +776,7 @@ _GLOBAL(ret_from_except_lite)
bl restore_math
b restore
#endif
-1: andi. r0,r4,_TIF_NEED_RESCHED
+1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
beq 2f
bl restore_interrupts
SCHEDULE_USER
@@ -836,10 +838,18 @@ resume_kernel:

#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
+ lwz r8,TI_PREEMPT(r9)
+ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
+ bne restore
andi. r0,r4,_TIF_NEED_RESCHED
+ bne+ check_count
+
+ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
beq+ restore
+ lwz r8,TI_PREEMPT_LAZY(r9)
+
/* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
+check_count:
cmpwi cr0,r8,0
bne restore
ld r0,SOFTE(r1)
@@ -856,7 +866,7 @@ resume_kernel:
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED
+ andi. r0,r4,_TIF_NEED_RESCHED_MASK
bne 1b

/*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d37704ebc..b45a9849b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -766,6 +766,7 @@ void irq_ctx_init(void)
}
}

+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
struct thread_info *curtp, *irqtp;
@@ -783,6 +784,7 @@ void do_softirq_own_stack(void)
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
}
+#endif

irq_hw_number_t virq_to_hw(unsigned int virq)
{
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 695b24a2d..032ada21b 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -42,6 +42,7 @@
* We store the saved ksp_limit in the unused part
* of the STACK_FRAME_OVERHEAD
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
mflr r0
stw r0,4(r1)
@@ -58,6 +59,7 @@ _GLOBAL(call_do_softirq)
stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
+#endif

/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index facc02964..8b1774186 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -32,6 +32,7 @@

.text

+#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
@@ -42,6 +43,7 @@ _GLOBAL(call_do_softirq)
ld r0,16(r1)
mtlr r0
blr
+#endif

_GLOBAL(call_do_irq)
mflr r0
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 68a0e9d5b..6f4d5d761 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -178,6 +178,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
depends on KVM && E500
+ depends on !PREEMPT_RT_FULL
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index c9ef3c532..cb10249b1 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
* runqueue. The context will be rescheduled on the proper node
* if it is timesliced or preempted.
*/
- cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
+ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);

/* Save the current cpu id for spu interrupt routing. */
ctx->last_ran = raw_smp_processor_id();
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index e7075aaff..1580464a9 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -752,8 +752,8 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
}
pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);

- res = wait_event_interruptible(dev->done.wait,
- dev->done.done || kthread_should_stop());
+ res = swait_event_interruptible_exclusive(dev->done.wait,
+ dev->done.done || kthread_should_stop());
if (kthread_should_stop())
res = -EINTR;
if (res) {
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 06f02960b..d80d919c7 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -38,6 +38,7 @@
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
+#include <linux/locallock.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -212,6 +213,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
}

static DEFINE_PER_CPU(__be64 *, tce_page);
+static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock);

static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
@@ -232,7 +234,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
direction, attrs);
}

- local_irq_save(flags); /* to protect tcep and the page behind it */
+ /* to protect tcep and the page behind it */
+ local_lock_irqsave(tcp_page_lock, flags);

tcep = __this_cpu_read(tce_page);

@@ -243,7 +246,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
- local_irq_restore(flags);
+ local_unlock_irqrestore(tcp_page_lock, flags);
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
@@ -277,7 +280,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcenum += limit;
} while (npages > 0 && !rc);

- local_irq_restore(flags);
+ local_unlock_irqrestore(tcp_page_lock, flags);

if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
@@ -435,13 +438,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
u64 rc = 0;
long l, limit;

- local_irq_disable(); /* to protect tcep and the page behind it */
+ /* to protect tcep and the page behind it */
+ local_lock_irq(tcp_page_lock);
tcep = __this_cpu_read(tce_page);

if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
- local_irq_enable();
+ local_unlock_irq(tcp_page_lock);
return -ENOMEM;
}
__this_cpu_write(tce_page, tcep);
@@ -487,7 +491,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,

/* error cleanup: caller will clear whole range */

- local_irq_enable();
+ local_unlock_irq(tcp_page_lock);
return rc;
}
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index cfed272e4..8e28e8176 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -2,10 +2,6 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
typedef struct {
int lock;
} __attribute__ ((aligned (4))) arch_spinlock_t;
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index e82369f28..22ca9a98b 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -2,10 +2,6 @@
#ifndef __ASM_SH_SPINLOCK_TYPES_H
#define __ASM_SH_SPINLOCK_TYPES_H

-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 5717c7cbd..66dd399b2 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -148,6 +148,7 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}

+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
struct thread_info *curctx;
@@ -175,6 +176,7 @@ void do_softirq_own_stack(void)
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
);
}
+#endif
#else
static inline void handle_one_irq(unsigned int irq)
{
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 713670e6d..5dfc71534 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}

+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
@@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
}
+#endif

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7c28f4b18..5728c3883 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -182,6 +182,7 @@ config X86
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if PARAVIRT
select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
@@ -266,8 +267,11 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API

+config RWSEM_GENERIC_SPINLOCK
+ def_bool PREEMPT_RT_FULL
+
config RWSEM_XCHGADD_ALGORITHM
- def_bool y
+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL

config GENERIC_CALIBRATE_DELAY
def_bool y
@@ -946,7 +950,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
- select CPUMASK_OFFSTACK
+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 59baeb297..794f6b551 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -78,8 +78,9 @@ CONFIG_NO_HZ_FULL=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
# CONFIG_PREEMPT_NONE is not set
-CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
+CONFIG_PREEMPT_RT_FULL=y

#
# CPU/Task time and stats accounting
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 917f25e4d..58d8c03fc 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -434,14 +434,14 @@ static int ecb_encrypt(struct skcipher_request *req)

err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -456,14 +456,14 @@ static int ecb_decrypt(struct skcipher_request *req)

err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -478,14 +478,14 @@ static int cbc_encrypt(struct skcipher_request *req)

err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -500,14 +500,14 @@ static int cbc_decrypt(struct skcipher_request *req)

err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -557,18 +557,20 @@ static int ctr_crypt(struct skcipher_request *req)

err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ kernel_fpu_begin();
aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
if (walk.nbytes) {
+ kernel_fpu_begin();
ctr_crypt_final(ctx, &walk);
+ kernel_fpu_end();
err = skcipher_walk_done(&walk, 0);
}
- kernel_fpu_end();

return err;
}
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 41034745d..d4bf7fc02 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -61,7 +61,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)

static int ecb_crypt(struct skcipher_request *req, bool enc)
{
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
@@ -76,7 +76,7 @@ static int ecb_crypt(struct skcipher_request *req, bool enc)
u8 *wsrc = walk.src.virt.addr;
u8 *wdst = walk.dst.virt.addr;

- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes);

/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
@@ -105,10 +105,9 @@ static int ecb_crypt(struct skcipher_request *req, bool enc)
} while (nbytes >= bsize);

done:
+ cast5_fpu_end(fpu_enabled);
err = skcipher_walk_done(&walk, nbytes);
}
-
- cast5_fpu_end(fpu_enabled);
return err;
}

@@ -212,7 +211,7 @@ static int cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct skcipher_walk walk;
unsigned int nbytes;
int err;
@@ -220,12 +219,11 @@ static int cbc_decrypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes)) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes);
nbytes = __cbc_decrypt(ctx, &walk);
+ cast5_fpu_end(fpu_enabled);
err = skcipher_walk_done(&walk, nbytes);
}
-
- cast5_fpu_end(fpu_enabled);
return err;
}

@@ -292,7 +290,7 @@ static int ctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct skcipher_walk walk;
unsigned int nbytes;
int err;
@@ -300,13 +298,12 @@ static int ctr_crypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes);
nbytes = __ctr_crypt(&walk, ctx);
+ cast5_fpu_end(fpu_enabled);
err = skcipher_walk_done(&walk, nbytes);
}

- cast5_fpu_end(fpu_enabled);
-
if (walk.nbytes) {
ctr_crypt_final(&walk, ctx);
err = skcipher_walk_done(&walk, 0);
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index dce7c5d39..6194160b7 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -81,23 +81,24 @@ static int chacha20_simd(struct skcipher_request *req)

crypto_chacha20_init(state, ctx, walk.iv);

- kernel_fpu_begin();
-
while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
+ kernel_fpu_begin();
+
chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
+ kernel_fpu_end();
err = skcipher_walk_done(&walk,
walk.nbytes % CHACHA20_BLOCK_SIZE);
}

if (walk.nbytes) {
+ kernel_fpu_begin();
chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
+ kernel_fpu_end();
err = skcipher_walk_done(&walk, 0);
}

- kernel_fpu_end();
-
return err;
}
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index a78ef99a9..dac489a1c 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -38,7 +38,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
- bool fpu_enabled = false;
+ bool fpu_enabled;
unsigned int nbytes;
int err;

@@ -51,7 +51,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
unsigned int i;

fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled, nbytes);
+ &walk, false, nbytes);
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;

@@ -69,10 +69,9 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
if (nbytes < bsize)
break;
}
+ glue_fpu_end(fpu_enabled);
err = skcipher_walk_done(&walk, nbytes);
}
-
- glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
@@ -115,7 +114,7 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
- bool fpu_enabled = false;
+ bool fpu_enabled;
unsigned int nbytes;
int err;

@@ -129,7 +128,7 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
u128 last_iv;

fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled, nbytes);
+ &walk, false, nbytes);
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
@@ -161,10 +160,10 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
done:
u128_xor(dst, dst, (u128 *)walk.iv);
*(u128 *)walk.iv = last_iv;
+ glue_fpu_end(fpu_enabled);
err = skcipher_walk_done(&walk, nbytes);
}

- glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
@@ -175,7 +174,7 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
- bool fpu_enabled = false;
+ bool fpu_enabled;
unsigned int nbytes;
int err;

@@ -189,7 +188,7 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
le128 ctrblk;

fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled, nbytes);
+ &walk, false, nbytes);

be128_to_le128(&ctrblk, (be128 *)walk.iv);

@@ -213,11 +212,10 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
}

le128_to_be128((be128 *)walk.iv, &ctrblk);
+ glue_fpu_end(fpu_enabled);
err = skcipher_walk_done(&walk, nbytes);
}

- glue_fpu_end(fpu_enabled);
-
if (nbytes) {
le128 ctrblk;
u128 tmp;
@@ -278,7 +276,7 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
{
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
- bool fpu_enabled = false;
+ bool fpu_enabled;
unsigned int nbytes;
int err;

@@ -289,21 +287,24 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx,

/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled,
+ &walk, false,
nbytes < bsize ? bsize : nbytes);

/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);

while (nbytes) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ &walk, fpu_enabled,
+ nbytes < bsize ? bsize : nbytes);
nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

+ glue_fpu_end(fpu_enabled);
+ fpu_enabled = false;
err = skcipher_walk_done(&walk, nbytes);
nbytes = walk.nbytes;
}

- glue_fpu_end(fpu_enabled);
-
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index d3944912c..8940c802f 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -134,7 +134,7 @@ static long syscall_trace_enter(struct pt_regs *regs)

#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
+ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
@@ -149,9 +149,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
/* We have work to do. */
local_irq_enable();

- if (cached_flags & _TIF_NEED_RESCHED)
+ if (cached_flags & _TIF_NEED_RESCHED_MASK)
schedule();

+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+ if (unlikely(current->forced_info.si_signo)) {
+ struct task_struct *t = current;
+ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
+ t->forced_info.si_signo = 0;
+ }
+#endif
if (cached_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index a009e317f..a1d2029f5 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -764,8 +764,25 @@ END(ret_from_exception)
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
+ # preempt count == 0 + NEED_RS set?
cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
jnz restore_all_kernel
+#else
+ jz test_int_off
+
+ # atleast preempt count == 0 ?
+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+ jne restore_all_kernel
+
+ movl PER_CPU_VAR(current_task), %ebp
+ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
+ jnz restore_all_kernel
+
+ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
+ jz restore_all_kernel
+test_int_off:
+#endif
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all_kernel
call preempt_schedule_irq
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 994e3ea4c..44176c08f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -736,7 +736,23 @@ retint_kernel:
btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
jnz 1f
+#else
+ jz do_preempt_schedule_irq
+
+ # atleast preempt count == 0 ?
+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+ jnz 1f
+
+ movq PER_CPU_VAR(current_task), %rcx
+ cmpl $0, TASK_TI_preempt_lazy_count(%rcx)
+ jnz 1f
+
+ btl $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
+ jnc 1f
+do_preempt_schedule_irq:
+#endif
call preempt_schedule_irq
jmp 0b
1:
@@ -1087,6 +1103,7 @@ bad_gs:
jmp 2b
.previous

+#ifndef CONFIG_PREEMPT_RT_FULL
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
@@ -1097,6 +1114,7 @@ ENTRY(do_softirq_own_stack)
leaveq
ret
ENDPROC(do_softirq_own_stack)
+#endif

#ifdef CONFIG_XEN
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index b56d504af..e51c70940 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -20,6 +20,7 @@
*/
extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
+extern void kernel_fpu_resched(void);
extern bool irq_fpu_usable(void);

/*
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 90cb2f36c..98a38e098 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -86,17 +86,48 @@ static __always_inline void __preempt_count_sub(int val)
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
-static __always_inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool ____preempt_count_dec_and_test(void)
{
return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
}

+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ if (____preempt_count_dec_and_test())
+ return true;
+#ifdef CONFIG_PREEMPT_LAZY
+ if (preempt_count())
+ return false;
+ if (current_thread_info()->preempt_lazy_count)
+ return false;
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+ return false;
+#endif
+}
+
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
static __always_inline bool should_resched(int preempt_offset)
{
+#ifdef CONFIG_PREEMPT_LAZY
+ u32 tmp;
+
+ tmp = raw_cpu_read_4(__preempt_count);
+ if (tmp == preempt_offset)
+ return true;
+
+ /* preempt count == 0 ? */
+ tmp &= ~PREEMPT_NEED_RESCHED;
+ if (tmp != preempt_offset)
+ return false;
+ if (current_thread_info()->preempt_lazy_count)
+ return false;
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+#endif
}

#ifdef CONFIG_PREEMPT
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 33d3c88a7..c00e27af2 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -28,6 +28,19 @@ typedef struct {
#define SA_IA32_ABI 0x02000000u
#define SA_X32_ABI 0x01000000u

+/*
+ * Because some traps use the IST stack, we must keep preemption
+ * disabled while calling do_trap(), but do_trap() may call
+ * force_sig_info() which will grab the signal spin_locks for the
+ * task, which in PREEMPT_RT_FULL are mutexes. By defining
+ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
+ * trap.
+ */
+#if defined(CONFIG_PREEMPT_RT_FULL)
+#define ARCH_RT_DELAYS_SIGNAL_SEND
+#endif
+
#ifndef CONFIG_COMPAT
typedef sigset_t compat_sigset_t;
#endif
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 9c556ea2e..b136992be 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -65,7 +65,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
{
- u64 canary;
+ u64 uninitialized_var(canary);
u64 tsc;

#ifdef CONFIG_X86_64
@@ -76,8 +76,14 @@ static __always_inline void boot_init_stack_canary(void)
* of randomness. The TSC only matters for very early init,
* there it already has some randomness on most systems. Later
* on during the bootup the random pool has true entropy too.
+ * For preempt-rt we need to weaken the randomness a bit, as
+ * we can't call into the random generator from atomic context
+ * due to locking constraints. We just leave canary
+ * uninitialized and use the TSC based randomness on top of it.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
get_random_bytes(&canary, sizeof(canary));
+#endif
tsc = rdtsc();
canary += tsc + (tsc << 32UL);
canary &= CANARY_MASK;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 82b73b75d..dc267291f 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -56,17 +56,24 @@ struct task_struct;
struct thread_info {
unsigned long flags; /* low level flags */
u32 status; /* thread synchronous flags */
+ int preempt_lazy_count; /* 0 => lazy preemptable
+ <0 => BUG */
};

#define INIT_THREAD_INFO(tsk) \
{ \
.flags = 0, \
+ .preempt_lazy_count = 0, \
}

#else /* !__ASSEMBLY__ */

#include <asm/asm-offsets.h>

+#define GET_THREAD_INFO(reg) \
+ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
+ _ASM_SUB $(THREAD_SIZE),reg ;
+
#endif

/*
@@ -91,6 +98,7 @@ struct thread_info {
#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* IA32 compatibility process */
+#define TIF_NEED_RESCHED_LAZY 18 /* lazy rescheduling necessary */
#define TIF_NOHZ 19 /* in adaptive nohz mode */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
@@ -120,6 +128,7 @@ struct thread_info {
#define _TIF_NOCPUID (1 << TIF_NOCPUID)
#define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_IA32 (1 << TIF_IA32)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
@@ -165,6 +174,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)

+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
#define STACK_WARN (THREAD_SIZE/8)

/*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a6397da20..cebec7489 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1723,7 +1723,7 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
return false;
}

-static inline bool ioapic_irqd_mask(struct irq_data *data)
+static inline bool ioapic_prepare_move(struct irq_data *data)
{
/* If we are moving the IRQ we need to mask it */
if (unlikely(irqd_is_setaffinity_pending(data))) {
@@ -1734,9 +1734,9 @@ static inline bool ioapic_irqd_mask(struct irq_data *data)
return false;
}

-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
+static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
- if (unlikely(masked)) {
+ if (unlikely(moveit)) {
/* Only migrate the irq if the ack has been received.
*
* On rare occasions the broadcast level triggered ack gets
@@ -1771,11 +1771,11 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
}
}
#else
-static inline bool ioapic_irqd_mask(struct irq_data *data)
+static inline bool ioapic_prepare_move(struct irq_data *data)
{
return false;
}
-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
+static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
}
#endif
@@ -1784,11 +1784,11 @@ static void ioapic_ack_level(struct irq_data *irq_data)
{
struct irq_cfg *cfg = irqd_cfg(irq_data);
unsigned long v;
- bool masked;
+ bool moveit;
int i;

irq_complete_move(cfg);
- masked = ioapic_irqd_mask(irq_data);
+ moveit = ioapic_prepare_move(irq_data);

/*
* It appears there is an erratum which affects at least version 0x11
@@ -1843,7 +1843,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
}

- ioapic_irqd_unmask(irq_data, masked);
+ ioapic_finish_move(irq_data, moveit);
}

static void ioapic_ir_ack_level(struct irq_data *irq_data)
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
|
|
index 01de31db3..ce1c5b9fb 100644
|
|
--- a/arch/x86/kernel/asm-offsets.c
|
|
+++ b/arch/x86/kernel/asm-offsets.c
|
|
@@ -38,6 +38,7 @@ void common(void) {
|
|
|
|
BLANK();
|
|
OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
|
|
+ OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
|
|
OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
|
|
|
|
BLANK();
|
|
@@ -94,6 +95,7 @@ void common(void) {
|
|
|
|
BLANK();
|
|
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
|
|
+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
|
|
|
|
/* TLB state for the entry code */
|
|
OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
|
|
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
|
|
index a999a58ca..d6410d074 100644
|
|
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
|
|
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
|
|
@@ -1445,7 +1445,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
|
|
* may be scheduled elsewhere and invalidate entries in the
|
|
* pseudo-locked region.
|
|
*/
|
|
- if (!cpumask_subset(¤t->cpus_allowed, &plr->d->cpu_mask)) {
|
|
+ if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
|
|
mutex_unlock(&rdtgroup_mutex);
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
|
|
index 2e5003fef..768c53767 100644
|
|
--- a/arch/x86/kernel/fpu/core.c
|
|
+++ b/arch/x86/kernel/fpu/core.c
|
|
@@ -136,6 +136,18 @@ void kernel_fpu_end(void)
|
|
}
|
|
EXPORT_SYMBOL_GPL(kernel_fpu_end);
|
|
|
|
+void kernel_fpu_resched(void)
|
|
+{
|
|
+ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
|
|
+
|
|
+ if (should_resched(PREEMPT_OFFSET)) {
|
|
+ kernel_fpu_end();
|
|
+ cond_resched();
|
|
+ kernel_fpu_begin();
|
|
+ }
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(kernel_fpu_resched);
|
|
+
|
|
/*
|
|
* Save the FPU state (mark it for reload if necessary):
|
|
*
|
|
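
kernel_fpu_resched() gives long-running kernel-FPU users an explicit preemption point: the FPU section is dropped and re-entered only when a reschedule is actually pending. A minimal usage sketch (the loop and the xor_chunk() helper are illustrative, not part of this patch):

	kernel_fpu_begin();
	for (i = 0; i < nr_chunks; i++) {
		xor_chunk(&chunks[i]);	/* assumed SIMD-using helper */
		kernel_fpu_resched();	/* end/cond_resched()/begin only if needed */
	}
	kernel_fpu_end();
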
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 3d9f4b7b4..3d56397e2 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -351,10 +351,12 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
}

+ preempt_disable();
local_bh_disable();
fpu->initialized = 1;
fpu__restore(fpu);
local_bh_enable();
+ preempt_enable();

/* Failure is already handled */
return err;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 95600a99a..9192d7608 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -130,6 +130,7 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}

+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
struct irq_stack *irqstk;
@@ -146,6 +147,7 @@ void do_softirq_own_stack(void)

call_on_stack(__do_softirq, isp);
}
+#endif

bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 020efe0f9..5d0c97555 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -38,6 +38,7 @@
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>
+#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/ldt.h>
@@ -205,6 +206,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
}
EXPORT_SYMBOL_GPL(start_thread);

+#ifdef CONFIG_PREEMPT_RT_FULL
+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ int i;
+
+ /*
+ * Clear @prev's kmap_atomic mappings
+ */
+ for (i = 0; i < prev_p->kmap_idx; i++) {
+ int idx = i + KM_TYPE_NR * smp_processor_id();
+ pte_t *ptep = kmap_pte - idx;
+
+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ }
+ /*
+ * Restore @next_p's kmap_atomic mappings
+ */
+ for (i = 0; i < next_p->kmap_idx; i++) {
+ int idx = i + KM_TYPE_NR * smp_processor_id();
+
+ if (!pte_none(next_p->kmap_pte[i]))
+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+ }
+}
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+

/*
* switch_to(x,y) should switch tasks from x to y.
@@ -274,6 +304,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)

switch_to_extra(prev_p, next_p);

+ switch_kmaps(prev_p, next_p);
+
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
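
switch_kmaps() exists because, on RT, kmap_atomic() sections become preemptible (see the preempt_disable_nort() change in highmem_32.c below): a task can be scheduled out while holding atomic kmaps, so its fixmap slots are recorded per task and replayed on the next CPU. The user-visible effect, as a sketch (the copy helper is illustrative):

	#include <linux/highmem.h>
	#include <linux/string.h>

	static void copy_highpage_sketch(struct page *dst, struct page *src)
	{
		char *d = kmap_atomic(dst);
		char *s = kmap_atomic(src);

		memcpy(d, s, PAGE_SIZE);	/* may now be preempted on RT */

		kunmap_atomic(s);
		kunmap_atomic(d);
	}
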
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 262e49301..c2f51b6e8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2257,7 +2257,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
apic->vcpu = vcpu;

hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_PINNED);
+ HRTIMER_MODE_ABS_PINNED_HARD);
apic->lapic_timer.timer.function = apic_timer_fn;

/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 53564a109..efb2ae6bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6939,6 +6939,13 @@ int kvm_arch_init(void *opaque)
goto out;
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6d18b70ed..f752724c2 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
*/
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
+ pte_t pte = mk_pte(page, prot);
unsigned long vaddr;
int idx, type;

- preempt_disable();
+ preempt_disable_nort();
pagefault_disable();

if (!PageHighMem(page))
@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
- set_pte(kmap_pte-idx, mk_pte(page, prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = pte;
+#endif
+ set_pte(kmap_pte-idx, pte);
arch_flush_lazy_mmu_mode();

return (void *)vaddr;
@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = __pte(0);
+#endif
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
arch_flush_lazy_mmu_mode();
@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
#endif

pagefault_enable();
- preempt_enable();
+ preempt_enable_nort();
}
EXPORT_SYMBOL(__kunmap_atomic);

diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index b3294d367..c0ec8d430 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -59,6 +59,7 @@ EXPORT_SYMBOL_GPL(iomap_free);

void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
+ pte_t pte = pfn_pte(pfn, prot);
unsigned long vaddr;
int idx, type;

@@ -68,7 +69,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+ WARN_ON(!pte_none(*(kmap_pte - idx)));
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = pte;
+#endif
+ set_pte(kmap_pte - idx, pte);
arch_flush_lazy_mmu_mode();

return (void *)vaddr;
@@ -119,6 +125,9 @@ iounmap_atomic(void __iomem *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = __pte(0);
+#endif
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e2d4b25c7..9626ebb9e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -687,12 +687,18 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
pgprot_t ref_prot;

spin_lock(&pgd_lock);
+ /*
+ * Keep preemption disabled after __flush_tlb_all() which expects not to be
+ * preempted during the flush of the local TLB.
+ */
+ preempt_disable();
/*
* Check for races, another CPU might have split this page
* up for us already:
*/
tmp = _lookup_address_cpa(cpa, address, &level);
if (tmp != kpte) {
+ preempt_enable();
spin_unlock(&pgd_lock);
return 1;
}
@@ -726,6 +732,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
break;

default:
+ preempt_enable();
spin_unlock(&pgd_lock);
return 1;
}
@@ -764,6 +771,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
* going on.
*/
__flush_tlb_all();
+ preempt_enable();
spin_unlock(&pgd_lock);

return 0;
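
This hunk is an instance of a general RT rule: spin_lock() is a sleeping lock on PREEMPT_RT and no longer implies a non-preemptible section, so code that genuinely must not be preempted (here, around the local TLB flush) adds an explicit preempt_disable(). The resulting pattern, distilled (sketch):

	spin_lock(&pgd_lock);	/* sleeping lock on RT: preemption stays on */
	preempt_disable();	/* the local flush must not migrate CPUs */

	__flush_tlb_all();

	preempt_enable();
	spin_unlock(&pgd_lock);

Note that every early-exit path then needs a matching preempt_enable() before the unlock, which is why the patch touches all three returns.
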
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index dfc809b31..3b7dd7064 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -622,18 +622,16 @@ void __init efi_dump_pagetable(void)

/*
* Makes the calling thread switch to/from efi_mm context. Can be used
- * for SetVirtualAddressMap() i.e. current->active_mm == init_mm as well
- * as during efi runtime calls i.e current->active_mm == current_mm.
- * We are not mm_dropping()/mm_grabbing() any mm, because we are not
- * losing/creating any references.
+ * in a kernel thread and user context. Preemption needs to remain disabled
+ * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
+ * cannot change under us.
+ * It should be ensured that there are no concurrent calls to this function.
*/
void efi_switch_mm(struct mm_struct *mm)
{
- task_lock(current);
efi_scratch.prev_mm = current->active_mm;
current->active_mm = mm;
switch_mm(efi_scratch.prev_mm, mm, NULL);
- task_unlock(current);
}

#ifdef CONFIG_EFI_MIXED
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
index bb1fe6c18..8a22f1e7b 100644
--- a/arch/xtensa/include/asm/spinlock_types.h
+++ b/arch/xtensa/include/asm/spinlock_types.h
@@ -2,10 +2,6 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
diff --git a/block/blk-core.c b/block/blk-core.c
index acf5585b0..8a6cc213c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -447,6 +447,9 @@ void __blk_rq_init(struct request_queue *q, struct request *rq)

INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
rq->cpu = -1;
rq->q = q;
rq->__sector = (sector_t) -1;
@@ -1243,12 +1246,21 @@ void blk_queue_exit(struct request_queue *q)
percpu_ref_put(&q->q_usage_counter);
}

+static void blk_queue_usage_counter_release_wrk(struct work_struct *work)
+{
+ struct request_queue *q =
+ container_of(work, struct request_queue, mq_pcpu_wake);
+
+ wake_up_all(&q->mq_freeze_wq);
+}
+
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);

- wake_up_all(&q->mq_freeze_wq);
+ if (wq_has_sleeper(&q->mq_freeze_wq))
+ schedule_work(&q->mq_pcpu_wake);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
@@ -1352,6 +1364,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);

init_waitqueue_head(&q->mq_freeze_wq);
+ INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk);
mutex_init(&q_wrapper->mq_freeze_lock);

/*
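
blk_queue_usage_counter_release() can be called from atomic context via percpu_ref, where waking a waitqueue is not RT-safe, so the wakeup moves into a work item and is only queued when a waiter exists. The pattern in isolation (struct and field names are illustrative, not from this patch):

	#include <linux/percpu-refcount.h>
	#include <linux/wait.h>
	#include <linux/workqueue.h>

	struct my_obj {
		struct percpu_ref ref;
		wait_queue_head_t wq;
		struct work_struct wake_work;
	};

	static void my_wake_wrk(struct work_struct *work)
	{
		struct my_obj *o = container_of(work, struct my_obj, wake_work);

		wake_up_all(&o->wq);			/* now in process context */
	}

	static void my_release(struct percpu_ref *ref)	/* possibly atomic context */
	{
		struct my_obj *o = container_of(ref, struct my_obj, ref);

		if (wq_has_sleeper(&o->wq))		/* skip the work if nobody waits */
			schedule_work(&o->wake_work);
	}
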
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 281b7a93e..d6ae26a5e 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -9,6 +9,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>
+#include <linux/delay.h>

#include "blk.h"

@@ -119,7 +120,7 @@ static void ioc_release_fn(struct work_struct *work)
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
@@ -203,7 +204,7 @@ void put_io_context_active(struct io_context *ioc)
spin_unlock(icq->q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
goto retry;
}
}
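
cpu_chill() (provided via <linux/delay.h> on RT, essentially a short hrtimer sleep) replaces cpu_relax() in these retry loops because on RT the lock holder may be a preempted task on the same CPU; busy-spinning would then livelock, while sleeping briefly lets the holder make progress. The retry idiom, reduced (sketch; the inner trylock is illustrative):

	retry:
		spin_lock_irqsave(&obj->lock, flags);
		if (!trylock_inner_resource(obj)) {	/* illustrative inner trylock */
			spin_unlock_irqrestore(&obj->lock, flags);
			cpu_chill();			/* brief sleep instead of busy-wait */
			goto retry;
		}
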
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 96debbe63..3e97db215 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -377,6 +377,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->extra_len = 0;
rq->__deadline = 0;

+#ifdef CONFIG_PREEMPT_RT_FULL
+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;

@@ -604,12 +607,24 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
}
EXPORT_SYMBOL(blk_mq_end_request);

+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+ struct request *rq = container_of(work, struct request, work);
+
+ rq->q->softirq_done_fn(rq);
+}
+
+#else
+
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;

rq->q->softirq_done_fn(rq);
}
+#endif

/**
* blk_mq_force_complete_rq() - Force complete the request, bypassing any error
@@ -651,19 +666,27 @@ void blk_mq_force_complete_rq(struct request *rq)
return;
}

- cpu = get_cpu();
+ cpu = get_cpu_light();
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);

if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * We could force QUEUE_FLAG_SAME_FORCE then we would not get in
+ * here. But we could try to invoke it on the CPU like this.
+ */
+ schedule_work_on(ctx->cpu, &rq->work);
+#else
rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
} else {
rq->q->softirq_done_fn(rq);
}
- put_cpu();
+ put_cpu_light();
}
EXPORT_SYMBOL_GPL(blk_mq_force_complete_rq);

@@ -1466,14 +1489,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
return;

if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
- int cpu = get_cpu();
+ int cpu = get_cpu_light();
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
- put_cpu();
+ put_cpu_light();
return;
}

- put_cpu();
+ put_cpu_light();
}

/*
@@ -3437,10 +3460,9 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
kt = nsecs;

mode = HRTIMER_MODE_REL;
- hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
+ hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current);
hrtimer_set_expires(&hs.timer, kt);

- hrtimer_init_sleeper(&hs, current);
do {
if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
break;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c6ec9aa12..83315e369 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -125,12 +125,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
- return __blk_mq_get_ctx(q, get_cpu());
+ return __blk_mq_get_ctx(q, get_cpu_light());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
- put_cpu();
+ put_cpu_light();
}

struct blk_mq_alloc_data {
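
get_cpu_light()/put_cpu_light() are the RT counterparts of get_cpu()/put_cpu(): on RT they pin the task to its CPU via migrate_disable() without disabling preemption, keeping the section sleepable; on !RT they behave like the originals. In isolation (sketch; the helper name is illustrative):

	int cpu = get_cpu_light();	/* migrate_disable() on RT */

	touch_per_cpu_state(cpu);	/* may take sleeping locks on RT */

	put_cpu_light();
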
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index e47a2f751..7726f48d7 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -53,6 +53,7 @@ static void trigger_softirq(void *data)
raise_softirq_irqoff(BLOCK_SOFTIRQ);

local_irq_restore(flags);
+ preempt_check_resched_rt();
}

/*
@@ -91,6 +92,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();

return 0;
}
@@ -142,6 +144,7 @@ void __blk_complete_request(struct request *req)
goto do_local;

local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(__blk_complete_request);

diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 3feebeec1..7130fb86e 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
struct cryptd_cpu_queue {
struct crypto_queue queue;
struct work_struct work;
+ spinlock_t qlock;
};

struct cryptd_queue {
@@ -121,6 +122,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+ spin_lock_init(&cpu_queue->qlock);
}
pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
@@ -145,7 +147,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
struct cryptd_cpu_queue *cpu_queue;
atomic_t *refcnt;

- local_bh_disable();
+ spin_lock_bh(&cpu_queue->qlock);
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);

@@ -159,7 +161,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
if (!atomic_read(refcnt))
goto out;

atomic_inc(refcnt);

out:
- local_bh_enable();
+ spin_unlock_bh(&cpu_queue->qlock);
@@ -179,10 +181,10 @@ static void cryptd_queue_worker(struct work_struct *work)
/*
* Only handle one request at a time to avoid hogging crypto workqueue.
*/
- local_bh_disable();
+ spin_lock_bh(&cpu_queue->qlock);
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
- local_bh_enable();
+ spin_unlock_bh(&cpu_queue->qlock);

if (!req)
return;
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 968bbcf65..c2f0077e0 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -24,6 +24,7 @@
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
+#include <linux/locallock.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
@@ -34,6 +35,7 @@ static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
+static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
@@ -146,7 +148,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
void **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req);
- const int cpu = get_cpu();
+ const int cpu = local_lock_cpu(scomp_scratches_lock);
u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
int ret;
@@ -181,7 +183,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
1);
}
out:
- put_cpu();
+ local_unlock_cpu(scomp_scratches_lock);
return ret;
}

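
DEFINE_LOCAL_IRQ_LOCK()/local_lock_cpu() is the RT-friendly way to claim per-CPU scratch space: it returns the CPU number like get_cpu(), but serializes with a per-CPU lock (a sleeping lock on RT) instead of disabling preemption. The idiom (sketch; names are illustrative):

	#include <linux/locallock.h>
	#include <linux/percpu.h>
	#include <linux/string.h>

	static DEFINE_LOCAL_IRQ_LOCK(scratch_lock);
	static DEFINE_PER_CPU(u8 *, scratch_buf);

	static void use_scratch(size_t len)
	{
		int cpu = local_lock_cpu(scratch_lock);
		u8 *buf = per_cpu(scratch_buf, cpu);

		memset(buf, 0, len);		/* illustrative work on the buffer */

		local_unlock_cpu(scratch_lock);
	}
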
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4ed0a78fd..eece02262 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -116,12 +116,20 @@ ssize_t zcomp_available_show(const char *comp, char *buf)

struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- return *get_cpu_ptr(comp->stream);
+ struct zcomp_strm *zstrm;
+
+ zstrm = *get_local_ptr(comp->stream);
+ spin_lock(&zstrm->zcomp_lock);
+ return zstrm;
}

void zcomp_stream_put(struct zcomp *comp)
{
- put_cpu_ptr(comp->stream);
+ struct zcomp_strm *zstrm;
+
+ zstrm = *this_cpu_ptr(comp->stream);
+ spin_unlock(&zstrm->zcomp_lock);
+ put_local_ptr(zstrm);
}

int zcomp_compress(struct zcomp_strm *zstrm,
@@ -171,6 +179,7 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
pr_err("Can't allocate a compression stream\n");
return -ENOMEM;
}
+ spin_lock_init(&zstrm->zcomp_lock);
*per_cpu_ptr(comp->stream, cpu) = zstrm;
return 0;
}
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 41c1002a7..d424eafcb 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -14,6 +14,7 @@ struct zcomp_strm {
/* compression/decompression buffer */
void *buffer;
struct crypto_comp *tfm;
+ spinlock_t zcomp_lock;
};

/* dynamic per-device compression frontend */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index dade3734a..d3aace0d1 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -53,6 +53,40 @@ static size_t huge_class_size;

static void zram_free_page(struct zram *zram, size_t index);

+#ifdef CONFIG_PREEMPT_RT_BASE
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
+{
+ size_t index;
+
+ for (index = 0; index < num_pages; index++)
+ spin_lock_init(&zram->table[index].lock);
+}
+
+static int zram_slot_trylock(struct zram *zram, u32 index)
+{
+ int ret;
+
+ ret = spin_trylock(&zram->table[index].lock);
+ if (ret)
+ __set_bit(ZRAM_LOCK, &zram->table[index].value);
+ return ret;
+}
+
+static void zram_slot_lock(struct zram *zram, u32 index)
+{
+ spin_lock(&zram->table[index].lock);
+ __set_bit(ZRAM_LOCK, &zram->table[index].value);
+}
+
+static void zram_slot_unlock(struct zram *zram, u32 index)
+{
+ __clear_bit(ZRAM_LOCK, &zram->table[index].value);
+ spin_unlock(&zram->table[index].lock);
+}
+
+#else
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
+
static int zram_slot_trylock(struct zram *zram, u32 index)
{
return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value);
@@ -67,6 +101,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
{
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
}
+#endif

static inline bool init_done(struct zram *zram)
{
@@ -902,6 +937,8 @@ static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);

+
+
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -932,6 +969,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)

if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
+ zram_meta_init_table_locks(zram, num_pages);
return true;
}

@@ -990,6 +1028,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
unsigned long handle;
unsigned int size;
void *src, *dst;
+ struct zcomp_strm *zstrm;

if (zram_wb_enabled(zram)) {
zram_slot_lock(zram, index);
@@ -1024,6 +1063,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,

size = zram_get_obj_size(zram, index);

+ zstrm = zcomp_stream_get(zram->comp);
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
@@ -1031,14 +1071,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
kunmap_atomic(dst);
ret = 0;
} else {
- struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

dst = kmap_atomic(page);
ret = zcomp_decompress(zstrm, src, size, dst);
kunmap_atomic(dst);
- zcomp_stream_put(zram->comp);
}
zs_unmap_object(zram->mem_pool, handle);
+ zcomp_stream_put(zram->comp);
zram_slot_unlock(zram, index);

/* Should NEVER happen. Return bio error if it does. */
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index d1095dfdf..144e91061 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -61,6 +61,9 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long value;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t lock;
+#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
ktime_t ac_time;
#endif
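
bit_spin_lock() is a busy-waiting lock embedded in a flags word and cannot work once lock sections may sleep, so on RT each zram table entry grows a real spinlock that shadows the ZRAM_LOCK bit. The replacement pattern, reduced to its core (sketch):

	#include <linux/bitops.h>
	#include <linux/spinlock.h>

	struct entry {
		unsigned long value;		/* flag word with a LOCK bit */
		spinlock_t lock;		/* RT: sleeping lock shadowing that bit */
	};

	static void entry_lock(struct entry *e)
	{
		spin_lock(&e->lock);
		__set_bit(0, &e->value);	/* keep the legacy flag coherent */
	}

	static void entry_unlock(struct entry *e)
	{
		__clear_bit(0, &e->value);
		spin_unlock(&e->lock);
	}
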
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a008d816c..068660401 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1239,28 +1239,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
return *ptr;
}

-void add_interrupt_randomness(int irq, int irq_flags)
+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
{
struct entropy_store *r;
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
cycles_t cycles = random_get_entropy();
__u32 c_high, j_high;
- __u64 ip;
unsigned long seed;
int credit = 0;

if (cycles == 0)
- cycles = get_reg(fast_pool, regs);
+ cycles = get_reg(fast_pool, NULL);
c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
j_high = (sizeof(now) > 4) ? now >> 32 : 0;
fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
fast_pool->pool[1] ^= now ^ c_high;
- ip = regs ? instruction_pointer(regs) : _RET_IP_;
+ if (!ip)
+ ip = _RET_IP_;
fast_pool->pool[2] ^= ip;
fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- get_reg(fast_pool, regs);
+ get_reg(fast_pool, NULL);

fast_mix(fast_pool);
add_interrupt_bench(cycles);
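
Passing ip explicitly removes the get_irq_regs() dependency, so entropy collection can run outside hard-interrupt context (on RT it is deferred to the threaded handler). A caller captures the instruction pointer while still in hardirq and hands it in later; roughly (sketch — the random_ip field mirrors how the RT tree stores it and is an assumption here, not shown in this hunk):

	/* hardirq path: remember where the CPU was interrupted */
	desc->random_ip = instruction_pointer(get_irq_regs());

	/* later, from thread/softirq context: feed the fast pool */
	add_interrupt_randomness(irq, 0, desc->random_ip);
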
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index f08949a5f..58790a4f0 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -53,6 +53,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
return container_of(data, struct tpm_tis_tcg_phy, priv);
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Flushes previous write operations to the chip so that subsequent
+ * ioread*()s won't stall a CPU.
+ */
+static inline void tpm_tis_flush(void __iomem *iobase)
+{
+ ioread8(iobase + TPM_ACCESS(0));
+}
+#else
+#define tpm_tis_flush(iobase) do { } while (0)
+#endif
+
+static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
+{
+ iowrite8(b, iobase + addr);
+ tpm_tis_flush(iobase);
+}
+
+static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
+{
+ iowrite32(b, iobase + addr);
+ tpm_tis_flush(iobase);
+}
+
static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
@@ -150,7 +175,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);

while (len--)
- iowrite8(*value++, phy->iobase + addr);
+ tpm_tis_iowrite8(*value++, phy->iobase, addr);

return 0;
}
@@ -177,7 +202,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);

- iowrite32(value, phy->iobase + addr);
+ tpm_tis_iowrite32(value, phy->iobase, addr);

return 0;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4d37f018d..34b07047b 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -404,8 +404,11 @@ config ARMV7M_SYSTICK
This options enables support for the ARMv7M system timer unit

config ATMEL_PIT
+ bool "Microchip ARM Periodic Interval Timer (PIT)" if COMPILE_TEST
select TIMER_OF if OF
- def_bool SOC_AT91SAM9 || SOC_SAMA5
+ help
+ This enables build of clocksource and clockevent driver for
+ the integrated PIT in Microchip ARM SoCs.

config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
@@ -415,6 +418,14 @@ config ATMEL_ST
help
Support for the Atmel ST timer.

+config ATMEL_ARM_TCB_CLKSRC
+ bool "Microchip ARM TC Block" if COMPILE_TEST
+ select REGMAP_MMIO
+ depends on GENERIC_CLOCKEVENTS
+ help
+ This enables build of clocksource and clockevent driver for
+ the integrated Timer Counter Blocks in Microchip ARM SoCs.
+
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index db51b2427..0df9384a1 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,7 +3,8 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o
obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
-obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
+obj-$(CONFIG_ATMEL_ARM_TCB_CLKSRC) += timer-atmel-tcb.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 43f4d5c4d..ba15242a6 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -25,8 +25,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
- * source, used in either periodic or oneshot mode. This runs
- * at 32 KiHZ, and can handle delays of up to two seconds.
+ * source, used in either periodic or oneshot mode.
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
@@ -126,6 +125,8 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
+ bool clk_enabled;
+ u32 freq;
void __iomem *regs;
};

@@ -134,15 +135,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
static u32 timer_clock;

+static void tc_clk_disable(struct clock_event_device *d)
+{
+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+
+ clk_disable(tcd->clk);
+ tcd->clk_enabled = false;
+}
+
+static void tc_clk_enable(struct clock_event_device *d)
+{
+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+
+ if (tcd->clk_enabled)
+ return;
+ clk_enable(tcd->clk);
+ tcd->clk_enabled = true;
+}
+
static int tc_shutdown(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
@@ -150,8 +162,14 @@ static int tc_shutdown(struct clock_event_device *d)

writel(0xff, regs + ATMEL_TC_REG(2, IDR));
writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+ return 0;
+}
+
+static int tc_shutdown_clk_off(struct clock_event_device *d)
+{
+ tc_shutdown(d);
if (!clockevent_state_detached(d))
- clk_disable(tcd->clk);
+ tc_clk_disable(d);

return 0;
}
@@ -164,9 +182,9 @@ static int tc_set_oneshot(struct clock_event_device *d)
if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
tc_shutdown(d);

- clk_enable(tcd->clk);
+ tc_clk_enable(d);

- /* slow clock, count up to RC, then irq and stop */
+ /* count up to RC, then irq and stop */
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -186,12 +204,12 @@ static int tc_set_periodic(struct clock_event_device *d)
/* By not making the gentime core emulate periodic mode on top
* of oneshot, we get lower overhead and improved accuracy.
*/
- clk_enable(tcd->clk);
+ tc_clk_enable(d);

- /* slow clock, count up to RC, then irq and restart */
+ /* count up to RC, then irq and restart */
writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
- writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+ writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

/* Enable clock and interrupts on RC compare */
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -218,9 +236,13 @@ static struct tc_clkevt_device clkevt = {
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
.rating = 125,
+#else
+ .rating = 200,
+#endif
.set_next_event = tc_next_event,
- .set_state_shutdown = tc_shutdown,
+ .set_state_shutdown = tc_shutdown_clk_off,
.set_state_periodic = tc_set_periodic,
.set_state_oneshot = tc_set_oneshot,
},
@@ -240,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
return IRQ_NONE;
}

-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
+ unsigned divisor = atmel_tc_divisors[divisor_idx];
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
@@ -262,7 +285,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;

- timer_clock = clk32k_divisor_idx;
+ timer_clock = divisor_idx;
+ if (!divisor)
+ clkevt.freq = 32768;
+ else
+ clkevt.freq = clk_get_rate(t2_clk) / divisor;

clkevt.clkevt.cpumask = cpumask_of(0);

@@ -273,7 +300,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
return ret;
}

- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);

return ret;
}
@@ -410,7 +437,11 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;

/* channel 2: periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
ret = setup_clkevents(tc, clk32k_divisor_idx);
+#else
+ ret = setup_clkevents(tc, best_divisor_idx);
+#endif
if (ret)
goto err_unregister_clksrc;

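
With the reload value now computed from the actual timer frequency, tc_set_periodic() programs RC = (freq + HZ/2) / HZ, i.e. one tick's worth of counter cycles, rounded to nearest. As a worked example (illustrative numbers, not from the patch): the traditional 32768 Hz slow clock with HZ=100 gives (32768 + 50) / 100 = 328 counts per tick, while a 132 MHz master clock divided by 8 yields 16.5 MHz and 165000 counts per tick — the higher resolution is why the non-slow-clock configuration also raises the clockevent rating from 125 to 200.
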
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
new file mode 100644
index 000000000..63ce3b693
--- /dev/null
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/sched_clock.h>
+#include <soc/at91/atmel_tcb.h>
+
+struct atmel_tcb_clksrc {
+ struct clocksource clksrc;
+ struct clock_event_device clkevt;
+ struct regmap *regmap;
+ void __iomem *base;
+ struct clk *clk[2];
+ char name[20];
+ int channels[2];
+ int bits;
+ int irq;
+ struct {
+ u32 cmr;
+ u32 imr;
+ u32 rc;
+ bool clken;
+ } cache[2];
+ u32 bmr_cache;
+ bool registered;
+ bool clk_enabled;
+};
+
+static struct atmel_tcb_clksrc tc, tce;
+
+static struct clk *tcb_clk_get(struct device_node *node, int channel)
+{
+ struct clk *clk;
+ char clk_name[] = "t0_clk";
+
+ clk_name[1] += channel;
+ clk = of_clk_get_by_name(node->parent, clk_name);
+ if (!IS_ERR(clk))
+ return clk;
+
+ return of_clk_get_by_name(node->parent, "t0_clk");
+}
+
+/*
+ * Clockevent device using its own channel
+ */
+
+static void tc_clkevt2_clk_disable(struct clock_event_device *d)
+{
+ clk_disable(tce.clk[0]);
+ tce.clk_enabled = false;
+}
+
+static void tc_clkevt2_clk_enable(struct clock_event_device *d)
+{
+ if (tce.clk_enabled)
+ return;
+ clk_enable(tce.clk[0]);
+ tce.clk_enabled = true;
+}
+
+static int tc_clkevt2_stop(struct clock_event_device *d)
+{
+ writel(0xff, tce.base + ATMEL_TC_IDR(tce.channels[0]));
+ writel(ATMEL_TC_CCR_CLKDIS, tce.base + ATMEL_TC_CCR(tce.channels[0]));
+
+ return 0;
+}
+
+static int tc_clkevt2_shutdown(struct clock_event_device *d)
+{
+ tc_clkevt2_stop(d);
+ if (!clockevent_state_detached(d))
+ tc_clkevt2_clk_disable(d);
+
+ return 0;
+}
+
+/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+ * because using one of the divided clocks would usually mean the
+ * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+ *
+ * A divided clock could be good for high resolution timers, since
+ * 30.5 usec resolution can seem "low".
+ */
+static int tc_clkevt2_set_oneshot(struct clock_event_device *d)
+{
+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+ tc_clkevt2_stop(d);
+
+ tc_clkevt2_clk_enable(d);
+
+ /* slow clock, count up to RC, then irq and stop */
+ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_CPCSTOP |
+ ATMEL_TC_CMR_WAVE | ATMEL_TC_CMR_WAVESEL_UPRC,
+ tce.base + ATMEL_TC_CMR(tce.channels[0]));
+ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0]));
+
+ return 0;
+}
+
+static int tc_clkevt2_set_periodic(struct clock_event_device *d)
+{
+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+ tc_clkevt2_stop(d);
+
+ /* By not making the gentime core emulate periodic mode on top
+ * of oneshot, we get lower overhead and improved accuracy.
+ */
+ tc_clkevt2_clk_enable(d);
+
+ /* slow clock, count up to RC, then irq and restart */
+ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_WAVE |
+ ATMEL_TC_CMR_WAVESEL_UPRC,
+ tce.base + ATMEL_TC_CMR(tce.channels[0]));
+ writel((32768 + HZ / 2) / HZ, tce.base + ATMEL_TC_RC(tce.channels[0]));
+
+ /* Enable clock and interrupts on RC compare */
+ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0]));
+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
+ tce.base + ATMEL_TC_CCR(tce.channels[0]));
+
+ return 0;
+}
+
+static int tc_clkevt2_next_event(unsigned long delta,
+ struct clock_event_device *d)
+{
+ writel(delta, tce.base + ATMEL_TC_RC(tce.channels[0]));
+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
+ tce.base + ATMEL_TC_CCR(tce.channels[0]));
+
+ return 0;
+}
+
+static irqreturn_t tc_clkevt2_irq(int irq, void *handle)
+{
+ unsigned int sr;
+
+ sr = readl(tce.base + ATMEL_TC_SR(tce.channels[0]));
+ if (sr & ATMEL_TC_CPCS) {
+ tce.clkevt.event_handler(&tce.clkevt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void tc_clkevt2_suspend(struct clock_event_device *d)
+{
+ tce.cache[0].cmr = readl(tce.base + ATMEL_TC_CMR(tce.channels[0]));
+ tce.cache[0].imr = readl(tce.base + ATMEL_TC_IMR(tce.channels[0]));
+ tce.cache[0].rc = readl(tce.base + ATMEL_TC_RC(tce.channels[0]));
+ tce.cache[0].clken = !!(readl(tce.base + ATMEL_TC_SR(tce.channels[0])) &
+ ATMEL_TC_CLKSTA);
+}
+
+static void tc_clkevt2_resume(struct clock_event_device *d)
+{
+ /* Restore registers for the channel, RA and RB are not used */
+ writel(tce.cache[0].cmr, tc.base + ATMEL_TC_CMR(tce.channels[0]));
+ writel(tce.cache[0].rc, tc.base + ATMEL_TC_RC(tce.channels[0]));
+ writel(0, tc.base + ATMEL_TC_RA(tce.channels[0]));
+ writel(0, tc.base + ATMEL_TC_RB(tce.channels[0]));
+ /* Disable all the interrupts */
+ writel(0xff, tc.base + ATMEL_TC_IDR(tce.channels[0]));
+ /* Reenable interrupts that were enabled before suspending */
+ writel(tce.cache[0].imr, tc.base + ATMEL_TC_IER(tce.channels[0]));
+
+ /* Start the clock if it was used */
+ if (tce.cache[0].clken)
+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
+ tc.base + ATMEL_TC_CCR(tce.channels[0]));
+}
+
+static int __init tc_clkevt_register(struct device_node *node,
+ struct regmap *regmap, void __iomem *base,
+ int channel, int irq, int bits)
+{
+ int ret;
+ struct clk *slow_clk;
+
+ tce.regmap = regmap;
+ tce.base = base;
+ tce.channels[0] = channel;
+ tce.irq = irq;
+
+ slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
+ if (IS_ERR(slow_clk))
+ return PTR_ERR(slow_clk);
+
+ ret = clk_prepare_enable(slow_clk);
+ if (ret)
+ return ret;
+
+ tce.clk[0] = tcb_clk_get(node, tce.channels[0]);
+ if (IS_ERR(tce.clk[0])) {
+ ret = PTR_ERR(tce.clk[0]);
+ goto err_slow;
+ }
+
+ snprintf(tce.name, sizeof(tce.name), "%s:%d",
+ kbasename(node->parent->full_name), channel);
+ tce.clkevt.cpumask = cpumask_of(0);
+ tce.clkevt.name = tce.name;
+ tce.clkevt.set_next_event = tc_clkevt2_next_event,
+ tce.clkevt.set_state_shutdown = tc_clkevt2_shutdown,
+ tce.clkevt.set_state_periodic = tc_clkevt2_set_periodic,
+ tce.clkevt.set_state_oneshot = tc_clkevt2_set_oneshot,
+ tce.clkevt.suspend = tc_clkevt2_suspend,
+ tce.clkevt.resume = tc_clkevt2_resume,
+ tce.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ tce.clkevt.rating = 140;
+
+ /* try to enable clk to avoid future errors in mode change */
+ ret = clk_prepare_enable(tce.clk[0]);
+ if (ret)
+ goto err_slow;
+ clk_disable(tce.clk[0]);
+
+ clockevents_config_and_register(&tce.clkevt, 32768, 1,
+ CLOCKSOURCE_MASK(bits));
+
+ ret = request_irq(tce.irq, tc_clkevt2_irq, IRQF_TIMER | IRQF_SHARED,
+ tce.clkevt.name, &tce);
+ if (ret)
+ goto err_clk;
+
+ tce.registered = true;
+
+ return 0;
+
+err_clk:
+ clk_unprepare(tce.clk[0]);
+err_slow:
+ clk_disable_unprepare(slow_clk);
+
+ return ret;
+}
+
+/*
+ * Clocksource and clockevent using the same channel(s)
+ */
+static u64 tc_get_cycles(struct clocksource *cs)
+{
+ u32 lower, upper;
+
+ do {
+ upper = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1]));
+ lower = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0]));
+ } while (upper != readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1])));
+
+ return (upper << 16) | lower;
+}
+
+static u64 tc_get_cycles32(struct clocksource *cs)
+{
+ return readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0]));
+}
+
+static u64 notrace tc_sched_clock_read(void)
+{
+ return tc_get_cycles(&tc.clksrc);
+}
+
+static u64 notrace tc_sched_clock_read32(void)
+{
+ return tc_get_cycles32(&tc.clksrc);
+}
+
+static int tcb_clkevt_next_event(unsigned long delta,
+ struct clock_event_device *d)
+{
+ u32 old, next, cur;
+
+ old = readl(tc.base + ATMEL_TC_CV(tc.channels[0]));
+ next = old + delta;
+ writel(next, tc.base + ATMEL_TC_RC(tc.channels[0]));
+ cur = readl(tc.base + ATMEL_TC_CV(tc.channels[0]));
+
+ /* check whether the delta elapsed while setting the register */
+ if ((next < old && cur < old && cur > next) ||
+ (next > old && (cur < old || cur > next))) {
+ /*
+ * Clear the CPCS bit in the status register to avoid
+ * generating a spurious interrupt next time a valid
+ * timer event is configured.
+ */
+ old = readl(tc.base + ATMEL_TC_SR(tc.channels[0]));
+ return -ETIME;
+ }
+
+ writel(ATMEL_TC_CPCS, tc.base + ATMEL_TC_IER(tc.channels[0]));
+
+ return 0;
+}
+
+static irqreturn_t tc_clkevt_irq(int irq, void *handle)
+{
+ unsigned int sr;
+
+ sr = readl(tc.base + ATMEL_TC_SR(tc.channels[0]));
+ if (sr & ATMEL_TC_CPCS) {
+ tc.clkevt.event_handler(&tc.clkevt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int tcb_clkevt_oneshot(struct clock_event_device *dev)
+{
+ if (clockevent_state_oneshot(dev))
+ return 0;
+
+ /*
+ * Because both clockevent devices may share the same IRQ, we don't want
+ * the less likely one to stay requested
+ */
+ return request_irq(tc.irq, tc_clkevt_irq, IRQF_TIMER | IRQF_SHARED,
+ tc.name, &tc);
+}
+
+static int tcb_clkevt_shutdown(struct clock_event_device *dev)
+{
+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[0]));
+ if (tc.bits == 16)
+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[1]));
+
+ if (!clockevent_state_detached(dev))
+ free_irq(tc.irq, &tc);
+
+ return 0;
+}
+
+static void __init tcb_setup_dual_chan(struct atmel_tcb_clksrc *tc,
+ int mck_divisor_idx)
+{
+ /* first channel: waveform mode, input mclk/8, clock TIOA on overflow */
+ writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_CMR_WAVE
+ | ATMEL_TC_CMR_WAVESEL_UP /* free-run */
+ | ATMEL_TC_CMR_ACPA(SET) /* TIOA rises at 0 */
+ | ATMEL_TC_CMR_ACPC(CLEAR), /* (duty cycle 50%) */
+ tc->base + ATMEL_TC_CMR(tc->channels[0]));
+ writel(0x0000, tc->base + ATMEL_TC_RA(tc->channels[0]));
+ writel(0x8000, tc->base + ATMEL_TC_RC(tc->channels[0]));
+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */
+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0]));
+
+ /* second channel: waveform mode, input TIOA */
+ writel(ATMEL_TC_CMR_XC(tc->channels[1]) /* input: TIOA */
+ | ATMEL_TC_CMR_WAVE
+ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */
+ tc->base + ATMEL_TC_CMR(tc->channels[1]));
+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[1])); /* no irqs */
+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[1]));
+
+ /* chain both channels, we assume the previous channel */
+ regmap_write(tc->regmap, ATMEL_TC_BMR,
+ ATMEL_TC_BMR_TCXC(1 + tc->channels[1], tc->channels[1]));
+ /* then reset all the timers */
+ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
+}
+
+static void __init tcb_setup_single_chan(struct atmel_tcb_clksrc *tc,
+ int mck_divisor_idx)
+{
+ /* channel 0: waveform mode, input mclk/8 */
+ writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_CMR_WAVE
+ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */
+ tc->base + ATMEL_TC_CMR(tc->channels[0]));
+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */
+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0]));
+
+ /* then reset all the timers */
+ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
+}
+
+static void tc_clksrc_suspend(struct clocksource *cs)
+{
+ int i;
+
+ for (i = 0; i < 1 + (tc.bits == 16); i++) {
+ tc.cache[i].cmr = readl(tc.base + ATMEL_TC_CMR(tc.channels[i]));
+ tc.cache[i].imr = readl(tc.base + ATMEL_TC_IMR(tc.channels[i]));
+ tc.cache[i].rc = readl(tc.base + ATMEL_TC_RC(tc.channels[i]));
+ tc.cache[i].clken = !!(readl(tc.base +
+ ATMEL_TC_SR(tc.channels[i])) &
+ ATMEL_TC_CLKSTA);
+ }
+
+ if (tc.bits == 16)
+ regmap_read(tc.regmap, ATMEL_TC_BMR, &tc.bmr_cache);
+}
+
+static void tc_clksrc_resume(struct clocksource *cs)
+{
+ int i;
+
+ for (i = 0; i < 1 + (tc.bits == 16); i++) {
+ /* Restore registers for the channel, RA and RB are not used */
+ writel(tc.cache[i].cmr, tc.base + ATMEL_TC_CMR(tc.channels[i]));
+ writel(tc.cache[i].rc, tc.base + ATMEL_TC_RC(tc.channels[i]));
+ writel(0, tc.base + ATMEL_TC_RA(tc.channels[i]));
+ writel(0, tc.base + ATMEL_TC_RB(tc.channels[i]));
+ /* Disable all the interrupts */
+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[i]));
+ /* Reenable interrupts that were enabled before suspending */
+ writel(tc.cache[i].imr, tc.base + ATMEL_TC_IER(tc.channels[i]));
+
+ /* Start the clock if it was used */
+ if (tc.cache[i].clken)
+ writel(ATMEL_TC_CCR_CLKEN, tc.base +
+ ATMEL_TC_CCR(tc.channels[i]));
+ }
+
+ /* in case of dual channel, chain channels */
+ if (tc.bits == 16)
+ regmap_write(tc.regmap, ATMEL_TC_BMR, tc.bmr_cache);
+ /* Finally, trigger all the channels */
+ regmap_write(tc.regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
+}
+
+static int __init tcb_clksrc_register(struct device_node *node,
+ struct regmap *regmap, void __iomem *base,
+ int channel, int channel1, int irq,
+ int bits)
+{
+ u32 rate, divided_rate = 0;
+ int best_divisor_idx = -1;
+ int i, err = -1;
+ u64 (*tc_sched_clock)(void);
+
+ tc.regmap = regmap;
+ tc.base = base;
+ tc.channels[0] = channel;
+ tc.channels[1] = channel1;
+ tc.irq = irq;
+ tc.bits = bits;
+
+ tc.clk[0] = tcb_clk_get(node, tc.channels[0]);
+ if (IS_ERR(tc.clk[0]))
+ return PTR_ERR(tc.clk[0]);
+ err = clk_prepare_enable(tc.clk[0]);
+ if (err) {
+ pr_debug("can't enable T0 clk\n");
+ goto err_clk;
+ }
+
+ /* How fast will we be counting? Pick something over 5 MHz. */
+ rate = (u32)clk_get_rate(tc.clk[0]);
+ for (i = 0; i < 5; i++) {
+ unsigned int divisor = atmel_tc_divisors[i];
+ unsigned int tmp;
+
+ if (!divisor)
+ continue;
+
+ tmp = rate / divisor;
+ pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
+ if (best_divisor_idx > 0) {
+ if (tmp < 5 * 1000 * 1000)
+ continue;
+ }
+ divided_rate = tmp;
+ best_divisor_idx = i;
+ }
+
+ if (tc.bits == 32) {
+ tc.clksrc.read = tc_get_cycles32;
+ tcb_setup_single_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read32;
+ snprintf(tc.name, sizeof(tc.name), "%s:%d",
+ kbasename(node->parent->full_name), tc.channels[0]);
+ } else {
+ tc.clk[1] = tcb_clk_get(node, tc.channels[1]);
+ if (IS_ERR(tc.clk[1]))
+ goto err_disable_t0;
+
+ err = clk_prepare_enable(tc.clk[1]);
+ if (err) {
+ pr_debug("can't enable T1 clk\n");
+ goto err_clk1;
+ }
+ tc.clksrc.read = tc_get_cycles,
+ tcb_setup_dual_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read;
+ snprintf(tc.name, sizeof(tc.name), "%s:%d,%d",
+ kbasename(node->parent->full_name), tc.channels[0],
+ tc.channels[1]);
+ }
+
+ pr_debug("%s at %d.%03d MHz\n", tc.name,
+ divided_rate / 1000000,
+ ((divided_rate + 500000) % 1000000) / 1000);
+
+ tc.clksrc.name = tc.name;
+ tc.clksrc.suspend = tc_clksrc_suspend;
+ tc.clksrc.resume = tc_clksrc_resume;
+ tc.clksrc.rating = 200;
+ tc.clksrc.mask = CLOCKSOURCE_MASK(32);
+ tc.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ err = clocksource_register_hz(&tc.clksrc, divided_rate);
+ if (err)
+ goto err_disable_t1;
+
+ sched_clock_register(tc_sched_clock, 32, divided_rate);
+
+ tc.registered = true;
+
+ /* Set up and register clockevents */
+ tc.clkevt.name = tc.name;
+ tc.clkevt.cpumask = cpumask_of(0);
+ tc.clkevt.set_next_event = tcb_clkevt_next_event;
+ tc.clkevt.set_state_oneshot = tcb_clkevt_oneshot;
+ tc.clkevt.set_state_shutdown = tcb_clkevt_shutdown;
+ tc.clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
+ tc.clkevt.rating = 125;
+
+ clockevents_config_and_register(&tc.clkevt, divided_rate, 1,
+ BIT(tc.bits) - 1);
+
+ return 0;
+
+err_disable_t1:
+ if (tc.bits == 16)
+ clk_disable_unprepare(tc.clk[1]);
+
+err_clk1:
+ if (tc.bits == 16)
+ clk_put(tc.clk[1]);
+
+err_disable_t0:
+ clk_disable_unprepare(tc.clk[0]);
+
+err_clk:
+ clk_put(tc.clk[0]);
+
+ pr_err("%s: unable to register clocksource/clockevent\n",
+ tc.clksrc.name);
+
+ return err;
+}
+
+static int __init tcb_clksrc_init(struct device_node *node)
+{
+ const struct of_device_id *match;
+ struct regmap *regmap;
+ void __iomem *tcb_base;
+ u32 channel;
+ int irq, err, chan1 = -1;
+ unsigned bits;
+
+ if (tc.registered && tce.registered)
+ return -ENODEV;
+
+ /*
+ * The regmap has to be used to access registers that are shared
+ * between channels on the same TCB but we keep direct IO access for
+ * the counters to avoid the impact on performance
+ */
+ regmap = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ tcb_base = of_iomap(node->parent, 0);
+ if (!tcb_base) {
+ pr_err("%s +%d %s\n", __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ match = of_match_node(atmel_tcb_dt_ids, node->parent);
+ bits = (uintptr_t)match->data;
+
+ err = of_property_read_u32_index(node, "reg", 0, &channel);
+ if (err)
+ return err;
+
+ irq = of_irq_get(node->parent, channel);
+ if (irq < 0) {
+ irq = of_irq_get(node->parent, 0);
+ if (irq < 0)
+ return irq;
+ }
+
+ if (tc.registered)
+ return tc_clkevt_register(node, regmap, tcb_base, channel, irq,
+ bits);
+
+ if (bits == 16) {
+ of_property_read_u32_index(node, "reg", 1, &chan1);
+ if (chan1 == -1) {
+ if (tce.registered) {
+ pr_err("%s: clocksource needs two channels\n",
+ node->parent->full_name);
+ return -EINVAL;
+ } else {
+ return tc_clkevt_register(node, regmap,
+ tcb_base, channel,
+ irq, bits);
+ }
+ }
+ }
+
+ return tcb_clksrc_register(node, regmap, tcb_base, channel, chan1, irq,
+ bits);
+}
+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index ad48fd52c..c5264b3ee 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -32,6 +32,7 @@
 #include <linux/pid_namespace.h>

 #include <linux/cn_proc.h>
+#include <linux/locallock.h>

 /*
  * Size of a cn_msg followed by a proc_event structure. Since the
@@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

 /* proc_event_counts is used as the sequence number of the netlink message */
 static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
+static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock);

 static inline void send_msg(struct cn_msg *msg)
 {
-	preempt_disable();
+	local_lock(send_msg_lock);

 	msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
 	((struct proc_event *)msg->data)->cpu = smp_processor_id();
@@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg)
 	 */
 	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);

-	preempt_enable();
+	local_unlock(send_msg_lock);
 }

 void proc_fork_connector(struct task_struct *task)
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 35f71825b..bb4a6160d 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -125,7 +125,7 @@ config X86_POWERNOW_K7_ACPI

 config X86_POWERNOW_K8
 	tristate "AMD Opteron/Athlon64 PowerNow!"
-	depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
+	depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
 	help
 	  This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
 	  Support for K10 and newer processors is now in acpi-cpufreq.
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 67f7f8c42..b84e6c8b1 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -83,13 +83,6 @@ EXPORT_SYMBOL(caam_congested);
 static u64 times_congested;
 #endif

-/*
- * CPU from where the module initialised. This is required because QMan driver
- * requires CGRs to be removed from same CPU from where they were originally
- * allocated.
- */
-static int mod_init_cpu;
-
 /*
  * This is a a cache of buffers, from which the users of CAAM QI driver
  * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
 }
 EXPORT_SYMBOL(caam_drv_ctx_rel);

-int caam_qi_shutdown(struct device *qidev)
+void caam_qi_shutdown(struct device *qidev)
 {
-	int i, ret;
+	int i;
 	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
 	const cpumask_t *cpus = qman_affine_cpus();
-	struct cpumask old_cpumask = current->cpus_allowed;

 	for_each_cpu(i, cpus) {
 		struct napi_struct *irqtask;
@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev)
 			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
 	}

-	/*
-	 * QMan driver requires CGRs to be deleted from same CPU from where they
-	 * were instantiated. Hence we get the module removal execute from the
-	 * same CPU from where it was originally inserted.
-	 */
-	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
-	ret = qman_delete_cgr(&priv->cgr);
-	if (ret)
-		dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
-	else
-		qman_release_cgrid(priv->cgr.cgrid);
+	qman_delete_cgr_safe(&priv->cgr);
+	qman_release_cgrid(priv->cgr.cgrid);

 	kmem_cache_destroy(qi_cache);

-	/* Now that we're done with the CGRs, restore the cpus allowed mask */
-	set_cpus_allowed_ptr(current, &old_cpumask);
-
 	platform_device_unregister(priv->qi_pdev);
-	return ret;
 }

 static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
 	struct device *ctrldev = &caam_pdev->dev, *qidev;
 	struct caam_drv_private *ctrlpriv;
 	const cpumask_t *cpus = qman_affine_cpus();
-	struct cpumask old_cpumask = current->cpus_allowed;
 	static struct platform_device_info qi_pdev_info = {
 		.name = "caam_qi",
 		.id = PLATFORM_DEVID_NONE
 	};

-	/*
-	 * QMAN requires CGRs to be removed from same CPU+portal from where it
-	 * was originally allocated. Hence we need to note down the
-	 * initialisation CPU and use the same CPU for module exit.
-	 * We select the first CPU to from the list of portal owning CPUs.
-	 * Then we pin module init to this CPU.
-	 */
-	mod_init_cpu = cpumask_first(cpus);
-	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
 	qi_pdev_info.parent = ctrldev;
 	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
 	qi_pdev = platform_device_register_full(&qi_pdev_info);
@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
 		return -ENOMEM;
 	}

-	/* Done with the CGRs; restore the cpus allowed mask */
-	set_cpus_allowed_ptr(current, &old_cpumask);
 #ifdef CONFIG_DEBUG_FS
 	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
 			    &times_congested, &caam_fops_u64_ro);
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 357b69f57..b6c8acc30 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -174,7 +174,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
 void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);

 int caam_qi_init(struct platform_device *pdev);
-int caam_qi_shutdown(struct device *dev);
+void caam_qi_shutdown(struct device *dev);

 /**
  * qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 69842145c..4c3ef46e7 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -179,7 +179,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		return 0;

 retry:
-	seq = read_seqcount_begin(&resv->seq);
+	seq = read_seqbegin(&resv->seq);
 	rcu_read_lock();

 	fobj = rcu_dereference(resv->fence);
@@ -188,7 +188,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	else
 		shared_count = 0;
 	fence_excl = rcu_dereference(resv->fence_excl);
-	if (read_seqcount_retry(&resv->seq, seq)) {
+	if (read_seqretry(&resv->seq, seq)) {
 		rcu_read_unlock();
 		goto retry;
 	}
@@ -1046,12 +1046,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)

 		robj = buf_obj->resv;
 		while (true) {
-			seq = read_seqcount_begin(&robj->seq);
+			seq = read_seqbegin(&robj->seq);
 			rcu_read_lock();
 			fobj = rcu_dereference(robj->fence);
 			shared_count = fobj ? fobj->shared_count : 0;
 			fence = rcu_dereference(robj->fence_excl);
-			if (!read_seqcount_retry(&robj->seq, seq))
+			if (!read_seqretry(&robj->seq, seq))
 				break;
 			rcu_read_unlock();
 		}
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 49ab09468..f11d58492 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -109,8 +109,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,

 	dma_fence_get(fence);

-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
+	write_seqlock(&obj->seq);

 	for (i = 0; i < fobj->shared_count; ++i) {
 		struct dma_fence *old_fence;
@@ -121,8 +120,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 		if (old_fence->context == fence->context) {
 			/* memory barrier is added by write_seqcount_begin */
 			RCU_INIT_POINTER(fobj->shared[i], fence);
-			write_seqcount_end(&obj->seq);
-			preempt_enable();
+			write_sequnlock(&obj->seq);

 			dma_fence_put(old_fence);
 			return;
@@ -146,8 +144,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 		fobj->shared_count++;
 	}

-	write_seqcount_end(&obj->seq);
-	preempt_enable();
+	write_sequnlock(&obj->seq);

 	dma_fence_put(signaled);
 }
@@ -191,15 +188,13 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 	fobj->shared_count++;

 done:
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
+	write_seqlock(&obj->seq);
 	/*
 	 * RCU_INIT_POINTER can be used here,
 	 * seqcount provides the necessary barriers
 	 */
 	RCU_INIT_POINTER(obj->fence, fobj);
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
+	write_sequnlock(&obj->seq);

 	if (!old)
 		return;
@@ -259,14 +254,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 	if (fence)
 		dma_fence_get(fence);

-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
+	write_seqlock(&obj->seq);
 	RCU_INIT_POINTER(obj->fence_excl, fence);
 	if (old)
 		old->shared_count = 0;
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
+	write_sequnlock(&obj->seq);

 	/* inplace update, no shared fences */
 	while (i--)
@@ -349,13 +341,10 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	src_list = reservation_object_get_list(dst);
 	old = reservation_object_get_excl(dst);

-	preempt_disable();
-	write_seqcount_begin(&dst->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
+	write_seqlock(&dst->seq);
 	RCU_INIT_POINTER(dst->fence_excl, new);
 	RCU_INIT_POINTER(dst->fence, dst_list);
-	write_seqcount_end(&dst->seq);
-	preempt_enable();
+	write_sequnlock(&dst->seq);

 	if (src_list)
 		kfree_rcu(src_list, rcu);
@@ -396,7 +385,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 		shared_count = i = 0;

 		rcu_read_lock();
-		seq = read_seqcount_begin(&obj->seq);
+		seq = read_seqbegin(&obj->seq);

 		fence_excl = rcu_dereference(obj->fence_excl);
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
@@ -445,7 +434,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 		}
 	}

-	if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+	if (i != shared_count || read_seqretry(&obj->seq, seq)) {
 		while (i--)
 			dma_fence_put(shared[i]);
 		dma_fence_put(fence_excl);
@@ -494,7 +483,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,

 retry:
 	shared_count = 0;
-	seq = read_seqcount_begin(&obj->seq);
+	seq = read_seqbegin(&obj->seq);
 	rcu_read_lock();
 	i = -1;

@@ -541,7 +530,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,

 	rcu_read_unlock();
 	if (fence) {
-		if (read_seqcount_retry(&obj->seq, seq)) {
+		if (read_seqretry(&obj->seq, seq)) {
 			dma_fence_put(fence);
 			goto retry;
 		}
@@ -597,7 +586,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 retry:
 	ret = true;
 	shared_count = 0;
-	seq = read_seqcount_begin(&obj->seq);
+	seq = read_seqbegin(&obj->seq);

 	if (test_all) {
 		unsigned i;
@@ -618,7 +607,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 				break;
 		}

-		if (read_seqcount_retry(&obj->seq, seq))
+		if (read_seqretry(&obj->seq, seq))
 			goto retry;
 	}

@@ -631,7 +620,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 		if (ret < 0)
 			goto retry;

-		if (read_seqcount_retry(&obj->seq, seq))
+		if (read_seqretry(&obj->seq, seq))
 			goto retry;
 	}
 }
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 184ad34b9..661d980dc 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -89,7 +89,7 @@ struct mm_struct efi_mm = {

 struct workqueue_struct *efi_rts_wq;

-static bool disable_runtime;
+static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE);
 static int __init setup_noefi(char *arg)
 {
 	disable_runtime = true;
@@ -115,6 +115,9 @@ static int __init parse_efi_cmdline(char *str)
 	if (parse_option_str(str, "noruntime"))
 		disable_runtime = true;

+	if (parse_option_str(str, "runtime"))
+		disable_runtime = false;
+
 	return 0;
 }
 early_param("efi", parse_efi_cmdline);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index f92597c29..10c675850 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -261,11 +261,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	}

 	/* Install the new fence list, seqcount provides the barriers */
-	preempt_disable();
-	write_seqcount_begin(&resv->seq);
+	write_seqlock(&resv->seq);
 	RCU_INIT_POINTER(resv->fence, new);
-	write_seqcount_end(&resv->seq);
-	preempt_enable();
+	write_sequnlock(&resv->seq);

 	/* Drop the references to the removed fences or move them to ef_list */
 	for (i = j, k = 0; i < old->shared_count; ++i) {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b1b207747..a6949f9d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -516,7 +516,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 				 long timeout,
 				 struct intel_rps_client *rps_client)
 {
-	unsigned int seq = __read_seqcount_begin(&resv->seq);
+	unsigned int seq = read_seqbegin(&resv->seq);
 	struct dma_fence *excl;
 	bool prune_fences = false;

@@ -569,9 +569,9 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	 * signaled and that the reservation object has not been changed (i.e.
 	 * no new fences have been added).
 	 */
-	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
+	if (prune_fences && !read_seqretry(&resv->seq, seq)) {
 		if (reservation_object_trylock(resv)) {
-			if (!__read_seqcount_retry(&resv->seq, seq))
+			if (!read_seqretry(&resv->seq, seq))
 				reservation_object_add_excl_fence(resv, NULL);
 			reservation_object_unlock(resv);
 		}
@@ -4693,7 +4693,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 *
 	 */
 retry:
-	seq = raw_read_seqcount(&obj->resv->seq);
+	seq = read_seqbegin(&obj->resv->seq);

 	/* Translate the exclusive fence to the READ *and* WRITE engine */
 	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
@@ -4711,7 +4711,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		}
 	}

-	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+	if (args->busy && read_seqretry(&obj->resv->seq, seq))
 		goto retry;

 	err = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 298779693..f65817c51 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1025,6 +1025,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+	preempt_disable_rt();

 	/* Get optional system timestamp before query. */
 	if (stime)
@@ -1076,6 +1077,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	*etime = ktime_get();

 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+	preempt_enable_rt();

 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5c2c93cba..7124510b9 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -356,9 +356,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,

 	GEM_BUG_ON(!i915_request_completed(rq));

-	local_irq_disable();
-
-	spin_lock(&engine->timeline.lock);
+	spin_lock_irq(&engine->timeline.lock);
 	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
 	list_del_init(&rq->link);
 	spin_unlock(&engine->timeline.lock);
@@ -372,9 +370,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
 		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
 		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
 	}
-	spin_unlock(&rq->lock);
-
-	local_irq_enable();
+	spin_unlock_irq(&rq->lock);

 	/*
 	 * The backing object for the context is done after switching to the
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index b50c6b829..33028d8f4 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -2,6 +2,10 @@
 #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
 #define _I915_TRACE_H_

+#ifdef CONFIG_PREEMPT_RT_BASE
+#define NOTRACE
+#endif
+
 #include <linux/stringify.h>
 #include <linux/types.h>
 #include <linux/tracepoint.h>
@@ -679,7 +683,7 @@ DEFINE_EVENT(i915_request, i915_request_add,
 	    TP_ARGS(rq)
 );

-#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
+#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE)
 DEFINE_EVENT(i915_request, i915_request_submit,
 	     TP_PROTO(struct i915_request *rq),
 	     TP_ARGS(rq)
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index f7026e887..07e4ddebd 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -36,6 +36,7 @@
 #include <drm/drm_rect.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_plane_helper.h>
+#include <linux/locallock.h>
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
@@ -60,6 +61,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 #define VBLANK_EVASION_TIME_US 100
 #endif

+static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock);
+
 /**
  * intel_pipe_update_start() - start update of a set of display registers
  * @new_crtc_state: the new crtc state
@@ -107,7 +110,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	if (intel_psr_wait_for_idle(new_crtc_state))
 		DRM_ERROR("PSR idle timed out, atomic update may fail\n");

-	local_irq_disable();
+	local_lock_irq(pipe_update_lock);

 	crtc->debug.min_vbl = min;
 	crtc->debug.max_vbl = max;
@@ -131,11 +134,11 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 			break;
 		}

-		local_irq_enable();
+		local_unlock_irq(pipe_update_lock);

 		timeout = schedule_timeout(timeout);

-		local_irq_disable();
+		local_lock_irq(pipe_update_lock);
 	}

 	finish_wait(wq, &wait);
@@ -168,7 +171,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	return;

 irq_disable:
-	local_irq_disable();
+	local_lock_irq(pipe_update_lock);
 }

 /**
@@ -204,7 +207,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 		new_crtc_state->base.event = NULL;
 	}

-	local_irq_enable();
+	local_unlock_irq(pipe_update_lock);

 	if (intel_vgpu_active(dev_priv))
 		return;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 09522b915..a3e183c59 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1818,6 +1818,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	struct radeon_device *rdev = dev->dev_private;

 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+	preempt_disable_rt();

 	/* Get optional system timestamp before query. */
 	if (stime)
@@ -1910,6 +1911,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	*etime = ktime_get();

 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+	preempt_enable_rt();

 	/* Decode into vertical and horizontal scanout position. */
 	*vpos = position & 0x1fff;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index d0fd147ef..fb5a3461b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -167,10 +167,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
 	u32 *fifo_mem = dev_priv->mmio_virt;

-	preempt_disable();
 	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
-	preempt_enable();
 }

 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 12bc9fa21..278f03f50 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -112,10 +112,12 @@ int hv_post_message(union hv_connection_id connection_id,
 static void hv_stimer0_isr(void)
 {
 	struct hv_per_cpu_context *hv_cpu;
+	struct pt_regs *regs = get_irq_regs();
+	u64 ip = regs ? instruction_pointer(regs) : 0;

 	hv_cpu = this_cpu_ptr(hv_context.cpu_context);
 	hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt);
-	add_interrupt_randomness(stimer0_vector, 0);
+	add_interrupt_randomness(stimer0_vector, 0, ip);
 }

 static int hv_ce_set_next_event(unsigned long delta,
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 87d3d7da7..1d2d8a4b8 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,7 @@
 #include <linux/atomic.h>
 #include <linux/hyperv.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>

 #include "hv_trace.h"

diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 9aa18f387..39aaa1499 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1042,6 +1042,8 @@ static void vmbus_isr(void)
 	void *page_addr = hv_cpu->synic_event_page;
 	struct hv_message *msg;
 	union hv_synic_event_flags *event;
+	struct pt_regs *regs = get_irq_regs();
+	u64 ip = regs ? instruction_pointer(regs) : 0;
 	bool handled = false;

 	if (unlikely(page_addr == NULL))
@@ -1085,7 +1087,7 @@ static void vmbus_isr(void)
 		tasklet_schedule(&hv_cpu->msg_dpc);
 	}

-	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
+	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip);
 }

 /*
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index c1ce2299a..5c57ecf4b 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -800,9 +800,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
 	}

 	ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq,
-			       IRQF_NO_SUSPEND | IRQF_ONESHOT,
-			       dev_name(&pdev->dev), i2c);
-
+			       IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq);
 		goto err_clk;
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 061a4bfb0..575aff50b 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -449,8 +449,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
 	hix5hd2_i2c_init(priv);

 	ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq,
-			       IRQF_NO_SUSPEND | IRQF_ONESHOT,
-			       dev_name(&pdev->dev), priv);
+			       IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq);
 		goto err_clk;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index bedd5fba3..3f4259f11 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1037,7 +1037,7 @@ int hfi1_get_proc_affinity(int node)
 	struct hfi1_affinity_node *entry;
 	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
 	const struct cpumask *node_mask,
-		*proc_mask = &current->cpus_allowed;
+		*proc_mask = current->cpus_ptr;
 	struct hfi1_affinity_node_list *affinity = &node_affinity;
 	struct cpu_mask_set *set = &affinity->proc;

@@ -1045,7 +1045,7 @@ int hfi1_get_proc_affinity(int node)
 	 * check whether process/context affinity has already
 	 * been set
 	 */
-	if (cpumask_weight(proc_mask) == 1) {
+	if (current->nr_cpus_allowed == 1) {
 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
 			  current->pid, current->comm,
 			  cpumask_pr_args(proc_mask));
@@ -1056,7 +1056,7 @@ int hfi1_get_proc_affinity(int node)
 		cpu = cpumask_first(proc_mask);
 		cpumask_set_cpu(cpu, &set->used);
 		goto done;
-	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+	} else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
 			  current->pid, current->comm,
 			  cpumask_pr_args(proc_mask));
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 291c12f58..05e7b28a0 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -853,14 +853,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
 {
 	struct sdma_rht_node *rht_node;
 	struct sdma_engine *sde = NULL;
-	const struct cpumask *current_mask = &current->cpus_allowed;
 	unsigned long cpu_id;

 	/*
 	 * To ensure that always the same sdma engine(s) will be
 	 * selected make sure the process is pinned to this CPU only.
 	 */
-	if (cpumask_weight(current_mask) != 1)
+	if (current->nr_cpus_allowed != 1)
 		goto out;

 	cpu_id = smp_processor_id();
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 78fa634de..27b6e664e 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1142,7 +1142,7 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
 static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
 {
 	struct qib_filedata *fd = fp->private_data;
-	const unsigned int weight = cpumask_weight(&current->cpus_allowed);
+	const unsigned int weight = current->nr_cpus_allowed;
 	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
 	int local_cpu;

@@ -1623,9 +1623,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
 		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
 	else {
 		int unit;
-		const unsigned int cpu = cpumask_first(&current->cpus_allowed);
-		const unsigned int weight =
-			cpumask_weight(&current->cpus_allowed);
+		const unsigned int cpu = cpumask_first(current->cpus_ptr);
+		const unsigned int weight = current->nr_cpus_allowed;

 		if (weight == 1 && !test_bit(cpu, qib_cpulist))
 			if (!find_hca(cpu, &unit) && unit >= 0)
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 4018af769..b4ce8c115 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -63,6 +63,7 @@ config LEDS_TRIGGER_BACKLIGHT

 config LEDS_TRIGGER_CPU
 	bool "LED CPU Trigger"
+	depends on !PREEMPT_RT_BASE
 	help
 	  This allows LEDs to be controlled by active CPUs. This shows
 	  the active CPUs across an array of LEDs so you can see which
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index f6e0a8b3a..18c03d79a 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -1,6 +1,7 @@

 config BCACHE
 	tristate "Block device as cache"
+	depends on !PREEMPT_RT_FULL
 	select CRC64
 	help
 	  Allows a block device to be used as cache for other devices; uses
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 288064e94..23cd88a14 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -692,7 +692,6 @@ static void dm_old_request_fn(struct request_queue *q)
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
 		kthread_queue_work(&md->kworker, &tio->work);
-		BUG_ON(!irqs_disabled());
 	}
 }

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 24ef07d52..2b6f91da7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	struct raid5_percpu *percpu;
 	unsigned long cpu;

-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	percpu = per_cpu_ptr(conf->percpu, cpu);
+	spin_lock(&percpu->lock);
 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
 		ops_run_biofill(sh);
 		overlap_clear++;
@@ -2129,7 +2130,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
 				wake_up(&sh->raid_conf->wait_for_overlap);
 		}
-	put_cpu();
+	spin_unlock(&percpu->lock);
+	put_cpu_light();
 }

 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
@@ -6815,6 +6817,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
 			__func__, cpu);
 		return -ENOMEM;
 	}
+	spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
 	return 0;
 }

@@ -6825,7 +6828,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
 	conf->percpu = alloc_percpu(struct raid5_percpu);
 	if (!conf->percpu)
 		return -ENOMEM;
-
 	err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
 	if (!err) {
 		conf->scribble_disks = max(conf->raid_disks,
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 8474c2241..a3bf907ab 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -637,6 +637,7 @@ struct r5conf {
 	int			recovery_disabled;
 	/* per cpu variables */
 	struct raid5_percpu {
+		spinlock_t	lock;	     /* Protection for -RT */
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
 		struct flex_array *scribble;   /* space for constructing buffer
 					      * lists and performing address
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 74f7c79d5..7fe963930 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -69,8 +69,7 @@ config ATMEL_TCB_CLKSRC
 	  are combined to make a single 32-bit timer.

 	  When GENERIC_CLOCKEVENTS is defined, the third timer channel
-	  may be used as a clock event device supporting oneshot mode
-	  (delays of up to two seconds) based on the 32 KiHz clock.
+	  may be used as a clock event device supporting oneshot mode.

 config ATMEL_TCB_CLKSRC_BLOCK
 	int
@@ -83,6 +82,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
 	  TC can be used for other purposes, such as PWM generation and
 	  interval timing.

+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+	bool "TC Block use 32 KiHz clock"
+	depends on ATMEL_TCB_CLKSRC
+	default y
+	help
+	  Select this to use 32 KiHz base clock rate as TC block clock
+	  source for clock events.
+
+
 config DUMMY_IRQ
 	tristate "Dummy IRQ handler"
 	default n
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 598201645..953ff54dd 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -23,7 +23,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/gpio.h>
-#include <linux/seqlock.h>
 #include <linux/idr.h>

 #include "swphy.h"
@@ -36,7 +35,6 @@ struct fixed_mdio_bus {
 struct fixed_phy {
 	int addr;
 	struct phy_device *phydev;
-	seqcount_t seqcount;
 	struct fixed_phy_status status;
 	int (*link_update)(struct net_device *, struct fixed_phy_status *);
 	struct list_head node;
@@ -62,18 +60,15 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
 	list_for_each_entry(fp, &fmb->phys, node) {
 		if (fp->addr == phy_addr) {
 			struct fixed_phy_status state;
-			int s;
-
-			do {
-				s = read_seqcount_begin(&fp->seqcount);
-				/* Issue callback if user registered it. */
-				if (fp->link_update)
-					fp->link_update(fp->phydev->attached_dev,
-							&fp->status);
-				/* Check the GPIO for change in status */
-				fixed_phy_update(fp);
-				state = fp->status;
-			} while (read_seqcount_retry(&fp->seqcount, s));
+
+			/* Issue callback if user registered it. */
+			if (fp->link_update)
+				fp->link_update(fp->phydev->attached_dev,
+						&fp->status);
+
+			/* Check the GPIO for change in status */
+			fixed_phy_update(fp);
+			state = fp->status;

 			return swphy_read_reg(reg_num, &state);
 		}
@@ -131,8 +126,6 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
 	if (!fp)
 		return -ENOMEM;

-	seqcount_init(&fp->seqcount);
-
 	if (irq != PHY_POLL)
 		fmb->mii_bus->irq[phy_addr] = irq;

diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 94ad6fe29..52a49f0bb 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
 			while (!ctx->done.done && msecs--)
 				udelay(1000);
 		} else {
-			wait_event_interruptible(ctx->done.wait,
-						 ctx->done.done);
+			swait_event_interruptible_exclusive(ctx->done.wait,
+							    ctx->done.done);
 		}
 		break;
 	default:
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f0dbb7ad8..c59b30bab 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -130,31 +130,34 @@ static u32 phandle_cache_mask;
 /*
  * Caller must hold devtree_lock.
  */
-static void __of_free_phandle_cache(void)
+static struct device_node** __of_free_phandle_cache(void)
 {
 	u32 cache_entries = phandle_cache_mask + 1;
 	u32 k;
+	struct device_node **shadow;

 	if (!phandle_cache)
-		return;
+		return NULL;

 	for (k = 0; k < cache_entries; k++)
 		of_node_put(phandle_cache[k]);

-	kfree(phandle_cache);
+	shadow = phandle_cache;
 	phandle_cache = NULL;
+	return shadow;
 }

 int of_free_phandle_cache(void)
 {
 	unsigned long flags;
+	struct device_node **shadow;

 	raw_spin_lock_irqsave(&devtree_lock, flags);

-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();

 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
+	kfree(shadow);
 	return 0;
 }
 #if !defined(CONFIG_MODULES)
@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void)
 	u32 cache_entries;
 	struct device_node *np;
 	u32 phandles = 0;
+	struct device_node **shadow;

 	raw_spin_lock_irqsave(&devtree_lock, flags);

-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();

 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void)

 	if (!phandles)
 		goto out;
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);

 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;

 	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
 				GFP_ATOMIC);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	if (!phandle_cache)
 		goto out;

@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void)

 out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	kfree(shadow);
 }

 void __init of_core_init(void)
|
|
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
|
|
index 5aaa4ce04..45cebf92a 100644
|
|
--- a/drivers/pci/switch/switchtec.c
|
|
+++ b/drivers/pci/switch/switchtec.c
|
|
@@ -43,10 +43,11 @@ struct switchtec_user {
|
|
|
|
enum mrpc_state state;
|
|
|
|
- struct completion comp;
|
|
+ wait_queue_head_t cmd_comp;
|
|
struct kref kref;
|
|
struct list_head list;
|
|
|
|
+ bool cmd_done;
|
|
u32 cmd;
|
|
u32 status;
|
|
u32 return_code;
|
|
@@ -68,7 +69,7 @@ static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
|
|
stuser->stdev = stdev;
|
|
kref_init(&stuser->kref);
|
|
INIT_LIST_HEAD(&stuser->list);
|
|
- init_completion(&stuser->comp);
|
|
+ init_waitqueue_head(&stuser->cmd_comp);
|
|
stuser->event_cnt = atomic_read(&stdev->event_cnt);
|
|
|
|
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
|
|
@@ -151,7 +152,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
|
|
kref_get(&stuser->kref);
|
|
stuser->read_len = sizeof(stuser->data);
|
|
stuser_set_state(stuser, MRPC_QUEUED);
|
|
- init_completion(&stuser->comp);
|
|
+ stuser->cmd_done = false;
|
|
list_add_tail(&stuser->list, &stdev->mrpc_queue);
|
|
|
|
mrpc_cmd_submit(stdev);
|
|
@@ -188,7 +189,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
|
|
stuser->read_len);
|
|
|
|
out:
|
|
- complete_all(&stuser->comp);
|
|
+ stuser->cmd_done = true;
|
|
+ wake_up_interruptible(&stuser->cmd_comp);
|
|
list_del_init(&stuser->list);
|
|
stuser_put(stuser);
|
|
stdev->mrpc_busy = 0;
|
|
@@ -358,7 +360,7 @@ static int switchtec_dev_open(struct inode *inode, struct file *filp)
|
|
return PTR_ERR(stuser);
|
|
|
|
filp->private_data = stuser;
|
|
- nonseekable_open(inode, filp);
|
|
+ stream_open(inode, filp);
|
|
|
|
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
|
|
|
|
@@ -458,10 +460,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
|
|
mutex_unlock(&stdev->mrpc_mutex);
|
|
|
|
if (filp->f_flags & O_NONBLOCK) {
|
|
- if (!try_wait_for_completion(&stuser->comp))
|
|
+ if (!READ_ONCE(stuser->cmd_done))
|
|
return -EAGAIN;
|
|
} else {
|
|
- rc = wait_for_completion_interruptible(&stuser->comp);
|
|
+ rc = wait_event_interruptible(stuser->cmd_comp,
|
|
+ stuser->cmd_done);
|
|
if (rc < 0)
|
|
return rc;
|
|
}
|
|
@@ -509,7 +512,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
|
|
struct switchtec_dev *stdev = stuser->stdev;
|
|
__poll_t ret = 0;
|
|
|
|
- poll_wait(filp, &stuser->comp.wait, wait);
|
|
+ poll_wait(filp, &stuser->cmd_comp, wait);
|
|
poll_wait(filp, &stdev->event_wq, wait);
|
|
|
|
if (lock_mutex_and_test_alive(stdev))
|
|
@@ -517,7 +520,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
|
|
|
|
mutex_unlock(&stdev->mrpc_mutex);
|
|
|
|
- if (try_wait_for_completion(&stuser->comp))
|
|
+ if (READ_ONCE(stuser->cmd_done))
|
|
ret |= EPOLLIN | EPOLLRDNORM;
|
|
|
|
if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
|
|
@@ -1041,7 +1044,8 @@ static void stdev_kill(struct switchtec_dev *stdev)
|
|
|
|
/* Wake up and kill any users waiting on an MRPC request */
|
|
list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
|
|
- complete_all(&stuser->comp);
|
|
+ stuser->cmd_done = true;
|
|
+ wake_up_interruptible(&stuser->cmd_comp);
|
|
list_del_init(&stuser->list);
|
|
stuser_put(stuser);
|
|
}
|
|
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 6768b2e81..c20f51af6 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1459,11 +1459,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
 {
 	struct fcoe_percpu_s *fps;
-	int rc;
+	int rc, cpu = get_cpu_light();

-	fps = &get_cpu_var(fcoe_percpu);
+	fps = &per_cpu(fcoe_percpu, cpu);
 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
-	put_cpu_var(fcoe_percpu);
+	put_cpu_light();

 	return rc;
 }
@@ -1650,11 +1650,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
 		return 0;
 	}

-	stats = per_cpu_ptr(lport->stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu_light());
 	stats->InvalidCRCCount++;
 	if (stats->InvalidCRCCount < 5)
 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
-	put_cpu();
+	put_cpu_light();
 	return -EINVAL;
 }

@@ -1697,7 +1697,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 	 */
 	hp = (struct fcoe_hdr *) skb_network_header(skb);

-	stats = per_cpu_ptr(lport->stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu_light());
 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
 		if (stats->ErrorFrames < 5)
 			printk(KERN_WARNING "fcoe: FCoE version "
@@ -1729,13 +1729,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 		goto drop;

 	if (!fcoe_filter_frames(lport, fp)) {
-		put_cpu();
+		put_cpu_light();
 		fc_exch_recv(lport, fp);
 		return;
 	}
 drop:
 	stats->ErrorFrames++;
-	put_cpu();
+	put_cpu_light();
 	kfree_skb(skb);
 }

diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 658c07265..bceab74ee 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -838,7 +838,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)

 	INIT_LIST_HEAD(&del_list);

-	stats = per_cpu_ptr(fip->lp->stats, get_cpu());
+	stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());

 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
 		deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
@@ -874,7 +874,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 			sel_time = fcf->time;
 		}
 	}
-	put_cpu();
+	put_cpu_light();

 	list_for_each_entry_safe(fcf, next, &del_list, list) {
 		/* Removes fcf from current list */
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 6ba257cbc..d2398a19f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 	}
 	memset(ep, 0, sizeof(*ep));

-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	pool = per_cpu_ptr(mp->pool, cpu);
 	spin_lock_bh(&pool->lock);
-	put_cpu();
+	put_cpu_light();

 	/* peek cache of free slot */
 	if (pool->left != FC_XID_UNKNOWN) {
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 22571abca..78a529d36 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -437,12 +437,10 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
 			return -EINVAL;
 		wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);

-		hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
-				      HRTIMER_MODE_ABS);
+		hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC,
+					      HRTIMER_MODE_ABS, current);
 		hrtimer_set_expires_range_ns(&to->timer, wake_time,
 					     current->timer_slack_ns);
-
-		hrtimer_init_sleeper(to, current);
 	}

 	while (1) {
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 1ef937d79..540becb78 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -75,7 +75,7 @@ static int max_packages __read_mostly;
 /* Array of package pointers */
 static struct pkg_device **packages;
 /* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
 /* Protects zone operation in the work function against hotplug removal */
 static DEFINE_MUTEX(thermal_zone_mutex);

@@ -291,12 +291,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	u64 msr_val, wr_val;

 	mutex_lock(&thermal_zone_mutex);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	++pkg_work_cnt;

 	pkgdev = pkg_temp_thermal_get_dev(cpu);
 	if (!pkgdev) {
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		mutex_unlock(&thermal_zone_mutex);
 		return;
 	}
@@ -310,7 +310,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	}

 	enable_pkg_thres_interrupt();
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);

 	/*
 	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_val)
 	struct pkg_device *pkgdev;
 	unsigned long flags;

-	spin_lock_irqsave(&pkg_temp_lock, flags);
+	raw_spin_lock_irqsave(&pkg_temp_lock, flags);
 	++pkg_interrupt_cnt;

 	disable_pkg_thres_interrupt();
@@ -347,7 +347,7 @@ static int pkg_thermal_notify(u64 msr_val)
 		pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
 	}

-	spin_unlock_irqrestore(&pkg_temp_lock, flags);
+	raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
 	return 0;
 }

@@ -393,9 +393,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
 			pkgdev->msr_pkg_therm_high);

 	cpumask_set_cpu(cpu, &pkgdev->cpumask);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	packages[pkgid] = pkgdev;
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	return 0;
 }

@@ -432,7 +432,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 	}

 	/* Protect against work and interrupts */
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);

 	/*
 	 * Check whether this cpu was the current target and store the new
@@ -464,9 +464,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 	 * To cancel the work we need to drop the lock, otherwise
 	 * we might deadlock if the work needs to be flushed.
 	 */
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	cancel_delayed_work_sync(&pkgdev->work);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	/*
 	 * If this is not the last cpu in the package and the work
 	 * did not run after we dropped the lock above, then we
@@ -477,7 +477,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 		pkg_thermal_schedule_work(target, &pkgdev->work);
 	}

-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);

 	/* Final cleanup if this is the last cpu */
 	if (lastcpu)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 66e0d5d71..0739026f7 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -54,7 +54,16 @@ static struct uart_driver serial8250_reg;

 static unsigned int skip_txen_test; /* force skip of txen test at init time */

-#define PASS_LIMIT	512
+/*
+ * On -rt we can have a more delays, and legitimately
+ * so - so don't drop work spuriously and spam the
+ * syslog:
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define PASS_LIMIT	1000000
+#else
+# define PASS_LIMIT	512
+#endif

 #include <asm/serial.h>
 /*
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f920bfa16..7abd80912 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -31,6 +31,7 @@
 #include <linux/nmi.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/kdb.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
 #include <linux/ktime.h>
@@ -3290,9 +3291,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,

 	serial8250_rpm_get(up);

-	if (port->sysrq)
+	if (port->sysrq || oops_in_progress)
 		locked = 0;
-	else if (oops_in_progress)
+	else if (in_kdb_printk())
 		locked = spin_trylock_irqsave(&port->lock, flags);
 	else
 		spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index c987db507..7d57b8ea2 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2288,18 +2288,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
 {
 	struct uart_amba_port *uap = amba_ports[co->index];
 	unsigned int old_cr = 0, new_cr;
-	unsigned long flags;
+	unsigned long flags = 0;
 	int locked = 1;

 	clk_enable(uap->clk);

-	local_irq_save(flags);
+	/*
+	 * local_irq_save(flags);
+	 *
+	 * This local_irq_save() is nonsense. If we come in via sysrq
+	 * handling then interrupts are already disabled. Aside of
+	 * that the port.sysrq check is racy on SMP regardless.
+	 */
 	if (uap->port.sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock(&uap->port.lock);
+		locked = spin_trylock_irqsave(&uap->port.lock, flags);
 	else
-		spin_lock(&uap->port.lock);
+		spin_lock_irqsave(&uap->port.lock, flags);

 	/*
 	 * First save the CR then disable the interrupts
@@ -2325,8 +2331,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
 		pl011_write(old_cr, uap, REG_CR);

 	if (locked)
-		spin_unlock(&uap->port.lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&uap->port.lock, flags);

 	clk_disable(uap->clk);
 }
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 6420ae581..0f4f41ed9 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1307,13 +1307,10 @@ serial_omap_console_write(struct console *co, const char *s,

 	pm_runtime_get_sync(up->dev);

-	local_irq_save(flags);
-	if (up->port.sysrq)
-		locked = 0;
-	else if (oops_in_progress)
-		locked = spin_trylock(&up->port.lock);
+	if (up->port.sysrq || oops_in_progress)
+		locked = spin_trylock_irqsave(&up->port.lock, flags);
 	else
-		spin_lock(&up->port.lock);
+		spin_lock_irqsave(&up->port.lock, flags);

 	/*
 	 * First save the IER then disable the interrupts
@@ -1342,8 +1339,7 @@ serial_omap_console_write(struct console *co, const char *s,
 	pm_runtime_mark_last_busy(up->dev);
 	pm_runtime_put_autosuspend(up->dev);
 	if (locked)
-		spin_unlock(&up->port.lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&up->port.lock, flags);
 }

 static int __init
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 72a8c7009..d0b366955 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -215,7 +215,7 @@ static struct sysrq_key_op sysrq_showlocks_op = {
 #endif

 #ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(show_lock);
+static DEFINE_RAW_SPINLOCK(show_lock);

 static void showacpu(void *dummy)
 {
@@ -225,10 +225,10 @@ static void showacpu(void *dummy)
 	if (idle_cpu(smp_processor_id()))
 		return;

-	spin_lock_irqsave(&show_lock, flags);
+	raw_spin_lock_irqsave(&show_lock, flags);
 	pr_info("CPU%d:\n", smp_processor_id());
 	show_stack(NULL, NULL);
-	spin_unlock_irqrestore(&show_lock, flags);
+	raw_spin_unlock_irqrestore(&show_lock, flags);
 }

 static void sysrq_showregs_othercpus(struct work_struct *dummy)
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 62e6c526e..3e8c40cc1 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1738,7 +1738,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
 	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
 	struct usb_anchor *anchor = urb->anchor;
 	int status = urb->unlinked;
-	unsigned long flags;

 	urb->hcpriv = NULL;
 	if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1766,9 +1765,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
 	 * and no one may trigger the above deadlock situation when
 	 * running complete() in tasklet.
 	 */
-	local_irq_save(flags);
 	urb->complete(urb);
-	local_irq_restore(flags);

 	usb_anchor_resume_wakeups(anchor);
 	atomic_dec(&urb->use_count);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 2050993fb..e2ca75a6e 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1626,7 +1626,7 @@ static void ffs_data_put(struct ffs_data *ffs)
 		pr_info("%s(): freeing\n", __func__);
 		ffs_data_clear(ffs);
 		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
-		       waitqueue_active(&ffs->ep0req_completion.wait) ||
+		       swait_active(&ffs->ep0req_completion.wait) ||
 		       waitqueue_active(&ffs->wait));
 		destroy_workqueue(ffs->io_completion_wq);
 		kfree(ffs->dev_name);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 6528df6f3..8ffc59405 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
 	spin_unlock_irq (&epdata->dev->lock);

 	if (likely (value == 0)) {
-		value = wait_event_interruptible (done.wait, done.done);
+		value = swait_event_interruptible_exclusive(done.wait, done.done);
 		if (value != 0) {
 			spin_lock_irq (&epdata->dev->lock);
 			if (likely (epdata->ep != NULL)) {
@@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
 				usb_ep_dequeue (epdata->ep, epdata->req);
 				spin_unlock_irq (&epdata->dev->lock);

-				wait_event (done.wait, done.done);
+				swait_event_exclusive(done.wait, done.done);
 				if (epdata->status == -ECONNRESET)
 					epdata->status = -EINTR;
 			} else {
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 2b652df44..3737ab178 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -145,7 +145,7 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd)
 		ktime_t t = watchdog_next_keepalive(wdd);

 		if (t > 0)
-			hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
+			hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL_HARD);
 	} else {
 		hrtimer_cancel(&wd_data->timer);
 	}
@@ -164,7 +164,7 @@ static int __watchdog_ping(struct watchdog_device *wdd)
 	if (ktime_after(earliest_keepalive, now)) {
 		hrtimer_start(&wd_data->timer,
 			      ktime_sub(earliest_keepalive, now),
-			      HRTIMER_MODE_REL);
+			      HRTIMER_MODE_REL_HARD);
 		return 0;
 	}

@@ -1022,7 +1022,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
 	dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);

 	kthread_init_work(&wd_data->work, watchdog_ping_work);
-	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 	wd_data->timer.function = watchdog_timer_expired;

 	if (wdd->id == 0) {
@@ -1070,7 +1070,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
 	__module_get(wdd->ops->owner);
 	get_device(&wd_data->dev);
 	if (handle_boot_enabled)
-		hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
+		hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL_HARD);
 	else
 		pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
 			wdd->id);
diff --git a/fs/aio.c b/fs/aio.c
|
|
index b77f0959f..38dc460c0 100644
|
|
--- a/fs/aio.c
|
|
+++ b/fs/aio.c
|
|
@@ -121,6 +121,7 @@ struct kioctx {
|
|
long nr_pages;
|
|
|
|
struct rcu_work free_rwork; /* see free_ioctx() */
|
|
+ struct work_struct free_work; /* see free_ioctx() */
|
|
|
|
/*
|
|
* signals when all in-flight requests are done
|
|
@@ -612,9 +613,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
|
|
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
|
|
* now it's safe to cancel any that need to be.
|
|
*/
|
|
-static void free_ioctx_users(struct percpu_ref *ref)
|
|
+static void free_ioctx_users_work(struct work_struct *work)
|
|
{
|
|
- struct kioctx *ctx = container_of(ref, struct kioctx, users);
|
|
+ struct kioctx *ctx = container_of(work, struct kioctx, free_work);
|
|
struct aio_kiocb *req;
|
|
|
|
spin_lock_irq(&ctx->ctx_lock);
|
|
@@ -632,6 +633,14 @@ static void free_ioctx_users(struct percpu_ref *ref)
|
|
percpu_ref_put(&ctx->reqs);
|
|
}
|
|
|
|
+static void free_ioctx_users(struct percpu_ref *ref)
|
|
+{
|
|
+ struct kioctx *ctx = container_of(ref, struct kioctx, users);
|
|
+
|
|
+ INIT_WORK(&ctx->free_work, free_ioctx_users_work);
|
|
+ schedule_work(&ctx->free_work);
|
|
+}
|
|
+
|
|
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
|
|
{
|
|
unsigned i, new_nr;
|
|
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index 70e9afe58..1a6b88ad4 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -8,6 +8,7 @@
* option, any later version, incorporated herein by reference.
*/

+#include <linux/delay.h>
#include "autofs_i.h"

/* Check if a dentry can be expired */
@@ -153,7 +154,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev,
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
- cpu_relax();
+ cpu_chill();
goto relock;
}
spin_unlock(&p->d_lock);
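cpu_chill() (defined in the include/linux/delay.h hunk further down; cpu_relax() on !RT) replaces busy-waiting in trylock retry loops: on RT the lock holder may itself be preempted, so spinning at high priority can livelock, while cpu_chill() sleeps briefly and lets the holder run. The shape of such a loop, sketched with hypothetical locks:

#include <linux/spinlock.h>
#include <linux/delay.h>	/* cpu_chill() on RT, cpu_relax() otherwise */

static void take_both(spinlock_t *a, spinlock_t *b)
{
retry:
	spin_lock(a);
	if (!spin_trylock(b)) {
		spin_unlock(a);
		cpu_chill();	/* sleep briefly instead of spinning on RT */
		goto retry;
	}
	/* ... both locks held ... */
	spin_unlock(b);
	spin_unlock(a);
}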
diff --git a/fs/buffer.c b/fs/buffer.c
index af88734d1..8d6ca620a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -274,8 +274,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* decide that the page is now completely done.
*/
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -288,8 +287,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);

/*
* If none of the buffers had errors and they are all
@@ -301,9 +299,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
return;

still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}

/*
@@ -330,8 +326,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}

first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);

clear_buffer_async_write(bh);
unlock_buffer(bh);
@@ -343,15 +338,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
}
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
end_page_writeback(page);
return;

still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
EXPORT_SYMBOL(end_buffer_async_write);

@@ -3353,6 +3345,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
+ buffer_head_init_locks(ret);
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 3925a7bfc..33f7723fb 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
struct inode *inode;
struct super_block *sb = parent->d_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);

cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);

diff --git a/fs/dcache.c b/fs/dcache.c
index ef1641398..a620f5553 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2472,9 +2472,10 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{

+ preempt_disable_rt();
for (;;) {
- unsigned n = dir->i_dir_seq;
- if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
+ unsigned n = dir->__i_dir_seq;
+ if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n)
return n;
cpu_relax();
}
@@ -2482,26 +2483,30 @@ static inline unsigned start_dir_add(struct inode *dir)

static inline void end_dir_add(struct inode *dir, unsigned n)
{
- smp_store_release(&dir->i_dir_seq, n + 2);
+ smp_store_release(&dir->__i_dir_seq, n + 2);
+ preempt_enable_rt();
}

static void d_wait_lookup(struct dentry *dentry)
{
- if (d_in_lookup(dentry)) {
- DECLARE_WAITQUEUE(wait, current);
- add_wait_queue(dentry->d_wait, &wait);
- do {
- set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock(&dentry->d_lock);
- schedule();
- spin_lock(&dentry->d_lock);
- } while (d_in_lookup(dentry));
- }
+ struct swait_queue __wait;
+
+ if (!d_in_lookup(dentry))
+ return;
+
+ INIT_LIST_HEAD(&__wait.task_list);
+ do {
+ prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&dentry->d_lock);
+ schedule();
+ spin_lock(&dentry->d_lock);
+ } while (d_in_lookup(dentry));
+ finish_swait(dentry->d_wait, &__wait);
}

struct dentry *d_alloc_parallel(struct dentry *parent,
const struct qstr *name,
- wait_queue_head_t *wq)
+ struct swait_queue_head *wq)
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
@@ -2515,7 +2520,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,

retry:
rcu_read_lock();
- seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
+ seq = smp_load_acquire(&parent->d_inode->__i_dir_seq);
r_seq = read_seqbegin(&rename_lock);
dentry = __d_lookup_rcu(parent, name, &d_seq);
if (unlikely(dentry)) {
@@ -2543,7 +2548,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
}

hlist_bl_lock(b);
- if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
+ if (unlikely(READ_ONCE(parent->d_inode->__i_dir_seq) != seq)) {
hlist_bl_unlock(b);
rcu_read_unlock();
goto retry;
@@ -2616,7 +2621,7 @@ void __d_lookup_done(struct dentry *dentry)
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
- wake_up_all(dentry->d_wait);
+ swake_up_all(dentry->d_wait);
dentry->d_wait = NULL;
hlist_bl_unlock(b);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
@@ -3128,6 +3133,8 @@ __setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
+ unsigned int loop;
+
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
@@ -3144,11 +3151,16 @@ static void __init dcache_init_early(void)
NULL,
0,
0);
+
+ for (loop = 0; loop < (1U << d_hash_shift); loop++)
+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
+
d_hash_shift = 32 - d_hash_shift;
}

static void __init dcache_init(void)
{
+ unsigned int loop;
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
@@ -3172,6 +3184,10 @@ static void __init dcache_init(void)
NULL,
0,
0);
+
+ for (loop = 0; loop < (1U << d_hash_shift); loop++)
+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
+
d_hash_shift = 32 - d_hash_shift;
}

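The dcache conversion above swaps the regular waitqueue for a simple wait queue (swait): swait heads are protected by a raw spinlock and carry no custom wake callbacks, so waking them stays safe from truly atomic context on RT. A minimal wait/wake pair using only the upstream swait API; the condition and names are illustrative:

#include <linux/swait.h>
#include <linux/sched.h>

static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
static bool my_cond;

static void my_waiter(void)
{
	DECLARE_SWAITQUEUE(wait);

	for (;;) {
		prepare_to_swait_exclusive(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(my_cond))
			break;
		schedule();
	}
	finish_swait(&my_wq, &wait);
}

static void my_waker(void)
{
	WRITE_ONCE(my_cond, true);
	swake_up_all(&my_wq);	/* raw-spinlock protected, RT-safe */
}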
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index d46007154..626baf547 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -563,12 +563,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)

static void ep_poll_safewake(wait_queue_head_t *wq)
{
- int this_cpu = get_cpu();
+ int this_cpu = get_cpu_light();

ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);

- put_cpu();
+ put_cpu_light();
}

#else
diff --git a/fs/exec.c b/fs/exec.c
index da6cca9bd..e7ca83a34 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1028,6 +1028,7 @@ static int exec_mmap(struct mm_struct *mm)
}
}
task_lock(tsk);
+ preempt_disable_rt();

local_irq_disable();
active_mm = tsk->active_mm;
@@ -1048,6 +1049,7 @@ static int exec_mmap(struct mm_struct *mm)
local_irq_enable();
tsk->mm->vmacache_seqnum = 0;
vmacache_flush(tsk);
+ preempt_enable_rt();
task_unlock(tsk);
if (old_mm) {
up_read(&old_mm->mmap_sem);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index decae80ae..288c32a48 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio)
* We check all buffers in the page under BH_Uptodate_Lock
* to avoid races with other end io clearing async_write flags
*/
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
+ flags = bh_uptodate_lock_irqsave(head);
do {
if (bh_offset(bh) < bio_start ||
bh_offset(bh) + bh->b_size > bio_end) {
@@ -110,8 +109,7 @@ static void ext4_finish_bio(struct bio *bio)
buffer_io_error(bh);
}
} while ((bh = bh->b_this_page) != head);
- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(head, flags);
if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (data_page)
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index c550512ce..fe69e46ba 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -962,3 +962,11 @@ int __fscache_check_consistency(struct fscache_cookie *cookie,
return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
+
+void __init fscache_cookie_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fscache_cookie_hash); i++)
+ INIT_HLIST_BL_HEAD(&fscache_cookie_hash[i]);
+}
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 30ad89db1..1d5f1d679 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -149,6 +149,7 @@ static int __init fscache_init(void)
ret = -ENOMEM;
goto error_cookie_jar;
}
+ fscache_cookie_init();

fscache_root = kobject_create_and_add("fscache", kernel_kobj);
if (!fscache_root)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 606895599..bb774a390 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1234,7 +1234,7 @@ static int fuse_direntplus_link(struct file *file,
struct inode *dir = d_inode(parent);
struct fuse_conn *fc;
struct inode *inode;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);

if (!o->nodeid) {
/*
diff --git a/fs/inode.c b/fs/inode.c
index 5df2e8ee2..9cdec5f41 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -156,7 +156,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_bdev = NULL;
inode->i_cdev = NULL;
inode->i_link = NULL;
- inode->i_dir_seq = 0;
+ inode->__i_dir_seq = 0;
inode->i_rdev = 0;
inode->dirtied_when = 0;

diff --git a/fs/locks.c b/fs/locks.c
index 9dc72fda5..bfdb9d238 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -936,7 +936,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
return -ENOMEM;
}

- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
@@ -977,7 +977,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)

out:
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
@@ -1015,7 +1015,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
new_fl2 = locks_alloc_lock();
}

- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
/*
* New lock request. Walk all POSIX locks and look for conflicts. If
@@ -1187,7 +1187,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
}
out:
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
/*
* Free any unused locks.
*/
@@ -1462,7 +1462,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
goto free_lock;
}

- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);

time_out_leases(inode, &dispose);
@@ -1514,13 +1514,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
locks_insert_block(fl, new_fl);
trace_break_lease_block(inode, new_fl);
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);

locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_next, break_time);

- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
@@ -1537,7 +1537,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
}
out:
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
free_lock:
locks_free_lock(new_fl);
@@ -1610,7 +1610,7 @@ int fcntl_getlease(struct file *filp)

ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1620,7 +1620,7 @@ int fcntl_getlease(struct file *filp)
break;
}
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);

locks_dispose_list(&dispose);
}
@@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
return -EINVAL;
}

- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
error = check_conflicting_open(dentry, arg, lease->fl_flags);
@@ -1765,7 +1765,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
lease->fl_lmops->lm_setup(lease, priv);
out:
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
if (is_deleg)
inode_unlock(inode);
@@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
return error;
}

- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (fl->fl_file == filp &&
@@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
if (victim)
error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
return error;
}
@@ -2532,13 +2532,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
if (list_empty(&ctx->flc_lease))
return;

- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
if (filp == fl->fl_file)
lease_modify(fl, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);

locks_dispose_list(&dispose);
}
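The fs/locks.c conversion drops the _preempt_disable/_preempt_enable variants because they pin the reader non-preemptible across flc_lock, which becomes a sleeping lock on RT. The plain percpu rwsem reader API keeps the same structure and may sleep:

#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(my_rwsem);

static void my_reader(void)
{
	percpu_down_read(&my_rwsem);	/* may sleep; sleeping locks allowed inside */
	/* ... read-side critical section ... */
	percpu_up_read(&my_rwsem);
}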
diff --git a/fs/namei.c b/fs/namei.c
index 6144c6434..e19ee0da8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1653,7 +1653,7 @@ static struct dentry *__lookup_slow(const struct qstr *name,
{
struct dentry *dentry, *old;
struct inode *inode = dir->d_inode;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);

/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
@@ -3143,7 +3143,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);

if (unlikely(IS_DEADDIR(dir_inode)))
return -ENOENT;
diff --git a/fs/namespace.c b/fs/namespace.c
index 68e3777b9..f01e1ca39 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
@@ -328,8 +329,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
- cpu_relax();
+ while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
+ preempt_enable();
+ cpu_chill();
+ preempt_disable();
+ }
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 98811a077..ec6ccb228 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -162,11 +162,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
sp = state->owner;
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+ seq = read_seqbegin(&sp->so_reclaim_seqlock);
err = nfs4_open_delegation_recall(ctx, state, stateid);
if (!err)
err = nfs_delegation_claim_locks(state, stateid);
- if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+ if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq))
err = -EAGAIN;
mutex_unlock(&sp->so_delegreturn_mutex);
put_nfs_open_context(ctx);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 12d4c7686..87a105915 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -457,7 +457,7 @@ static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
struct dentry *dentry;
struct dentry *alias;
struct inode *dir = d_inode(parent);
@@ -1520,7 +1520,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
umode_t mode)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
struct nfs_open_context *ctx;
struct dentry *res;
struct iattr attr = { .ia_valid = ATTR_OPEN };
@@ -1866,7 +1866,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)

trace_nfs_rmdir_enter(dir, dentry);
if (d_really_is_positive(dentry)) {
+#ifdef CONFIG_PREEMPT_RT_BASE
+ down(&NFS_I(d_inode(dentry))->rmdir_sem);
+#else
down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
+#endif
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
/* Ensure the VFS deletes this inode */
switch (error) {
@@ -1876,7 +1880,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
case -ENOENT:
nfs_dentry_handle_enoent(dentry);
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+ up(&NFS_I(d_inode(dentry))->rmdir_sem);
+#else
up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
+#endif
} else
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
trace_nfs_rmdir_exit(dir, dentry, error);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 053e78d77..0441a63c7 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2112,7 +2112,11 @@ static void init_once(void *foo)
atomic_long_set(&nfsi->nrequests, 0);
atomic_long_set(&nfsi->commit_info.ncommit, 0);
atomic_set(&nfsi->commit_info.rpcs_out, 0);
+#ifdef CONFIG_PREEMPT_RT_BASE
+ sema_init(&nfsi->rmdir_sem, 1);
+#else
init_rwsem(&nfsi->rmdir_sem);
+#endif
mutex_init(&nfsi->commit_mutex);
nfs4_init_once(nfsi);
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index d22176d87..594cf289c 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -114,7 +114,7 @@ struct nfs4_state_owner {
unsigned long so_flags;
struct list_head so_states;
struct nfs_seqid_counter so_seqid;
- seqcount_t so_reclaim_seqcount;
+ seqlock_t so_reclaim_seqlock;
struct mutex so_delegreturn_mutex;
};

diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ef9e80928..2767dcc0c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2918,7 +2918,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
unsigned int seq;
int ret;

- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+ seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);

ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
@@ -2959,7 +2959,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,

if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+ if (read_seqretry(&sp->so_reclaim_seqlock, seq))
nfs4_schedule_stateid_recovery(server, state);
}

diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index c843917f8..707e2e06f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -514,7 +514,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
nfs4_init_seqid_counter(&sp->so_seqid);
atomic_set(&sp->so_count, 1);
INIT_LIST_HEAD(&sp->so_lru);
- seqcount_init(&sp->so_reclaim_seqcount);
+ seqlock_init(&sp->so_reclaim_seqlock);
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
@@ -1582,8 +1582,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+ write_seqlock(&sp->so_reclaim_seqlock);
+#else
+ write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
+#endif
spin_lock(&sp->so_lock);
- raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
@@ -1670,14 +1674,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
spin_lock(&sp->so_lock);
goto restart;
}
- raw_write_seqcount_end(&sp->so_reclaim_seqcount);
spin_unlock(&sp->so_lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ write_sequnlock(&sp->so_reclaim_seqlock);
+#else
+ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
+#endif
return 0;
out_err:
nfs4_put_open_state(state);
- spin_lock(&sp->so_lock);
- raw_write_seqcount_end(&sp->so_reclaim_seqcount);
- spin_unlock(&sp->so_lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ write_sequnlock(&sp->so_reclaim_seqlock);
+#else
+ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
+#endif
return status;
}

diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index fd61bf0fc..839bfa76f 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -13,7 +13,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/swait.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>

@@ -52,6 +52,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
rpc_restart_call_prepare(task);
}

+#ifdef CONFIG_PREEMPT_RT_BASE
+static void nfs_down_anon(struct semaphore *sema)
+{
+ down(sema);
+}
+
+static void nfs_up_anon(struct semaphore *sema)
+{
+ up(sema);
+}
+
+#else
+static void nfs_down_anon(struct rw_semaphore *rwsem)
+{
+ down_read_non_owner(rwsem);
+}
+
+static void nfs_up_anon(struct rw_semaphore *rwsem)
+{
+ up_read_non_owner(rwsem);
+}
+#endif
+
/**
* nfs_async_unlink_release - Release the sillydelete data.
* @task: rpc_task of the sillydelete
@@ -65,7 +88,7 @@ static void nfs_async_unlink_release(void *calldata)
struct dentry *dentry = data->dentry;
struct super_block *sb = dentry->d_sb;

- up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
+ nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
d_lookup_done(dentry);
nfs_free_unlinkdata(data);
dput(dentry);
@@ -118,10 +141,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf
struct inode *dir = d_inode(dentry->d_parent);
struct dentry *alias;

- down_read_non_owner(&NFS_I(dir)->rmdir_sem);
+ nfs_down_anon(&NFS_I(dir)->rmdir_sem);
alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);
if (IS_ERR(alias)) {
- up_read_non_owner(&NFS_I(dir)->rmdir_sem);
+ nfs_up_anon(&NFS_I(dir)->rmdir_sem);
return 0;
}
if (!d_in_lookup(alias)) {
@@ -143,7 +166,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf
ret = 0;
spin_unlock(&alias->d_lock);
dput(alias);
- up_read_non_owner(&NFS_I(dir)->rmdir_sem);
+ nfs_up_anon(&NFS_I(dir)->rmdir_sem);
/*
* If we'd displaced old cached devname, free it. At that
* point dentry is definitely not a root, so we won't need
@@ -183,7 +206,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
goto out_free_name;
}
data->res.dir_attr = &data->dir_attr;
- init_waitqueue_head(&data->wq);
+ init_swait_queue_head(&data->wq);

status = -EBUSY;
spin_lock(&dentry->d_lock);
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 8946130c8..71d0b3ba7 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -106,8 +106,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -122,8 +121,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
@@ -156,9 +154,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
unlock_page(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}

/**
diff --git a/fs/proc/array.c b/fs/proc/array.c
index fd56e15b3..01a617652 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -381,9 +381,9 @@ static inline void task_context_switch_counts(struct seq_file *m,
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Cpus_allowed:\t%*pb\n",
- cpumask_pr_args(&task->cpus_allowed));
+ cpumask_pr_args(task->cpus_ptr));
seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
- cpumask_pr_args(&task->cpus_allowed));
+ cpumask_pr_args(task->cpus_ptr));
}

#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dc9841826..e7458e0aa 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -96,6 +96,7 @@
#include <linux/posix-timers.h>
#include <linux/share_pool.h>
#include <trace/events/oom.h>
+#include <linux/swait.h>
#include "internal.h"
#include "fd.h"

@@ -1979,7 +1980,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,

child = d_hash_and_lookup(dir, &qname);
if (!child) {
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
child = d_alloc_parallel(dir, &qname, &wq);
if (IS_ERR(child))
goto end_instantiate;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index c95f32b83..75f500cb7 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -681,7 +681,7 @@ static bool proc_sys_fill_cache(struct file *file,

child = d_lookup(dir, &qname);
if (!child) {
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
child = d_alloc_parallel(dir, &qname, &wq);
if (IS_ERR(child))
return false;
diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
index 23a9c28ad..6a73c4fa8 100644
--- a/fs/squashfs/decompressor_multi_percpu.c
+++ b/fs/squashfs/decompressor_multi_percpu.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/buffer_head.h>
+#include <linux/locallock.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -25,6 +26,8 @@ struct squashfs_stream {
void *stream;
};

+static DEFINE_LOCAL_IRQ_LOCK(stream_lock);
+
void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
void *comp_opts)
{
@@ -79,10 +82,15 @@ int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
{
struct squashfs_stream __percpu *percpu =
(struct squashfs_stream __percpu *) msblk->stream;
- struct squashfs_stream *stream = get_cpu_ptr(percpu);
- int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
- offset, length, output);
- put_cpu_ptr(stream);
+ struct squashfs_stream *stream;
+ int res;
+
+ stream = get_locked_ptr(stream_lock, percpu);
+
+ res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
+ offset, length, output);
+
+ put_locked_ptr(stream_lock, stream);

if (res < 0)
ERROR("%s decompression failed, data probably corrupt\n",
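DEFINE_LOCAL_IRQ_LOCK and get_locked_ptr()/put_locked_ptr() are RT-tree helpers from linux/locallock.h (added elsewhere in this patch): on RT they serialize access to the per-CPU data with a per-CPU sleeping lock instead of disabling preemption, and on !RT they reduce to get_cpu_ptr()/put_cpu_ptr(). A reduced sketch of the same pattern with a hypothetical per-CPU structure:

#include <linux/locallock.h>	/* RT-tree header */
#include <linux/percpu.h>

struct my_state { int counter; };
static DEFINE_PER_CPU(struct my_state, my_state);
static DEFINE_LOCAL_IRQ_LOCK(my_lock);

static void my_update(void)
{
	struct my_state *s;

	s = get_locked_ptr(my_lock, &my_state);	/* pins the per-CPU data, RT-safe */
	s->counter++;
	put_locked_ptr(my_lock, s);
}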
diff --git a/fs/timerfd.c b/fs/timerfd.c
index d69ad801e..f84509346 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -471,7 +471,11 @@ static int do_timerfd_settime(int ufd, int flags,
break;
}
spin_unlock_irq(&ctx->wqh.lock);
- cpu_relax();
+
+ if (isalarm(ctx))
+ hrtimer_grab_expiry_lock(&ctx->t.alarm.timer);
+ else
+ hrtimer_grab_expiry_lock(&ctx->t.tmr);
}

/*
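hrtimer_grab_expiry_lock() (declared in the include/linux/hrtimer.h hunk below) replaces the cpu_relax() busy-wait: when the timer callback is running in softirq context on RT, the canceling task briefly takes the per-base softirq_expiry_lock and thus blocks until the callback finishes rather than spinning against it. A sketch of the cancel-side retry pattern, assuming the RT-tree helper's blocking semantics:

#include <linux/hrtimer.h>

/* Hypothetical: retry a state change that races with a running callback. */
static void my_retry_cancel(struct hrtimer *t)
{
	for (;;) {
		if (hrtimer_try_to_cancel(t) >= 0)
			break;			/* callback not running: done */
		hrtimer_grab_expiry_lock(t);	/* wait it out, no busy loop */
	}
}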
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 803c61f66..ee1191bfe 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -59,7 +59,7 @@ struct userfaultfd_ctx {
/* waitqueue head for events */
wait_queue_head_t event_wqh;
/* a refile sequence protected by fault_pending_wqh lock */
- struct seqcount refile_seq;
+ seqlock_t refile_seq;
/* pseudo fd refcounting */
atomic_t refcount;
/* userfaultfd syscall flags */
@@ -1071,7 +1071,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
* waitqueue could become empty if this is the
* only userfault.
*/
- write_seqcount_begin(&ctx->refile_seq);
+ write_seqlock(&ctx->refile_seq);

/*
* The fault_pending_wqh.lock prevents the uwq
@@ -1097,7 +1097,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
list_del(&uwq->wq.entry);
add_wait_queue(&ctx->fault_wqh, &uwq->wq);

- write_seqcount_end(&ctx->refile_seq);
+ write_sequnlock(&ctx->refile_seq);

/* careful to always initialize msg if ret == 0 */
*msg = uwq->msg;
@@ -1270,11 +1270,11 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
* sure we've userfaults to wake.
*/
do {
- seq = read_seqcount_begin(&ctx->refile_seq);
+ seq = read_seqbegin(&ctx->refile_seq);
need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
waitqueue_active(&ctx->fault_wqh);
cond_resched();
- } while (read_seqcount_retry(&ctx->refile_seq, seq));
+ } while (read_seqretry(&ctx->refile_seq, seq));
if (need_wakeup)
__wake_userfault(ctx, range);
}
@@ -1970,7 +1970,7 @@ static void init_once_userfaultfd_ctx(void *mem)
init_waitqueue_head(&ctx->fault_wqh);
init_waitqueue_head(&ctx->event_wqh);
init_waitqueue_head(&ctx->fd_wqh);
- seqcount_init(&ctx->refile_seq);
+ seqlock_init(&ctx->refile_seq);
}

SYSCALL_DEFINE1(userfaultfd, int, flags)
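The refile_seq change is the standard RT seqcount-to-seqlock conversion: with a bare seqcount, a preempted writer leaves readers retrying forever, while a seqlock embeds a spinlock so the reader side can block on (and on RT priority-boost) the writer. Read and write sides after such a conversion, sketched generically:

#include <linux/seqlock.h>

static seqlock_t my_seq;
static int my_a, my_b;

static void my_init(void)
{
	seqlock_init(&my_seq);
}

static void my_write(int a, int b)
{
	write_seqlock(&my_seq);		/* takes the embedded spinlock */
	my_a = a;
	my_b = b;
	write_sequnlock(&my_seq);
}

static int my_read(void)
{
	unsigned int seq;
	int sum;

	do {
		seq = read_seqbegin(&my_seq);
		sum = my_a + my_b;
	} while (read_seqretry(&my_seq, seq));
	return sum;
}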
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 1817a8415..9d5d51167 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -5,6 +5,7 @@
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/percpu-defs.h>
+//#include <linux/irqflags.h>

#ifdef CONFIG_SMP

diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 5435f97bc..d425ebc34 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,7 +14,7 @@
* Nauman Rafique <nauman@google.com>
*/

-#include <linux/cgroup.h>
+#include <linux/kthread.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 3695a43eb..c4dfc638a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -287,6 +287,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
}


+void __blk_mq_complete_request_remote_work(struct work_struct *work);
int blk_mq_request_started(struct request *rq);
int blk_mq_request_completed(struct request *rq);
void blk_mq_start_request(struct request *rq);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c848f4205..12f11feea 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -13,6 +13,7 @@
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
@@ -158,6 +159,9 @@ enum mq_rq_state {
*/
struct request {
struct request_queue *q;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ struct work_struct work;
+#endif
struct blk_mq_ctx *mq_ctx;

int cpu;
@@ -682,6 +686,7 @@ struct request_queue {
#endif
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
+ struct work_struct mq_pcpu_wake;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index a19519f42..40dd5ef9c 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -4,6 +4,39 @@

#include <linux/preempt.h>

+#ifdef CONFIG_PREEMPT_RT_FULL
+
+extern void __local_bh_disable(void);
+extern void _local_bh_enable(void);
+extern void __local_bh_enable(void);
+
+static inline void local_bh_disable(void)
+{
+ __local_bh_disable();
+}
+
+static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+ __local_bh_disable();
+}
+
+static inline void local_bh_enable(void)
+{
+ __local_bh_enable();
+}
+
+static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+{
+ __local_bh_enable();
+}
+
+static inline void local_bh_enable_ip(unsigned long ip)
+{
+ __local_bh_enable();
+}
+
+#else
+
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
@@ -31,5 +64,6 @@ static inline void local_bh_enable(void)
{
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#endif

#endif /* _LINUX_BH_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 9168fc33a..703bf3335 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -76,8 +76,50 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t b_uptodate_lock;
+#if IS_ENABLED(CONFIG_JBD2)
+ spinlock_t b_state_lock;
+ spinlock_t b_journal_head_lock;
+#endif
+#endif
};

+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+ unsigned long flags;
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
+#else
+ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+#endif
+ return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
+ local_irq_restore(flags);
+#else
+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+#endif
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spin_lock_init(&bh->b_uptodate_lock);
+#if IS_ENABLED(CONFIG_JBD2)
+ spin_lock_init(&bh->b_state_lock);
+ spin_lock_init(&bh->b_journal_head_lock);
+#endif
+#endif
+}
+
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
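These helpers are what the fs/buffer.c, ext4 and ntfs hunks earlier in this patch switch to: on !RT they keep the old bit-spinlock-plus-irq-off sequence, on RT they use a real (sleeping) spinlock embedded in the enlarged buffer_head. Typical end_io-style usage:

#include <linux/buffer_head.h>

static void my_end_io(struct buffer_head *bh)
{
	unsigned long flags;

	flags = bh_uptodate_lock_irqsave(bh);
	/* ... walk/modify per-page buffer state under the lock ... */
	bh_uptodate_unlock_irqrestore(bh, flags);
}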
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index cad1a82ca..3febaa98d 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -20,6 +20,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
+#include <linux/swork.h>

#ifdef CONFIG_CGROUPS

@@ -157,6 +158,7 @@ struct cgroup_subsys_state {

/* percpu_ref killing and RCU release */
struct work_struct destroy_work;
+ struct swork_event destroy_swork;
struct rcu_work destroy_rwork;

/*
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 519e94915..bf8e77001 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -9,7 +9,7 @@
* See kernel/sched/completion.c for details.
*/

-#include <linux/wait.h>
+#include <linux/swait.h>

/*
* struct completion - structure used to maintain state for a "completion"
@@ -25,7 +25,7 @@
*/
struct completion {
unsigned int done;
- wait_queue_head_t wait;
+ struct swait_queue_head wait;
};

#define init_completion_map(x, m) __init_completion(x)
@@ -34,7 +34,7 @@ static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}

#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
(*({ init_completion_map(&(work), &(map)); &(work); }))
@@ -85,7 +85,7 @@ static inline void complete_release(struct completion *x) {}
static inline void __init_completion(struct completion *x)
{
x->done = 0;
- init_waitqueue_head(&x->wait);
+ init_swait_queue_head(&x->wait);
}

/**
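Completions keep their public API; only the backing waitqueue becomes a swait queue, so complete() stays callable from hard irq context on RT. Callers are unchanged, for example:

#include <linux/completion.h>

static void my_wait_for_event(struct completion *done)
{
	/* hand done to a producer (e.g. an irq handler) first, then: */
	wait_for_completion(done);	/* sleeps on the swait queue */
}

static void my_producer(struct completion *done)
{
	complete(done);			/* safe even from hard irq context */
}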
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 9af7ab2c2..5ac53b297 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -137,6 +137,7 @@ static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
+
#endif /* !CONFIG_HOTPLUG_CPU */

/* Wrappers which go away once all code is converted */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d4d030340..00afcb4b0 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -107,7 +107,7 @@ struct dentry {

union {
struct list_head d_lru; /* LRU list */
- wait_queue_head_t *d_wait; /* in-lookup ones only */
+ struct swait_queue_head *d_wait; /* in-lookup ones only */
};
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
@@ -249,7 +249,7 @@ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
- wait_queue_head_t *);
+ struct swait_queue_head *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
diff --git a/include/linux/delay.h b/include/linux/delay.h
index b78bab439..7c4bc414a 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -64,4 +64,10 @@ static inline void ssleep(unsigned int seconds)
msleep(seconds * 1000);
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void cpu_chill(void);
+#else
+# define cpu_chill() cpu_relax()
+#endif
+
#endif /* defined(_LINUX_DELAY_H) */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7dc4508d1..30fcf55c2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -714,7 +714,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
- unsigned i_dir_seq;
+ unsigned __i_dir_seq;
};

__u32 i_generation;
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 84b90a79d..87a9330ea 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -230,6 +230,7 @@ extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
bool (*)(void *), void *);
+extern void fscache_cookie_init(void);

/**
* fscache_register_netfs - Register a filesystem as desiring caching services
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 1fed918bb..c3d13515e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
+#include <linux/sched.h>

#include <asm/cacheflush.h>

@@ -66,7 +67,7 @@ static inline void kunmap(struct page *page)

static inline void *kmap_atomic(struct page *page)
{
- preempt_disable();
+ preempt_disable_nort();
pagefault_disable();
return page_address(page);
}
@@ -75,7 +76,7 @@ static inline void *kmap_atomic(struct page *page)
static inline void __kunmap_atomic(void *addr)
{
pagefault_enable();
- preempt_enable();
+ preempt_enable_nort();
}

#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))

@@ -87,32 +88,51 @@ static inline void __kunmap_atomic(void *addr)

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

+#ifndef CONFIG_PREEMPT_RT_FULL
DECLARE_PER_CPU(int, __kmap_atomic_idx);
+#endif

static inline int kmap_atomic_idx_push(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

-#ifdef CONFIG_DEBUG_HIGHMEM
+# ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE(in_irq() && !irqs_disabled());
BUG_ON(idx >= KM_TYPE_NR);
-#endif
+# endif
return idx;
+#else
+ current->kmap_idx++;
+ BUG_ON(current->kmap_idx > KM_TYPE_NR);
+ return current->kmap_idx - 1;
+#endif
}

static inline int kmap_atomic_idx(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
return __this_cpu_read(__kmap_atomic_idx) - 1;
+#else
+ return current->kmap_idx - 1;
+#endif
}

static inline void kmap_atomic_idx_pop(void)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
+#ifndef CONFIG_PREEMPT_RT_FULL
+# ifdef CONFIG_DEBUG_HIGHMEM
int idx = __this_cpu_dec_return(__kmap_atomic_idx);

BUG_ON(idx < 0);
-#else
+# else
__this_cpu_dec(__kmap_atomic_idx);
+# endif
+#else
+ current->kmap_idx--;
+# ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(current->kmap_idx < 0);
+# endif
#endif
}

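On RT the kmap_atomic nesting index moves from a per-CPU counter to the task (current->kmap_idx), because the section is no longer preempt-disabled and a task's mappings must follow it across CPUs. Callers keep the usual nested, LIFO pattern:

#include <linux/highmem.h>
#include <linux/string.h>

static void my_copy_page(struct page *dst, struct page *src)
{
	char *d = kmap_atomic(dst);	/* push: slot 0 for this task */
	char *s = kmap_atomic(src);	/* push: slot 1; nesting is per task on RT */

	memcpy(d, s, PAGE_SIZE);
	kunmap_atomic(s);		/* pop in reverse order */
	kunmap_atomic(d);
}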
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 542b4fa2c..aee31b1f0 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -41,6 +41,7 @@ enum hrtimer_mode {
HRTIMER_MODE_REL = 0x01,
HRTIMER_MODE_PINNED = 0x02,
HRTIMER_MODE_SOFT = 0x04,
+ HRTIMER_MODE_HARD = 0x08,

HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
@@ -51,6 +52,11 @@ enum hrtimer_mode {
HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,

+ HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
+ HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD,
+
+ HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
+ HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};

/*
@@ -186,6 +192,8 @@ enum hrtimer_base_type {
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
+ * expired
* @expires_next: absolute time of the next event, is required for remote
* hrtimer enqueue; it is the total first expiry time (hard
* and soft hrtimer are taken into account)
@@ -213,6 +221,7 @@ struct hrtimer_cpu_base {
unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
+ spinlock_t softirq_expiry_lock;
ktime_t expires_next;
struct hrtimer *next_timer;
ktime_t softirq_expires_next;
@@ -364,10 +373,17 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
+extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
+ enum hrtimer_mode mode,
+ struct task_struct *task);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
+extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
+ clockid_t clock_id,
+ enum hrtimer_mode mode,
+ struct task_struct *task);

extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
@@ -377,6 +393,15 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer,
{
hrtimer_init(timer, which_clock, mode);
}
+
+static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
+ clockid_t clock_id,
+ enum hrtimer_mode mode,
+ struct task_struct *task)
+{
+ hrtimer_init_sleeper(sl, clock_id, mode, task);
+}
+
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif

@@ -400,6 +425,7 @@ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
+extern void hrtimer_grab_expiry_lock(const struct hrtimer *timer);

static inline void hrtimer_start_expires(struct hrtimer *timer,
enum hrtimer_mode mode)
@@ -486,9 +512,6 @@ extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
const enum hrtimer_mode mode,
const clockid_t clockid);

-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
- struct task_struct *tsk);
-
extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
diff --git a/include/linux/idr.h b/include/linux/idr.h
index b6c6151c7..81c9df5c0 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -169,10 +169,7 @@ static inline bool idr_is_empty(const struct idr *idr)
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
*/
-static inline void idr_preload_end(void)
-{
- preempt_enable();
-}
+void idr_preload_end(void);

/**
* idr_for_each_entry() - Iterate over an IDR's elements of a given type.
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 97de36a38..d05ef847f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -61,6 +61,7 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -74,6 +75,7 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_SOFTIRQ_CALL 0x00080000

#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

@@ -447,7 +449,11 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool state);

#ifdef CONFIG_IRQ_FORCED_THREADING
+# ifdef CONFIG_PREEMPT_RT_BASE
+# define force_irqthreads (true)
+# else
extern bool force_irqthreads;
+# endif
#else
#define force_irqthreads (0)
#endif
@@ -513,9 +519,10 @@ struct softirq_action
void (*action)(struct softirq_action *);
};

+#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-
+static inline void thread_do_softirq(void) { do_softirq(); }
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
@@ -524,13 +531,25 @@ static inline void do_softirq_own_stack(void)
__do_softirq();
}
#endif
+#else
+extern void thread_do_softirq(void);
+#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
+#else
+static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
+{
+ __raise_softirq_irqoff(nr);
+}
+#endif

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+extern void softirq_check_pending_idle(void);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

@@ -552,8 +571,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
- * If this tasklet is already running on another CPU (or schedule is called
- from tasklet itself), it is rescheduled for later.
+ * If this tasklet is already running on another CPU, it is rescheduled
+ for later.
+ * Schedule must not be called from the tasklet itself (a lockup occurs)
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
@@ -578,27 +598,39 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
+ TASKLET_STATE_PENDING, /* Tasklet is pending */
+ TASKLET_STATE_CHAINED /* Tasklet is chained */
};

-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
+#define TASKLET_STATEF_CHAINED (1 << TASKLET_STATE_CHAINED)
+#define TASKLET_STATEF_RC (TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
+}
+
static inline void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_atomic();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
#else
#define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
@@ -632,17 +664,18 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}

-static inline void tasklet_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic();
- atomic_dec(&t->count);
-}
-
+extern void tasklet_enable(struct tasklet_struct *t);
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);

+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
struct tasklet_hrtimer {
struct hrtimer timer;
struct tasklet_struct tasklet;
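IRQF_NO_SOFTIRQ_CALL is consumed by the RT irq-thread loop: a forced-threaded handler normally runs pending softirqs after the primary handler returns, and this flag skips that trailing work for latency-critical users. A sketch of requesting it, assuming the flag's semantics as documented above (device name and handler are illustrative):

#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* Runs in the irq thread on RT, without trailing softirq processing. */
	return IRQ_HANDLED;
}

static int my_probe_irq(unsigned int irq, void *dev)
{
	return request_irq(irq, my_handler,
			   IRQF_NO_SOFTIRQ_CALL | IRQF_SHARED,
			   "my-dev", dev);
}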
diff --git a/include/linux/irq.h b/include/linux/irq.h
index ff291f273..ea25f5c14 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -69,6 +69,7 @@ enum irqchip_irq_state;
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
* mechanism and from core side polling.
+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
*/
enum {
@@ -96,13 +97,14 @@ enum {
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
+ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
};

#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)

#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
|
|
index b11fcdfd0..0c5055998 100644
|
|
--- a/include/linux/irq_work.h
|
|
+++ b/include/linux/irq_work.h
|
|
@@ -18,6 +18,8 @@
|
|
|
|
/* Doesn't want IPI, wait for tick: */
|
|
#define IRQ_WORK_LAZY BIT(2)
|
|
+/* Run hard IRQ context, even on RT */
|
|
+#define IRQ_WORK_HARD_IRQ BIT(3)
|
|
|
|
#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
|
|
|
|
@@ -52,4 +54,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
|
|
static inline void irq_work_run(void) { }
|
|
#endif
|
|
|
|
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
|
|
+void irq_work_tick_soft(void);
|
|
+#else
|
|
+static inline void irq_work_tick_soft(void) { }
|
|
+#endif
|
|
+
|
|
#endif /* _LINUX_IRQ_WORK_H */
|
|
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
|
|
index 8140d8ca5..cbb4fff3a 100644
|
|
--- a/include/linux/irqdesc.h
|
|
+++ b/include/linux/irqdesc.h
|
|
@@ -71,6 +71,7 @@ struct irq_desc {
|
|
unsigned int irqs_unhandled;
|
|
atomic_t threads_handled;
|
|
int threads_handled_last;
|
|
+ u64 random_ip;
|
|
raw_spinlock_t lock;
|
|
struct cpumask *percpu_enabled;
|
|
const struct cpumask *percpu_affinity;
|
|
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
|
|
index 21619c92c..b20eeb25e 100644
|
|
--- a/include/linux/irqflags.h
|
|
+++ b/include/linux/irqflags.h
|
|
@@ -43,14 +43,6 @@ do { \
|
|
do { \
|
|
current->hardirq_context--; \
|
|
} while (0)
|
|
-# define lockdep_softirq_enter() \
|
|
-do { \
|
|
- current->softirq_context++; \
|
|
-} while (0)
|
|
-# define lockdep_softirq_exit() \
|
|
-do { \
|
|
- current->softirq_context--; \
|
|
-} while (0)
|
|
#else
|
|
# define trace_hardirqs_on() do { } while (0)
|
|
# define trace_hardirqs_off() do { } while (0)
|
|
@@ -64,6 +56,21 @@ do { \
|
|
# define lockdep_softirq_exit() do { } while (0)
|
|
#endif
|
|
|
|
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
|
|
+# define lockdep_softirq_enter() \
|
|
+do { \
|
|
+ current->softirq_context++; \
|
|
+} while (0)
|
|
+# define lockdep_softirq_exit() \
|
|
+do { \
|
|
+ current->softirq_context--; \
|
|
+} while (0)
|
|
+
|
|
+#else
|
|
+# define lockdep_softirq_enter() do { } while (0)
|
|
+# define lockdep_softirq_exit() do { } while (0)
|
|
+#endif
|
|
+
|
|
#if defined(CONFIG_IRQSOFF_TRACER) || \
|
|
defined(CONFIG_PREEMPT_TRACER)
|
|
extern void stop_critical_timings(void);
|
|
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 7d1bfa4cf..ccef2e110 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -349,32 +349,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)

static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_State, &bh->b_state);
+#else
+ spin_lock(&bh->b_state_lock);
+#endif
}

static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_trylock(BH_State, &bh->b_state);
+#else
+ return spin_trylock(&bh->b_state_lock);
+#endif
}

static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_is_locked(BH_State, &bh->b_state);
+#else
+ return spin_is_locked(&bh->b_state_lock);
+#endif
}

static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_State, &bh->b_state);
+#else
+ spin_unlock(&bh->b_state_lock);
+#endif
}

static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_JournalHead, &bh->b_state);
+#else
+ spin_lock(&bh->b_journal_head_lock);
+#endif
}

static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_JournalHead, &bh->b_state);
+#else
+ spin_unlock(&bh->b_journal_head_lock);
+#endif
}

#define J_ASSERT(assert) BUG_ON(!(assert))
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 68bd88223..e033b25b0 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);

+#define in_kdb_printk() (kdb_trap_printk)
extern void kdb_init(int level);

/* Access to kdb specific polling devices */
@@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
+#define in_kdb_printk() (0)
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 06c738d39..f36ee1f1a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -260,6 +260,9 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+
+# define might_sleep_no_state_check() \
+ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
@@ -267,6 +270,7 @@ extern int _cond_resched(void);
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
+# define might_sleep_no_state_check() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8613e4981..a1b0b661e 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -89,7 +89,7 @@ enum {

struct kthread_worker {
unsigned int flags;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct list_head work_list;
struct list_head delayed_work_list;
struct task_struct *task;
@@ -110,7 +110,7 @@ struct kthread_delayed_work {
};

#define KTHREAD_WORKER_INIT(worker) { \
- .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
.work_list = LIST_HEAD_INIT((worker).work_list), \
.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
}
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 3fc2cc57b..0b5de7d9f 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -3,6 +3,7 @@
#define _LINUX_LIST_BL_H

#include <linux/list.h>
+#include <linux/spinlock.h>
#include <linux/bit_spinlock.h>

/*
@@ -33,13 +34,24 @@

struct hlist_bl_head {
struct hlist_bl_node *first;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ raw_spinlock_t lock;
+#endif
};

struct hlist_bl_node {
struct hlist_bl_node *next, **pprev;
};
-#define INIT_HLIST_BL_HEAD(ptr) \
- ((ptr)->first = NULL)
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+#define INIT_HLIST_BL_HEAD(h) \
+do { \
+ (h)->first = NULL; \
+ raw_spin_lock_init(&(h)->lock); \
+} while (0)
+#else
+#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
+#endif

static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
@@ -119,12 +131,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)

static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(0, (unsigned long *)b);
+#else
+ raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ __set_bit(0, (unsigned long *)b);
+#endif
+#endif
}

static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
__bit_spin_unlock(0, (unsigned long *)b);
+#else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ __clear_bit(0, (unsigned long *)b);
+#endif
+ raw_spin_unlock(&b->lock);
+#endif
}

static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
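
For reference, the reason the RT branch of hlist_bl_lock() mirrors the lock state into bit 0 with __set_bit(): hlist_bl stores its lock in the low bit of the first pointer (nodes are at least 2-byte aligned), and hlist_bl_is_locked() plus the pointer-masking accessors depend on that bit. A small single-threaded userspace sketch of packing a lock flag into an aligned pointer word (illustrative only, no atomics, not part of the patch):

/* Build: cc -std=c11 bitlock.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOCK_BIT 0x1UL

/* An aligned pointer has bit 0 free, so it can double as a lock flag. */
static uintptr_t head;

static void lock_head(void)   { head |= LOCK_BIT; }
static void unlock_head(void) { head &= ~LOCK_BIT; }
static void *first(void)      { return (void *)(head & ~LOCK_BIT); }

int main(void)
{
	static int node __attribute__((aligned(2)));

	head = (uintptr_t)&node;
	lock_head();
	assert(head & LOCK_BIT);   /* locked ... */
	assert(first() == &node);  /* ... but the pointer still decodes */
	unlock_head();
	printf("first=%p locked=%lu\n", first(), (unsigned long)(head & LOCK_BIT));
	return 0;
}
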
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
new file mode 100644
index 000000000..81c89d877
--- /dev/null
+++ b/include/linux/locallock.h
@@ -0,0 +1,282 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <asm/current.h>
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define LL_WARN(cond) WARN_ON(cond)
+#else
+# define LL_WARN(cond) do { } while (0)
+#endif
+
+/*
+ * per cpu lock based substitute for local_irq_*()
+ */
+struct local_irq_lock {
+ spinlock_t lock;
+ struct task_struct *owner;
+ int nestcnt;
+ unsigned long flags;
+};
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
+
+#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
+ DECLARE_PER_CPU(struct local_irq_lock, lvar)
+
+#define local_irq_lock_init(lvar) \
+ do { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) \
+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
+ } while (0)
+
+static inline void __local_lock(struct local_irq_lock *lv)
+{
+ if (lv->owner != current) {
+ spin_lock(&lv->lock);
+ LL_WARN(lv->owner);
+ LL_WARN(lv->nestcnt);
+ lv->owner = current;
+ }
+ lv->nestcnt++;
+}
+
+#define local_lock(lvar) \
+ do { __local_lock(&get_local_var(lvar)); } while (0)
+
+#define local_lock_on(lvar, cpu) \
+ do { __local_lock(&per_cpu(lvar, cpu)); } while (0)
+
+static inline int __local_trylock(struct local_irq_lock *lv)
+{
+ if (lv->owner != current && spin_trylock(&lv->lock)) {
+ LL_WARN(lv->owner);
+ LL_WARN(lv->nestcnt);
+ lv->owner = current;
+ lv->nestcnt = 1;
+ return 1;
+ } else if (lv->owner == current) {
+ lv->nestcnt++;
+ return 1;
+ }
+ return 0;
+}
+
+#define local_trylock(lvar) \
+ ({ \
+ int __locked; \
+ __locked = __local_trylock(&get_local_var(lvar)); \
+ if (!__locked) \
+ put_local_var(lvar); \
+ __locked; \
+ })
+
+static inline void __local_unlock(struct local_irq_lock *lv)
+{
+ LL_WARN(lv->nestcnt == 0);
+ LL_WARN(lv->owner != current);
+ if (--lv->nestcnt)
+ return;
+
+ lv->owner = NULL;
+ spin_unlock(&lv->lock);
+}
+
+#define local_unlock(lvar) \
+ do { \
+ __local_unlock(this_cpu_ptr(&lvar)); \
+ put_local_var(lvar); \
+ } while (0)
+
+#define local_unlock_on(lvar, cpu) \
+ do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
+
+static inline void __local_lock_irq(struct local_irq_lock *lv)
+{
+ spin_lock_irqsave(&lv->lock, lv->flags);
+ LL_WARN(lv->owner);
+ LL_WARN(lv->nestcnt);
+ lv->owner = current;
+ lv->nestcnt = 1;
+}
+
+#define local_lock_irq(lvar) \
+ do { __local_lock_irq(&get_local_var(lvar)); } while (0)
+
+#define local_lock_irq_on(lvar, cpu) \
+ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
+
+static inline void __local_unlock_irq(struct local_irq_lock *lv)
+{
+ LL_WARN(!lv->nestcnt);
+ LL_WARN(lv->owner != current);
+ lv->owner = NULL;
+ lv->nestcnt = 0;
+ spin_unlock_irq(&lv->lock);
+}
+
+#define local_unlock_irq(lvar) \
+ do { \
+ __local_unlock_irq(this_cpu_ptr(&lvar)); \
+ put_local_var(lvar); \
+ } while (0)
+
+#define local_unlock_irq_on(lvar, cpu) \
+ do { \
+ __local_unlock_irq(&per_cpu(lvar, cpu)); \
+ } while (0)
+
+static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+{
+ if (lv->owner != current) {
+ __local_lock_irq(lv);
+ return 0;
+ } else {
+ lv->nestcnt++;
+ return 1;
+ }
+}
+
+#define local_lock_irqsave(lvar, _flags) \
+ do { \
+ if (__local_lock_irqsave(&get_local_var(lvar))) \
+ put_local_var(lvar); \
+ _flags = __this_cpu_read(lvar.flags); \
+ } while (0)
+
+#define local_lock_irqsave_on(lvar, _flags, cpu) \
+ do { \
+ __local_lock_irqsave(&per_cpu(lvar, cpu)); \
+ _flags = per_cpu(lvar, cpu).flags; \
+ } while (0)
+
+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+ unsigned long flags)
+{
+ LL_WARN(!lv->nestcnt);
+ LL_WARN(lv->owner != current);
+ if (--lv->nestcnt)
+ return 0;
+
+ lv->owner = NULL;
+ spin_unlock_irqrestore(&lv->lock, lv->flags);
+ return 1;
+}
+
+#define local_unlock_irqrestore(lvar, flags) \
+ do { \
+ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
+ put_local_var(lvar); \
+ } while (0)
+
+#define local_unlock_irqrestore_on(lvar, flags, cpu) \
+ do { \
+ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
+ } while (0)
+
+#define local_spin_trylock_irq(lvar, lock) \
+ ({ \
+ int __locked; \
+ local_lock_irq(lvar); \
+ __locked = spin_trylock(lock); \
+ if (!__locked) \
+ local_unlock_irq(lvar); \
+ __locked; \
+ })
+
+#define local_spin_lock_irq(lvar, lock) \
+ do { \
+ local_lock_irq(lvar); \
+ spin_lock(lock); \
+ } while (0)
+
+#define local_spin_unlock_irq(lvar, lock) \
+ do { \
+ spin_unlock(lock); \
+ local_unlock_irq(lvar); \
+ } while (0)
+
+#define local_spin_lock_irqsave(lvar, lock, flags) \
+ do { \
+ local_lock_irqsave(lvar, flags); \
+ spin_lock(lock); \
+ } while (0)
+
+#define local_spin_unlock_irqrestore(lvar, lock, flags) \
+ do { \
+ spin_unlock(lock); \
+ local_unlock_irqrestore(lvar, flags); \
+ } while (0)
+
+#define get_locked_var(lvar, var) \
+ (*({ \
+ local_lock(lvar); \
+ this_cpu_ptr(&var); \
+ }))
+
+#define put_locked_var(lvar, var) local_unlock(lvar);
+
+#define get_locked_ptr(lvar, var) \
+ ({ \
+ local_lock(lvar); \
+ this_cpu_ptr(var); \
+ })
+
+#define put_locked_ptr(lvar, var) local_unlock(lvar);
+
+#define local_lock_cpu(lvar) \
+ ({ \
+ local_lock(lvar); \
+ smp_processor_id(); \
+ })
+
+#define local_unlock_cpu(lvar) local_unlock(lvar)
+
+#else /* PREEMPT_RT_BASE */
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
+#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
+
+static inline void local_irq_lock_init(int lvar) { }
+
+#define local_trylock(lvar) \
+ ({ \
+ preempt_disable(); \
+ 1; \
+ })
+
+#define local_lock(lvar) preempt_disable()
+#define local_unlock(lvar) preempt_enable()
+#define local_lock_irq(lvar) local_irq_disable()
+#define local_lock_irq_on(lvar, cpu) local_irq_disable()
+#define local_unlock_irq(lvar) local_irq_enable()
+#define local_unlock_irq_on(lvar, cpu) local_irq_enable()
+#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
+#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
+
+#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
+#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
+#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
+#define local_spin_lock_irqsave(lvar, lock, flags) \
+ spin_lock_irqsave(lock, flags)
+#define local_spin_unlock_irqrestore(lvar, lock, flags) \
+ spin_unlock_irqrestore(lock, flags)
+
+#define get_locked_var(lvar, var) get_cpu_var(var)
+#define put_locked_var(lvar, var) put_cpu_var(var)
+#define get_locked_ptr(lvar, var) get_cpu_ptr(var)
+#define put_locked_ptr(lvar, var) put_cpu_ptr(var)
+
+#define local_lock_cpu(lvar) get_cpu()
+#define local_unlock_cpu(lvar) put_cpu()
+
+#endif
+
+#endif
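
Typical use of the API above, as a hedged kernel-style sketch (the names demo_lock/demo_count are illustrative, not from the patch): on non-RT kernels the local lock collapses to get_cpu_var()/put_cpu_var() with preemption disabled, while on RT it takes the per-CPU sleeping spinlock with owner and nesting checks.

/* Hypothetical example, assuming this header is available in-tree. */
#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);
static DEFINE_LOCAL_IRQ_LOCK(demo_lock);

static void demo_count_inc(void)
{
	unsigned long *cnt;

	/* Non-RT: preemption off. RT: per-CPU spinlock, still preemptible. */
	cnt = &get_locked_var(demo_lock, demo_count);
	(*cnt)++;
	put_locked_var(demo_lock, demo_count);
}
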
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f2142a5ea..d0ff067ec 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

@@ -503,6 +504,9 @@ struct mm_struct {
bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head delayed_drop;
+#endif
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8f7cdf83f..6aa217c6e 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -22,6 +22,17 @@

struct ww_acquire_ctx;

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/mutex_rt.h>
+#else
+
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -118,13 +129,6 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -229,4 +233,6 @@ mutex_trylock_recursive(struct mutex *lock)
return mutex_trylock(lock);
}

+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
new file mode 100644
index 000000000..3fcb5edb1
--- /dev/null
+++ b/include/linux/mutex_rt.h
@@ -0,0 +1,130 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
+
+#ifndef __LINUX_MUTEX_H
+#error "Please include mutex.h"
+#endif
+
+#include <linux/rtmutex.h>
+
+/* FIXME: Just for __lockfunc */
+#include <linux/spinlock.h>
+
+struct mutex {
+ struct rt_mutex lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __MUTEX_INITIALIZER(mutexname) \
+ { \
+ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
+ }
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
+extern void __lockfunc _mutex_lock(struct mutex *lock);
+extern void __lockfunc _mutex_lock_io(struct mutex *lock);
+extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
+extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_trylock(struct mutex *lock);
+extern void __lockfunc _mutex_unlock(struct mutex *lock);
+
+#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
+#define mutex_lock(l) _mutex_lock(l)
+#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
+#define mutex_lock_killable(l) _mutex_lock_killable(l)
+#define mutex_trylock(l) _mutex_trylock(l)
+#define mutex_unlock(l) _mutex_unlock(l)
+#define mutex_lock_io(l) _mutex_lock_io(l);
+
+#define __mutex_owner(l) ((l)->lock.owner)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
+#else
+static inline void mutex_destroy(struct mutex *lock) {}
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
+# define mutex_lock_interruptible_nested(l, s) \
+ _mutex_lock_interruptible_nested(l, s)
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable_nested(l, s)
+# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s)
+
+# define mutex_lock_nest_lock(lock, nest_lock) \
+do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+} while (0)
+
+#else
+# define mutex_lock_nested(l, s) _mutex_lock(l)
+# define mutex_lock_interruptible_nested(l, s) \
+ _mutex_lock_interruptible(l)
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable(l)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+# define mutex_lock_io_nested(l, s) _mutex_lock_io(l)
+#endif
+
+# define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_init(&(mutex)->lock); \
+ __mutex_do_init((mutex), #mutex, &__key); \
+} while (0)
+
+# define __mutex_init(mutex, name, key) \
+do { \
+ rt_mutex_init(&(mutex)->lock); \
+ __mutex_do_init((mutex), name, key); \
+} while (0)
+
+/**
+ * These values are chosen such that FAIL and SUCCESS match the
+ * values of the regular mutex_trylock().
+ */
+enum mutex_trylock_recursive_enum {
+ MUTEX_TRYLOCK_FAILED = 0,
+ MUTEX_TRYLOCK_SUCCESS = 1,
+ MUTEX_TRYLOCK_RECURSIVE,
+};
+/**
+ * mutex_trylock_recursive - trylock variant that allows recursive locking
+ * @lock: mutex to be locked
+ *
+ * This function should not be used, _ever_. It is purely for hysterical GEM
+ * raisins, and once those are gone this will be removed.
+ *
+ * Returns:
+ * MUTEX_TRYLOCK_FAILED - trylock failed,
+ * MUTEX_TRYLOCK_SUCCESS - lock acquired,
+ * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
+ */
+int __rt_mutex_owner_current(struct rt_mutex *lock);
+
+static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+ if (unlikely(__rt_mutex_owner_current(&lock->lock)))
+ return MUTEX_TRYLOCK_RECURSIVE;
+
+ return mutex_trylock(lock);
+}
+
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+#endif
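
Callers are unaffected by the substitution: the usual mutex API keeps its names, only the backing type changes to an rtmutex with priority inheritance. A hypothetical caller (demo names, not from the patch) that builds the same way on RT and non-RT kernels:

/* Hypothetical caller sketch. */
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);
static int demo_shared;

static int demo_update(int v)
{
	int old;

	mutex_lock(&demo_mutex);   /* on RT this is _mutex_lock() -> rt_mutex */
	old = demo_shared;
	demo_shared = v;
	mutex_unlock(&demo_mutex);
	return old;
}
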
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5f110d6a0..8761c728c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -435,7 +435,19 @@ typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

void __napi_schedule(struct napi_struct *n);
+
+/*
+ * When PREEMPT_RT_FULL is defined, all device interrupt handlers
+ * run as threads, and they can also be preempted (without PREEMPT_RT
+ * interrupt threads can not be preempted). Which means that calling
+ * __napi_schedule_irqoff() from an interrupt handler can be preempted
+ * and can corrupt the napi->poll_list.
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define __napi_schedule_irqoff(n) __napi_schedule(n)
+#else
void __napi_schedule_irqoff(struct napi_struct *n);
+#endif

static inline bool napi_disable_pending(struct napi_struct *n)
{
@@ -600,7 +612,11 @@ struct netdev_queue {
* write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ struct task_struct *xmit_lock_owner;
+#else
int xmit_lock_owner;
+#endif
/*
* Time (in jiffies) of last Tx
*/
@@ -3109,6 +3125,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
+ struct sk_buff_head tofree_queue;

};

@@ -3127,14 +3144,38 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
#endif
}

+#define XMIT_RECURSION_LIMIT 8
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int dev_recursion_level(void)
+{
+ return current->xmit_recursion;
+}
+
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(current->xmit_recursion >
+ XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ current->xmit_recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ current->xmit_recursion--;
+}
+
+#else
+
static inline int dev_recursion_level(void)
{
return this_cpu_read(softnet_data.xmit.recursion);
}

-#define XMIT_RECURSION_LIMIT 8
static inline bool dev_xmit_recursion(void)
{
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
@@ -3150,6 +3191,7 @@ static inline void dev_xmit_recursion_dec(void)
{
__this_cpu_dec(softnet_data.xmit.recursion);
}
+#endif

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
@@ -3955,13 +3997,52 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
return (1U << debug_value) - 1;
}

-static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+ WRITE_ONCE(txq->xmit_lock_owner, current);
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+ WRITE_ONCE(txq->xmit_lock_owner, NULL);
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+ if (READ_ONCE(txq->xmit_lock_owner) != NULL)
+ return true;
+ return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
{
- spin_lock(&txq->_xmit_lock);
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
WRITE_ONCE(txq->xmit_lock_owner, cpu);
}

+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+ if (READ_ONCE(txq->xmit_lock_owner) != -1)
+ return true;
+ return false;
+}
+#endif
+
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+{
+ spin_lock(&txq->_xmit_lock);
+ netdev_queue_set_owner(txq, cpu);
+}
+
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
{
__acquire(&txq->_xmit_lock);
@@ -3976,8 +4057,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ netdev_queue_set_owner(txq, smp_processor_id());
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
@@ -3985,29 +4065,26 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
bool ok = spin_trylock(&txq->_xmit_lock);

if (likely(ok)) {
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ netdev_queue_set_owner(txq, smp_processor_id());
}
return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, -1);
+ netdev_queue_clear_owner(txq);
spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, -1);
+ netdev_queue_clear_owner(txq);
spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
- if (txq->xmit_lock_owner != -1)
+ if (netdev_queue_has_owner(txq))
txq->trans_start = jiffies;
}
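
Moving the recursion counter from per-CPU storage to the task keeps the guard correct on RT, where a transmitting task may be preempted or migrate to another CPU mid-transmit and a per-CPU counter would be charged to the wrong context. A userspace sketch of the same guard with a thread-local counter (illustrative, not part of the patch):

/* Build: cc -std=c11 xmit_guard.c */
#include <stdio.h>

#define XMIT_RECURSION_LIMIT 8

/* Stands in for current->xmit_recursion: one counter per task. */
static _Thread_local int xmit_recursion;

static int xmit(int depth)
{
	int ret;

	if (xmit_recursion > XMIT_RECURSION_LIMIT)
		return -1;			/* loop detected: drop */
	xmit_recursion++;
	ret = depth ? xmit(depth - 1) : 0;	/* e.g. tunnel re-entering xmit */
	xmit_recursion--;
	return ret;
}

int main(void)
{
	printf("depth 3:  %d\n", xmit(3));	/* 0: allowed */
	printf("depth 20: %d\n", xmit(20));	/* -1: limit hit */
	return 0;
}
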
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 0ade4d1e4..3e21ce64c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <linux/netfilter.h>
+#include <linux/locallock.h>
#include <uapi/linux/netfilter/x_tables.h>

/* Test a struct->invflags and a boolean for inequality */
@@ -345,6 +346,8 @@ void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);

+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
+
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
* Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
@@ -365,6 +368,9 @@ static inline unsigned int xt_write_recseq_begin(void)
{
unsigned int addend;

+ /* RT protection */
+ local_lock(xt_write_lock);
+
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
@@ -395,6 +401,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
+ local_unlock(xt_write_lock);
}

/*
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 22e4ce639..c3de24074 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -164,7 +164,11 @@ struct nfs_inode {

/* Readers: in-flight sillydelete RPC calls */
/* Writers: rmdir */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct semaphore rmdir_sem;
+#else
struct rw_semaphore rmdir_sem;
+#endif
struct mutex commit_mutex;

#if IS_ENABLED(CONFIG_NFS_V4)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62cf39e74..db4c05957 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1551,7 +1551,7 @@ struct nfs_unlinkdata {
struct nfs_removeargs args;
struct nfs_removeres res;
struct dentry *dentry;
- wait_queue_head_t wq;
+ struct swait_queue_head wq;
struct rpc_cred *cred;
struct nfs_fattr dir_attr;
long timeout;
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 78abe15f7..9d48079be 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \
extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);

-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
might_sleep();

@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
this_cpu_inc(*sem->read_count);
if (unlikely(!rcu_sync_is_idle(&sem->rss)))
__percpu_down_read(sem, false); /* Unconditional memory barrier */
- barrier();
/*
- * The barrier() prevents the compiler from
+ * The preempt_enable() prevents the compiler from
* bleeding the critical section out.
*/
-}
-
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
-{
- percpu_down_read_preempt_disable(sem);
preempt_enable();
}

@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
return ret;
}

-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
- /*
- * The barrier() prevents the compiler from
- * bleeding the critical section out.
- */
- barrier();
+ preempt_disable();
/*
* Same as in percpu_down_read().
*/
@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}

-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
-{
- preempt_disable();
- percpu_up_read_preempt_enable(sem);
-}
-
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 70b7123f3..24421bf8c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -19,6 +19,35 @@
#define PERCPU_MODULE_RESERVE 0
#endif

+#ifdef CONFIG_PREEMPT_RT_FULL
+
+#define get_local_var(var) (*({ \
+ migrate_disable(); \
+ this_cpu_ptr(&var); }))
+
+#define put_local_var(var) do { \
+ (void)&(var); \
+ migrate_enable(); \
+} while (0)
+
+# define get_local_ptr(var) ({ \
+ migrate_disable(); \
+ this_cpu_ptr(var); })
+
+# define put_local_ptr(var) do { \
+ (void)(var); \
+ migrate_enable(); \
+} while (0)
+
+#else
+
+#define get_local_var(var) get_cpu_var(var)
+#define put_local_var(var) put_cpu_var(var)
+#define get_local_ptr(var) get_cpu_ptr(var)
+#define put_local_ptr(var) put_cpu_ptr(var)
+
+#endif
+
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)

diff --git a/include/linux/pid.h b/include/linux/pid.h
index 14a9a39da..a9026a5da 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -3,6 +3,7 @@
#define _LINUX_PID_H

#include <linux/rculist.h>
+#include <linux/atomic.h>

enum pid_type
{
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index ee7e987ea..3e6c91bdf 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -15,6 +15,7 @@ struct cpu_timer_list {
u64 expires, incr;
struct task_struct *task;
int firing;
+ int firing_cpu;
};

/*
@@ -114,8 +115,8 @@ struct k_itimer {
struct {
struct alarm alarmtimer;
} alarm;
- struct rcu_head rcu;
} it;
+ struct rcu_head rcu;
};

void run_posix_cpu_timers(struct task_struct *task);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f10333a2b..d71f2111b 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -51,7 +51,11 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)

-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET (0)
+#endif

/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
@@ -81,9 +85,15 @@
#include <asm/preempt.h>

#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count() ((unsigned long)current->softirq_nestcnt)
+extern int in_serving_softirq(void);
+#endif

/*
* Are we doing bottom half or hardware interrupt processing?
@@ -101,7 +111,6 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi() (preempt_count() & NMI_MASK)
#define in_task() (!(preempt_count() & \
(NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
@@ -118,7 +127,11 @@
/*
* The preempt_count offset after spin_lock()
*/
+#if !defined(CONFIG_PREEMPT_RT_FULL)
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET 0
+#endif

/*
* The preempt_count offset needed for things like:
@@ -167,6 +180,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

+#ifdef CONFIG_PREEMPT_LAZY
+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
+#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
+#else
+#define add_preempt_lazy_count(val) do { } while (0)
+#define sub_preempt_lazy_count(val) do { } while (0)
+#define inc_preempt_lazy_count() do { } while (0)
+#define dec_preempt_lazy_count() do { } while (0)
+#define preempt_lazy_count() (0)
+#endif
+
#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
@@ -175,16 +202,53 @@ do { \
barrier(); \
} while (0)

+#define preempt_lazy_disable() \
+do { \
+ inc_preempt_lazy_count(); \
+ barrier(); \
+} while (0)
+
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)

-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt() preempt_check_resched()
+#else
+# define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() barrier();
+#endif

#define preemptible() (preempt_count() == 0 && !irqs_disabled())

+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+int __migrate_disabled(struct task_struct *p);
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
+}
+
+#else
+#define migrate_disable() preempt_disable()
+#define migrate_enable() preempt_enable()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
@@ -206,6 +270,13 @@ do { \
__preempt_schedule(); \
} while (0)

+#define preempt_lazy_enable() \
+do { \
+ dec_preempt_lazy_count(); \
+ barrier(); \
+ preempt_check_resched(); \
+} while (0)
+
#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
@@ -213,6 +284,12 @@ do { \
preempt_count_dec(); \
} while (0)

+#define preempt_lazy_enable() \
+do { \
+ dec_preempt_lazy_count(); \
+ barrier(); \
+} while (0)
+
#define preempt_enable_notrace() \
do { \
barrier(); \
@@ -251,8 +328,16 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
+#define preempt_check_resched_rt() barrier()
#define preemptible() 0

+#define migrate_disable() barrier()
+#define migrate_enable() barrier()
+
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
+}
#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
@@ -271,10 +356,22 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
- if (tif_need_resched()) \
+ if (tif_need_resched_now()) \
set_preempt_need_resched(); \
} while (0)

+#ifdef CONFIG_PREEMPT_RT_FULL
+# define preempt_disable_rt() preempt_disable()
+# define preempt_enable_rt() preempt_enable()
+# define preempt_disable_nort() barrier()
+# define preempt_enable_nort() barrier()
+#else
+# define preempt_disable_rt() barrier()
+# define preempt_enable_rt() barrier()
+# define preempt_disable_nort() preempt_disable()
+# define preempt_enable_nort() preempt_enable()
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;
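
A hedged sketch of how the two new families above are meant to be used (demo names, not from the patch): migrate_disable() pins a task to its CPU while leaving it preemptible on RT, and the _nort variants compile away on RT where a sleeping lock already covers the data.

/* Hypothetical usage sketch. */
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_stat);

static void demo_per_cpu_inc(void)
{
	migrate_disable();	/* stable smp_processor_id(), still preemptible on RT */
	this_cpu_inc(demo_stat);
	migrate_enable();
}

static void demo_legacy_section(void)
{
	preempt_disable_nort();	/* preempt_disable() on !RT, barrier() on RT */
	/* ... code that only needs preemption off on non-RT kernels ... */
	preempt_enable_nort();
}
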
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c2e583c0b..8ba1d7e2e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -140,9 +140,11 @@ struct va_format {
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
+extern void printk_kill(void);
#else
static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
+static inline void printk_kill(void) { }
#endif

#ifdef CONFIG_PRINTK_NMI
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 34149e8b5..affb0fc4c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -330,6 +330,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
+void radix_tree_preload_end(void);
+
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
@@ -349,11 +351,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);

-static inline void radix_tree_preload_end(void)
-{
- preempt_enable();
-}
-
int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order);
diff --git a/include/linux/random.h b/include/linux/random.h
index d05e70d56..7b74d6935 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -32,7 +32,7 @@ static inline void add_latent_entropy(void)

extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy;

extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index fcbeed405..2aa2aec35 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -31,7 +31,7 @@

#include <linux/kernel.h>
#include <linux/stddef.h>
-#include <linux/rcupdate.h>
+#include <linux/rcu_assign_pointer.h>

struct rb_node {
unsigned long __rb_parent_color;
diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h
new file mode 100644
index 000000000..7066962a4
--- /dev/null
+++ b/include/linux/rcu_assign_pointer.h
@@ -0,0 +1,54 @@
+#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
+#define __LINUX_RCU_ASSIGN_POINTER_H__
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/**
+ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+ * @v: The value to statically initialize with.
+ */
+#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+
+/**
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
+ *
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment. More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption. So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
+ *
+ * Note that rcu_assign_pointer() evaluates each of its arguments only
+ * once, appearances notwithstanding. One of the "extra" evaluations
+ * is in typeof() and the other visible only to sparse (__CHECKER__),
+ * neither of which actually execute the argument. As with most cpp
+ * macros, this execute-arguments-only-once property is important, so
+ * please be careful when making changes to rcu_assign_pointer() and the
+ * other macros that it invokes.
+ */
+#define rcu_assign_pointer(p, v) \
+({ \
+ uintptr_t _r_a_p__v = (uintptr_t)(v); \
+ \
+ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
+ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
+ else \
+ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
+ _r_a_p__v; \
+})
+
+#endif
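
The smp_store_release() in rcu_assign_pointer() is what orders the initialization of the pointed-to object before the pointer publish, so readers that see the new pointer also see the initialized fields. A minimal userspace analogue with C11 release/acquire atomics (not part of the patch):

/* Build: cc -std=c11 -pthread publish.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj { int a, b; };

static struct obj slot;
static _Atomic(struct obj *) shared;	/* the "RCU-protected" pointer */

static void *writer(void *arg)
{
	slot.a = 1;
	slot.b = 2;
	/* Release store: a reader that sees the pointer sees the init too,
	 * the ordering rcu_assign_pointer() gets from smp_store_release(). */
	atomic_store_explicit(&shared, &slot, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct obj *p;

	pthread_create(&t, NULL, writer, NULL);
	while (!(p = atomic_load_explicit(&shared, memory_order_acquire)))
		;	/* acquire load pairs with the release store */
	printf("a=%d b=%d\n", p->a, p->b);	/* guaranteed 1 2 */
	pthread_join(t, NULL);
	return 0;
}
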
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7ecef0b5c..e4b4f47e9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -42,6 +42,7 @@
#include <linux/lockdep.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
+#include <linux/rcu_assign_pointer.h>

#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
@@ -55,7 +56,11 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
#define call_rcu call_rcu_sched
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

+#ifdef CONFIG_PREEMPT_RT_FULL
+#define call_rcu_bh call_rcu
+#else
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
+#endif
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
void synchronize_sched(void);
void rcu_barrier_tasks(void);
@@ -73,6 +78,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#ifndef CONFIG_PREEMPT_RT_FULL
+#define sched_rcu_preempt_depth() rcu_preempt_depth()
+#else
+static inline int sched_rcu_preempt_depth(void) { return 0; }
+#endif

#else /* #ifdef CONFIG_PREEMPT_RCU */

@@ -96,6 +106,8 @@ static inline int rcu_preempt_depth(void)
return 0;
}

+#define sched_rcu_preempt_depth() rcu_preempt_depth()
+
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
@@ -253,7 +265,14 @@ extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int rcu_read_lock_bh_held(void)
+{
+ return rcu_read_lock_held();
+}
+#else
int rcu_read_lock_bh_held(void);
+#endif
int rcu_read_lock_sched_held(void);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -362,71 +381,7 @@ static inline void rcu_preempt_sleep_check(void) { }
((typeof(*p) __force __kernel *)(________p1)); \
})

-/**
- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
- * @v: The value to statically initialize with.
- */
-#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

-/**
- * rcu_assign_pointer() - assign to RCU-protected pointer
- * @p: pointer to assign to
- * @v: value to assign (publish)
- *
- * Assigns the specified value to the specified RCU-protected
- * pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization.
- *
- * Inserts memory barriers on architectures that require them
- * (which is most of them), and also prevents the compiler from
- * reordering the code that initializes the structure after the pointer
- * assignment. More importantly, this call documents which pointers
- * will be dereferenced by RCU read-side code.
- *
- * In some special cases, you may use RCU_INIT_POINTER() instead
- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
- * to the fact that it does not constrain either the CPU or the compiler.
- * That said, using RCU_INIT_POINTER() when you should have used
- * rcu_assign_pointer() is a very bad thing that results in
- * impossible-to-diagnose memory corruption. So please be careful.
- * See the RCU_INIT_POINTER() comment header for details.
- *
- * Note that rcu_assign_pointer() evaluates each of its arguments only
- * once, appearances notwithstanding. One of the "extra" evaluations
- * is in typeof() and the other visible only to sparse (__CHECKER__),
- * neither of which actually execute the argument. As with most cpp
- * macros, this execute-arguments-only-once property is important, so
- * please be careful when making changes to rcu_assign_pointer() and the
- * other macros that it invokes.
- */
-#define rcu_assign_pointer(p, v) \
-({ \
- uintptr_t _r_a_p__v = (uintptr_t)(v); \
- \
- if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
- WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
- else \
- smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
- _r_a_p__v; \
-})
-
-/**
- * rcu_replace_pointer() - replace an RCU pointer, returning its old value
- * @rcu_ptr: RCU pointer, whose old value is returned
- * @ptr: regular pointer
- * @c: the lockdep conditions under which the dereference will take place
- *
- * Perform a replacement, where @rcu_ptr is an RCU-annotated
- * pointer and @c is the lockdep argument that is passed to the
- * rcu_dereference_protected() call used to read that pointer. The old
- * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
- */
-#define rcu_replace_pointer(rcu_ptr, ptr, c) \
-({ \
- typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
- rcu_assign_pointer((rcu_ptr), (ptr)); \
- __tmp; \
-})

/**
* rcu_swap_protected() - swap an RCU and a regular pointer
@@ -719,10 +674,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ rcu_read_lock();
+#else
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
+#endif
}

/*
@@ -732,10 +691,14 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ rcu_read_unlock();
+#else
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
+#endif
local_bh_enable();
}

diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 914655848..462ce061b 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(false);
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+# define synchronize_rcu_bh synchronize_rcu
+#else
void synchronize_rcu_bh(void);
+#endif
void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);

@@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void)
}

void rcu_barrier(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define rcu_barrier_bh rcu_barrier
+#else
void rcu_barrier_bh(void);
+#endif
void rcu_barrier_sched(void);
bool rcu_eqs_special_set(int cpu);
unsigned long get_state_synchronize_rcu(void);
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 02166e815..0b31df1af 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -72,7 +72,7 @@ struct reservation_object_list {
*/
struct reservation_object {
struct ww_mutex lock;
- seqcount_t seq;
+ seqlock_t seq;

struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
@@ -92,7 +92,7 @@ reservation_object_init(struct reservation_object *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);

- __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
+ seqlock_init(&obj->seq);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
obj->staged = NULL;
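
Switching reservation_object from seqcount_t to seqlock_t gives writers a real lock that RT can substitute with a sleeping, PI-aware one, while readers keep the usual sequence-retry rule: an odd count or a count that changed during the read invalidates the snapshot. A single-threaded userspace walk-through of that rule (illustrative only; real seqlocks add memory barriers and the writer-side lock):

/* Build: cc -std=c11 seqdemo.c */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned seq;
static int data_x, data_y;

static void write_begin(void) { atomic_fetch_add(&seq, 1); } /* odd: in progress */
static void write_end(void)   { atomic_fetch_add(&seq, 1); } /* even: stable */

static int read_pair(int *x, int *y)
{
	unsigned s = atomic_load(&seq);

	if (s & 1)
		return 0;		/* writer active: caller retries */
	*x = data_x;
	*y = data_y;
	return atomic_load(&seq) == s;	/* unchanged: snapshot consistent */
}

int main(void)
{
	int x, y;

	write_begin();
	data_x = 1;
	printf("mid-write read ok=%d\n", read_pair(&x, &y));	/* 0 */
	data_y = 2;
	write_end();
	printf("stable read ok=%d x=%d y=%d\n", read_pair(&x, &y), x, y); /* 1 1 2 */
	return 0;
}
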
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
|
|
index 6fd615a0e..138bd1e18 100644
|
|
--- a/include/linux/rtmutex.h
|
|
+++ b/include/linux/rtmutex.h
|
|
@@ -14,11 +14,15 @@
|
|
#define __LINUX_RT_MUTEX_H
|
|
|
|
#include <linux/linkage.h>
|
|
+#include <linux/spinlock_types_raw.h>
|
|
#include <linux/rbtree.h>
|
|
-#include <linux/spinlock_types.h>
|
|
|
|
extern int max_lock_depth; /* for sysctl */
|
|
|
|
+#ifdef CONFIG_DEBUG_MUTEXES
|
|
+#include <linux/debug_locks.h>
|
|
+#endif
|
|
+
|
|
/**
|
|
* The rt_mutex structure
|
|
*
|
|
@@ -31,8 +35,8 @@ struct rt_mutex {
|
|
raw_spinlock_t wait_lock;
|
|
struct rb_root_cached waiters;
|
|
struct task_struct *owner;
|
|
-#ifdef CONFIG_DEBUG_RT_MUTEXES
|
|
int save_state;
|
|
+#ifdef CONFIG_DEBUG_RT_MUTEXES
|
|
const char *name, *file;
|
|
int line;
|
|
void *magic;
|
|
@@ -82,16 +86,23 @@ do { \
|
|
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
|
|
#endif
|
|
|
|
-#define __RT_MUTEX_INITIALIZER(mutexname) \
|
|
- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
|
|
+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
|
|
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
|
|
, .waiters = RB_ROOT_CACHED \
|
|
, .owner = NULL \
|
|
__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
|
|
- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
|
|
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
|
|
+
|
|
+#define __RT_MUTEX_INITIALIZER(mutexname) \
|
|
+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
|
|
|
|
#define DEFINE_RT_MUTEX(mutexname) \
|
|
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
|
|
|
|
+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
|
|
+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
|
|
+ , .save_state = 1 }
|
|
+
|
|
/**
|
|
* rt_mutex_is_locked - is the mutex locked
|
|
* @lock: the mutex to be queried
|
|
@@ -115,6 +126,7 @@ extern void rt_mutex_lock(struct rt_mutex *lock);
|
|
#endif
|
|
|
|
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
|
|
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
|
|
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
|
|
struct hrtimer_sleeper *timeout);
|
|
|
|
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644
index 000000000..a9c4c2ac4
--- /dev/null
+++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,119 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
+extern int __lockfunc rt_read_can_lock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_can_lock(rwlock_t *rwlock);
+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
+
+#define read_can_lock(rwlock) rt_read_can_lock(rwlock)
+#define write_can_lock(rwlock) rt_write_can_lock(rwlock)
+
+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+static inline int __write_trylock_rt_irqsave(rwlock_t *lock, unsigned long *flags)
+{
+ /* XXX ARCH_IRQ_ENABLED */
+ *flags = 0;
+ return rt_write_trylock(lock);
+}
+
+#define write_trylock_irqsave(lock, flags) \
+ __cond_lock(lock, __write_trylock_rt_irqsave(lock, &(flags)))
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_read_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_write_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define read_lock(lock) rt_read_lock(lock)
+
+#define read_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ rt_read_lock(lock); \
+ } while (0)
+
+#define read_lock_irq(lock) read_lock(lock)
+
+#define write_lock(lock) rt_write_lock(lock)
+
+#define write_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ rt_write_lock(lock); \
+ } while (0)
+
+#define write_lock_irq(lock) write_lock(lock)
+
+#define read_unlock(lock) rt_read_unlock(lock)
+
+#define read_unlock_bh(lock) \
+ do { \
+ rt_read_unlock(lock); \
+ local_bh_enable(); \
+ } while (0)
+
+#define read_unlock_irq(lock) read_unlock(lock)
+
+#define write_unlock(lock) rt_write_unlock(lock)
+
+#define write_unlock_bh(lock) \
+ do { \
+ rt_write_unlock(lock); \
+ local_bh_enable(); \
+ } while (0)
+
+#define write_unlock_irq(lock) write_unlock(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_read_unlock(lock); \
+ } while (0)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_write_unlock(lock); \
+ } while (0)
+
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
+/*
+ * Internal functions made global for CPU pinning
+ */
+void __read_rt_lock(struct rt_rw_lock *lock);
+int __read_rt_trylock(struct rt_rw_lock *lock);
+void __write_rt_lock(struct rt_rw_lock *lock);
+int __write_rt_trylock(struct rt_rw_lock *lock);
+void __read_rt_unlock(struct rt_rw_lock *lock);
+void __write_rt_unlock(struct rt_rw_lock *lock);
+
+#endif
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 857a72ceb..c21683f3e 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,6 +1,10 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H

+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
/*
* include/linux/rwlock_types.h - generic rwlock type definitions
* and initializers
diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
new file mode 100644
index 000000000..546a1f8f1
--- /dev/null
+++ b/include/linux/rwlock_types_rt.h
@@ -0,0 +1,55 @@
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+typedef struct rt_rw_lock rwlock_t;
+
+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
+
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+/*
+ * A reader biased implementation primarily for CPU pinning.
+ *
+ * Can be selected as general replacement for the single reader RT rwlock
+ * variant
+ */
+struct rt_rw_lock {
+ struct rt_mutex rtmutex;
+ atomic_t readers;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define READER_BIAS (1U << 31)
+#define WRITER_BIAS (1U << 30)
+
+#define __RWLOCK_RT_INITIALIZER(name) \
+{ \
+ .readers = ATOMIC_INIT(READER_BIAS), \
+ .rtmutex = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.rtmutex), \
+ RW_DEP_MAP_INIT(name) \
+}
+
+void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name,
+ struct lock_class_key *key);
+
+#define rwlock_biased_rt_init(rwlock) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_biased_rt_init((rwlock), #rwlock, &__key); \
+ } while (0)
+
+#endif
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ab93b6eae..b1e32373f 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -20,6 +20,10 @@
#include <linux/osq_lock.h>
#endif

+#ifdef CONFIG_PREEMPT_RT_FULL
+#include <linux/rwsem_rt.h>
+#else /* PREEMPT_RT_FULL */
+
struct rw_semaphore;

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -114,6 +118,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
return !list_empty(&sem->wait_list);
}

+#endif /* !PREEMPT_RT_FULL */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
/*
* lock for reading
*/
diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
new file mode 100644
index 000000000..3fb092b7b
--- /dev/null
+++ b/include/linux/rwsem_rt.h
@@ -0,0 +1,69 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Include rwsem.h"
+#endif
+
+#include <linux/rtmutex.h>
+#include <linux/swait.h>
+
+#define READER_BIAS (1U << 31)
+#define WRITER_BIAS (1U << 30)
+
+struct rw_semaphore {
+ atomic_t readers;
+ struct rt_mutex rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+{ \
+ .readers = ATOMIC_INIT(READER_BIAS), \
+ .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \
+ RW_DEP_MAP_INIT(name) \
+}
+
+#define DECLARE_RWSEM(lockname) \
+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name,
+ struct lock_class_key *key);
+
+#define __init_rwsem(sem, name, key) \
+do { \
+ rt_mutex_init(&(sem)->rtmutex); \
+ __rwsem_init((sem), (name), (key)); \
+} while (0)
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return atomic_read(&sem->readers) != READER_BIAS;
+}
+
+static inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+ return atomic_read(&sem->readers) > 0;
+}
+
+extern void __down_read(struct rw_semaphore *sem);
+extern int __down_read_interruptible(struct rw_semaphore *sem);
+extern int __down_read_killable(struct rw_semaphore *sem);
+extern int __down_read_trylock(struct rw_semaphore *sem);
+extern void __down_write(struct rw_semaphore *sem);
+extern int __must_check __down_write_killable(struct rw_semaphore *sem);
+extern int __down_write_trylock(struct rw_semaphore *sem);
+extern void __up_read(struct rw_semaphore *sem);
+extern void __up_write(struct rw_semaphore *sem);
+extern void __downgrade_write(struct rw_semaphore *sem);
+
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8fd8c5b7c..481b58e8d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -29,6 +29,7 @@
#include <linux/task_io_accounting.h>
#include <linux/rseq.h>
#include <linux/thread_bits.h>
+#include <asm/kmap_types.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -102,12 +103,8 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)

-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
-
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)

-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
-
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
@@ -135,6 +132,9 @@ struct task_group;
smp_store_mb(current->state, (state_value)); \
} while (0)

+#define __set_current_state_no_track(state_value) \
+ current->state = (state_value);
+
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
@@ -144,6 +144,7 @@ struct task_group;
current->state = (state_value); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
+
#else
/*
* set_current_state() includes a barrier so that the write of current->state
@@ -188,6 +189,9 @@ struct task_group;
#define set_current_state(state_value) \
smp_store_mb(current->state, (state_value))

+#define __set_current_state_no_track(state_value) \
+ __set_current_state(state_value)
+
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
@@ -224,6 +228,8 @@ extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

+int cpu_nr_pinned(int cpu);
+
/**
* struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
@@ -630,6 +636,8 @@ struct task_struct {
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
+ /* saved state for "spinlock sleepers" */
+ volatile long saved_state;

/*
* This begins the randomizable portion of task_struct. Only
@@ -690,7 +698,22 @@ struct task_struct {

unsigned int policy;
int nr_cpus_allowed;
- cpumask_t cpus_allowed;
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ int migrate_disable;
+ bool migrate_disable_scheduled;
+# ifdef CONFIG_SCHED_DEBUG
+ int pinned_on_cpu;
+# endif
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable;
+# endif
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int sleeping_lock;
+#endif

#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -854,6 +877,9 @@ struct task_struct {
#ifdef CONFIG_POSIX_TIMERS
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct task_struct *posix_timer_list;
+#endif
#endif

/* Process credentials: */
@@ -898,11 +924,17 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct *sighand;
+ struct sigqueue *sigqueue_cache;
+
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* TODO: move me into ->restart_block ? */
+ struct siginfo forced_info;
+#endif
unsigned long sas_ss_sp;
size_t sas_ss_size;
unsigned int sas_ss_flags;
@@ -927,6 +959,7 @@ struct task_struct {
raw_spinlock_t pi_lock;

struct wake_q_node wake_q;
+ struct wake_q_node wake_q_sleeper;

#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task: */
@@ -1216,8 +1249,22 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
+ int softirq_nestcnt;
+ unsigned int softirqs_raised;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+ int kmap_idx;
+ pte_t kmap_pte[KM_TYPE_NR];
+# endif
+#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int xmit_recursion;
#endif
int pagefault_disabled;
#ifdef CONFIG_MMU
@@ -1456,6 +1503,7 @@ extern struct pid *cad_pid;
/*
* Per process flags
*/
+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
#define PF_RELIABLE 0x00000008 /* Allocate from reliable memory */
@@ -1480,7 +1528,7 @@ extern struct pid *cad_pid;
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_UCE_KERNEL_COREDUMP 0x01000000 /* Task in coredump process which is used in uce kernel recovery */
#define PF_UCE_KERNEL_RECOVERY 0x02000000 /* Task in uce kernel recovery state */
-#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
+#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_IO_WORKER 0x20000000 /* Task is an IO worker */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
@@ -1685,6 +1733,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
@@ -1767,6 +1816,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

+#ifdef CONFIG_PREEMPT_LAZY
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
+}
+
+static inline int need_resched_lazy(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int need_resched_now(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#else
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
+static inline int need_resched_lazy(void) { return 0; }
+
+static inline int need_resched_now(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#endif
+
+
+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
+{
+ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
+ return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
+ return true;
+#endif
+ return false;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+ bool traced_stopped;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+ traced_stopped = __task_is_stopped_or_traced(task);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+#else
+ traced_stopped = __task_is_stopped_or_traced(task);
+#endif
+ return traced_stopped;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+ bool traced = false;
+
+ if (task->state & __TASK_TRACED)
+ return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* in case the task is sleeping on tasklist_lock */
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->state & __TASK_TRACED)
+ traced = true;
+ else if (task->saved_state & __TASK_TRACED)
+ traced = true;
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+ return traced;
+}
+
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
@@ -1819,6 +1951,23 @@ static __always_inline bool need_resched(void)
return unlikely(tif_need_resched());
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void sleeping_lock_inc(void)
+{
+ current->sleeping_lock++;
+}
+
+static inline void sleeping_lock_dec(void)
+{
+ current->sleeping_lock--;
+}
+
+#else
+
+static inline void sleeping_lock_inc(void) { }
+static inline void sleeping_lock_dec(void) { }
+#endif
+
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
*/
@@ -1990,6 +2139,8 @@ static inline void rseq_syscall(struct pt_regs *regs)

#endif

+extern struct task_struct *takedown_cpu_task;
+
#ifdef CONFIG_QOS_SCHED
void sched_move_offline_task(struct task_struct *p);
void sched_qos_offline_wait(void);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index bd762e7a2..c671d19ff 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,6 +49,17 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}

+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
+{
+ if (atomic_dec_and_test(&mm->mm_count))
+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+# define mmdrop_delayed(mm) mmdrop(mm)
+#endif
+
void mmdrop(struct mm_struct *mm);

/*
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 8b02ee423..756ba93f2 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -90,6 +90,21 @@ extern void sched_exec(void);

#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+ if (atomic_dec_and_test(&t->usage))
+ call_rcu(&t->put_rcu, __put_task_struct_cb);
+}
+static inline void put_task_struct_many(struct task_struct *t, int nr)
+{
+ if (atomic_sub_and_test(nr, &t->usage))
+ call_rcu(&t->put_rcu, __put_task_struct_cb);
+}
+
+#else
extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
@@ -103,6 +118,7 @@ static inline void put_task_struct_many(struct task_struct *t, int nr)
if (atomic_sub_and_test(nr, &t->usage))
__put_task_struct(t);
}
+#endif

void put_task_struct_rcu_user(struct task_struct *task);

diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 545f37138..e33edee35 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -51,8 +51,29 @@ static inline void wake_q_init(struct wake_q_head *head)
head->lastp = &head->first;
}

-extern void wake_q_add(struct wake_q_head *head,
- struct task_struct *task);
-extern void wake_up_q(struct wake_q_head *head);
+extern void __wake_q_add(struct wake_q_head *head,
+ struct task_struct *task, bool sleeper);
+static inline void wake_q_add(struct wake_q_head *head,
+ struct task_struct *task)
+{
+ __wake_q_add(head, task, false);
+}
+
+static inline void wake_q_add_sleeper(struct wake_q_head *head,
+ struct task_struct *task)
+{
+ __wake_q_add(head, task, true);
+}
+
+extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
+static inline void wake_up_q(struct wake_q_head *head)
+{
+ __wake_up_q(head, false);
+}
+
+static inline void wake_up_q_sleeper(struct wake_q_head *head)
+{
+ __wake_up_q(head, true);
+}

#endif /* _LINUX_SCHED_WAKE_Q_H */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index bcf4cf26b..58f9909d6 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -221,20 +221,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
return __read_seqcount_retry(s, start);
}

-
-
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+static inline void __raw_write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
smp_wmb();
}

-static inline void raw_write_seqcount_end(seqcount_t *s)
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+ preempt_disable_rt();
+ __raw_write_seqcount_begin(s);
+}
+
+static inline void __raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
}

+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+ __raw_write_seqcount_end(s);
+ preempt_enable_rt();
+}
+
/**
* raw_write_seqcount_barrier - do a seq write barrier
* @s: pointer to seqcount_t
@@ -428,10 +438,33 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
return read_seqcount_begin(&sl->seqcount);
}
+#else
+/*
+ * Starvation safe read side for RT
+ */
+static inline unsigned read_seqbegin(seqlock_t *sl)
+{
+ unsigned ret;
+
+repeat:
+ ret = READ_ONCE(sl->seqcount.sequence);
+ if (unlikely(ret & 1)) {
+ /*
+ * Take the lock and let the writer proceed (i.e. evtl
+ * boost it), otherwise we could loop here forever.
+ */
+ spin_unlock_wait(&sl->lock);
+ goto repeat;
+ }
+ smp_rmb();
+ return ret;
+}
+#endif

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
@@ -446,36 +479,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ __raw_write_seqcount_begin(&sl->seqcount);
+}
+
+static inline int try_write_seqlock(seqlock_t *sl)
+{
+ if (spin_trylock(&sl->lock)) {
+ __raw_write_seqcount_begin(&sl->seqcount);
+ return 1;
+ }
+ return 0;
}

static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ __raw_write_seqcount_end(&sl->seqcount);
spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ __raw_write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ __raw_write_seqcount_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ __raw_write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ __raw_write_seqcount_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}

@@ -484,7 +526,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;

spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_begin(&sl->seqcount);
+ __raw_write_seqcount_begin(&sl->seqcount);
return flags;
}

@@ -494,7 +536,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_end(&sl->seqcount);
+ __raw_write_seqcount_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}

diff --git a/include/linux/signal.h b/include/linux/signal.h
index 0be5ce237..6495fda18 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -245,6 +245,7 @@ static inline void init_sigpending(struct sigpending *sig)
}

extern void flush_sigqueue(struct sigpending *queue);
+extern void flush_task_sigqueue(struct task_struct *tsk);

/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b67d42871..ed4b509e1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -287,6 +287,7 @@ struct sk_buff_head {

__u32 qlen;
spinlock_t lock;
+ raw_spinlock_t raw_lock;
};

struct sk_buff;
@@ -1740,6 +1741,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}

+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
+{
+ raw_spin_lock_init(&list->raw_lock);
+ __skb_queue_head_init(list);
+}
+
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12..5801e516b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void)
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()

+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light() migrate_enable()
+
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e089157dc..5f5ad0630 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -298,7 +298,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
})

/* Include rwlock functions */
-#include <linux/rwlock.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_rt.h>
+#else
+# include <linux/rwlock.h>
+#endif

/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -309,6 +313,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif

+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_rt.h>
+#else /* PREEMPT_RT_FULL */
+
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -429,6 +437,8 @@ static __always_inline int spin_is_contended(spinlock_t *lock)

#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)

+#endif /* !PREEMPT_RT_FULL */
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e..29d99ae5a 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}

-#include <linux/rwlock_api_smp.h>
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_api_smp.h>
+#endif

#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644
index 000000000..3696a77fa
--- /dev/null
+++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,156 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#include <linux/bug.h>
+
+extern void
+__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key);
+
+#define spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key); \
+} while (0)
+
+extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
+
+/*
+ * lockdep-less calls, for derived types like rwlock:
+ * (for trylock they can use rt_mutex_trylock() directly.
+ * Migrate disable handling must be done at the call site.
+ */
+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+
+#define spin_lock(lock) rt_spin_lock(lock)
+
+#define spin_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+ rt_spin_lock(lock); \
+ } while (0)
+
+#define spin_lock_irq(lock) spin_lock(lock)
+
+#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock(lock) \
+({ \
+ int __locked; \
+ __locked = spin_do_trylock(lock); \
+ __locked; \
+})
+
+#ifdef CONFIG_LOCKDEP
+# define spin_lock_nested(lock, subclass) \
+ do { \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
+#define spin_lock_bh_nested(lock, subclass) \
+ do { \
+ local_bh_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+#else
+# define spin_lock_nested(lock, subclass) spin_lock(lock)
+# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+#endif
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+
+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
+{
+ unsigned long flags = 0;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ flags = rt_spin_lock_trace_flags(lock);
+#else
+ spin_lock(lock); /* lock_local */
+#endif
+ return flags;
+}
+
+/* FIXME: we need rt_spin_lock_nest_lock */
+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+
+#define spin_unlock(lock) rt_spin_unlock(lock)
+
+#define spin_unlock_bh(lock) \
+ do { \
+ rt_spin_unlock(lock); \
+ local_bh_enable(); \
+ } while (0)
+
+#define spin_unlock_irq(lock) spin_unlock(lock)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ spin_unlock(lock); \
+ } while (0)
+
+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_irq(lock) spin_trylock(lock)
+
+#define spin_trylock_irqsave(lock, flags) \
+ rt_spin_trylock_irqsave(lock, &(flags))
+
+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
+
+#ifdef CONFIG_GENERIC_LOCKBREAK
+# define spin_is_contended(lock) ((lock)->break_lock)
+#else
+# define spin_is_contended(lock) (((void)(lock), 0))
+#endif
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return !rt_mutex_is_locked(&lock->lock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return rt_mutex_is_locked(&lock->lock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ BUG_ON(!spin_is_locked(lock));
+}
+
+#endif
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 24b4e6f2c..10bac715e 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,77 +9,15 @@
* Released under the General Public License (GPL).
*/

-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
- arch_spinlock_t raw_lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
+#include <linux/spinlock_types_raw.h>

-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_types_nort.h>
+# include <linux/rwlock_types.h>
#else
-# define SPIN_DEBUG_INIT(lockname)
+# include <linux/rtmutex.h>
+# include <linux/spinlock_types_rt.h>
+# include <linux/rwlock_types_rt.h>
#endif

-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-
-typedef struct spinlock {
- union {
- struct raw_spinlock rlock;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
- struct {
- u8 __padding[LOCK_PADSIZE];
- struct lockdep_map dep_map;
- };
-#endif
- };
-} spinlock_t;
-
-#define __SPIN_LOCK_INITIALIZER(lockname) \
- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
-
-#define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-
-#include <linux/rwlock_types.h>
-
#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
new file mode 100644
index 000000000..f1dac1fb1
--- /dev/null
+++ b/include/linux/spinlock_types_nort.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * The non RT version maps spinlocks to raw_spinlocks
+ */
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
+#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
new file mode 100644
index 000000000..822bf64a6
--- /dev/null
+++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,55 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#include <linux/types.h>
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
new file mode 100644
index 000000000..3e3d8c5f7
--- /dev/null
+++ b/include/linux/spinlock_types_rt.h
@@ -0,0 +1,48 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+#include <linux/cache.h>
+
+/*
+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
+ */
+typedef struct spinlock {
+ struct rt_mutex lock;
+ unsigned int break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define __RT_SPIN_INITIALIZER(name) \
+ { \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+ .save_state = 1, \
+ .file = __FILE__, \
+ .line = __LINE__ , \
+ }
+#else
+# define __RT_SPIN_INITIALIZER(name) \
+ { \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+ .save_state = 1, \
+ }
+#endif
+
+/*
+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
+*/
+
+#define __SPIN_LOCK_UNLOCKED(name) \
+ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
+ SPIN_DEP_MAP_INIT(name) }
+
+#define DEFINE_SPINLOCK(name) \
+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index c09b6407a..b0243ba07 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -1,10 +1,6 @@
#ifndef __LINUX_SPINLOCK_TYPES_UP_H
#define __LINUX_SPINLOCK_TYPES_UP_H

-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
/*
* include/linux/spinlock_types_up.h - spinlock type definitions for UP
*
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 6d3635c86..82fc686dd 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -26,6 +26,8 @@ struct cpu_stop_work {
cpu_stop_fn_t fn;
void *arg;
struct cpu_stop_done *done;
+ /* Did not run due to disabled stopper; for nowait debug checks */
+ bool disabled;
};

int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index aff248925..87c21a106 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -196,6 +196,12 @@ struct platform_s2idle_ops {
void (*end)(void);
};

+#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
+extern bool pm_in_action;
+#else
+# define pm_in_action false
+#endif
+
#ifdef CONFIG_SUSPEND
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 73e06e998..21ae66cd4 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -160,7 +160,9 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
+extern void swake_up_all_locked(struct swait_queue_head *q);

+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

@@ -297,4 +299,18 @@ do { \
__ret; \
})

+#define __swait_event_lock_irq(wq, condition, lock, cmd) \
+ ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
+ raw_spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ raw_spin_lock_irq(&lock))
+
+#define swait_event_lock_irq(wq_head, condition, lock) \
+ do { \
+ if (condition) \
+ break; \
+ __swait_event_lock_irq(wq_head, condition, lock, ); \
+ } while (0)
+
#endif /* _LINUX_SWAIT_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 959a2e381..d625689b1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
+#include <linux/locallock.h>
#include <asm/page.h>

struct notifier_block;
@@ -354,6 +355,7 @@ extern unsigned long nr_free_pagecache_pages(void);


/* linux/mm/swap.c */
+DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
diff --git a/include/linux/swork.h b/include/linux/swork.h
new file mode 100644
index 000000000..f175fa9a6
--- /dev/null
+++ b/include/linux/swork.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+
+#include <linux/list.h>
+
+struct swork_event {
+ struct list_head item;
+ unsigned long flags;
+ void (*func)(struct swork_event *);
+};
+
+static inline void INIT_SWORK(struct swork_event *event,
+ void (*func)(struct swork_event *))
+{
+ event->flags = 0;
+ event->func = func;
+}
+
+bool swork_queue(struct swork_event *sev);
+
+int swork_get(void);
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e22fdce95..ad373939e 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -37,7 +37,17 @@ static inline long set_restart_fn(struct restart_block *restart,

#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#ifdef CONFIG_PREEMPT_LAZY
+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
+ test_thread_flag(TIF_NEED_RESCHED_LAZY))
+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
+#define tif_need_resched_lazy() (test_thread_flag(TIF_NEED_RESCHED_LAZY))
+
+#else
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
+#define tif_need_resched_lazy() 0
+#endif

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 4759f2d94..d350bac62 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -178,7 +178,7 @@ extern void add_timer(struct timer_list *timer);

extern int try_to_del_timer_sync(struct timer_list *timer);

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 133cfea84..49f04d7a9 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -62,6 +62,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
+ unsigned char migrate_disable;
+ unsigned char preempt_lazy_count;
};

#define TRACE_EVENT_TYPE_MAX \
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index a0061e018..77d48dd75 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -182,6 +182,7 @@ static __always_inline void pagefault_disabled_dec(void)
*/
static inline void pagefault_disable(void)
{
+ migrate_disable();
pagefault_disabled_inc();
/*
* make sure to have issued the store before a pagefault
@@ -198,6 +199,7 @@ static inline void pagefault_enable(void)
*/
barrier();
pagefault_disabled_dec();
+ migrate_enable();
}

/*
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 0beaea0e5..d521f42bf 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -55,7 +55,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
*/
static inline void __count_vm_event(enum vm_event_item item)
{
+ preempt_disable_rt();
raw_cpu_inc(vm_event_states.event[item]);
+ preempt_enable_rt();
}

static inline void count_vm_event(enum vm_event_item item)
@@ -65,7 +67,9 @@ static inline void count_vm_event(enum vm_event_item item)

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
+ preempt_disable_rt();
raw_cpu_add(vm_event_states.event[item], delta);
+ preempt_enable_rt();
}

static inline void count_vm_events(enum vm_event_item item, long delta)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 60a62d3ad..6f33ccf6b 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -10,6 +10,7 @@

#include <asm/current.h>
#include <uapi/linux/wait.h>
+#include <linux/atomic.h>

typedef struct wait_queue_entry wait_queue_entry_t;

@@ -527,8 +528,8 @@ do { \
int __ret = 0; \
struct hrtimer_sleeper __t; \
\
- hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \
- hrtimer_init_sleeper(&__t, current); \
+ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, HRTIMER_MODE_REL, \
+ current); \
if ((timeout) != KTIME_MAX) \
hrtimer_start_range_ns(&__t.timer, timeout, \
current->timer_slack_ns, \
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 3ac0fa0c8..c0a6055da 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -473,10 +473,6 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,

extern void destroy_workqueue(struct workqueue_struct *wq);

-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
-void free_workqueue_attrs(struct workqueue_attrs *attrs);
-int apply_workqueue_attrs(struct workqueue_struct *wq,
- const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 883bb9085..3b593cdeb 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -6,6 +6,7 @@
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
+#include <net/net_seq_lock.h>

struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
@@ -36,11 +37,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);

-int gnet_stats_copy_basic(const seqcount_t *running,
+int gnet_stats_copy_basic(net_seqlock_t *running,
struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(const seqcount_t *running,
+void __gnet_stats_copy_basic(net_seqlock_t *running,
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
@@ -60,13 +61,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ net_seqlock_t *running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ net_seqlock_t *running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
struct gnet_stats_rate_est64 *sample);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index bec7e96a3..18f351426 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -455,7 +455,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
}
#endif

-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
{
unsigned int hh_alen = 0;
unsigned int seq;
@@ -497,7 +497,7 @@ static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb

static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
{
- const struct hh_cache *hh = &n->hh;
+ struct hh_cache *hh = &n->hh;

if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
return neigh_hh_output(hh, skb);
@@ -538,7 +538,7 @@ struct neighbour_cb {

#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)

-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
const struct net_device *dev)
{
unsigned int seq;
diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h
new file mode 100644
index 000000000..a7034298a
--- /dev/null
+++ b/include/net/net_seq_lock.h
@@ -0,0 +1,15 @@
+#ifndef __NET_NET_SEQ_LOCK_H__
+#define __NET_NET_SEQ_LOCK_H__
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define net_seqlock_t seqlock_t
+# define net_seq_begin(__r) read_seqbegin(__r)
+# define net_seq_retry(__r, __s) read_seqretry(__r, __s)
+
+#else
+# define net_seqlock_t seqcount_t
+# define net_seq_begin(__r) read_seqcount_begin(__r)
+# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s)
+#endif
+
+#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a8dcb16c2..251059dca 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -10,6 +10,7 @@
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
+#include <net/net_seq_lock.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
@@ -106,7 +107,7 @@ struct Qdisc {
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
- seqcount_t running;
+ net_seqlock_t running;
struct gnet_stats_queue qstats;
unsigned long state;
struct Qdisc *next_sched;
@@ -140,7 +141,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
return spin_is_locked(&qdisc->seqlock);
+#ifdef CONFIG_PREEMPT_RT_BASE
+ return spin_is_locked(&qdisc->running.lock) ? true : false;
+#else
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
+#endif
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
@@ -151,17 +156,27 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
} else if (qdisc_is_running(qdisc)) {
return false;
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+ if (try_write_seqlock(&qdisc->running))
+ return true;
+ return false;
+#else
/* Variant of write_seqcount_begin() telling lockdep a trylock
* was attempted.
*/
raw_write_seqcount_begin(&qdisc->running);
seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
return true;
+#endif
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ write_sequnlock(&qdisc->running);
+#else
write_seqcount_end(&qdisc->running);
+#endif
if (qdisc->flags & TCQ_F_NOLOCK)
spin_unlock(&qdisc->seqlock);
}
@@ -477,7 +492,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}

-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);

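As context for the hunks above, a sketch of the ownership pattern that qdisc_run_begin()/qdisc_run_end() provide: exactly one CPU at a time may run a given qdisc, and on RT the trylock now goes through the seqlock so the owner can be preempted safely. example_qdisc_dispatch() is hypothetical; the two helpers are the ones patched above.

	/* Hypothetical caller: only one CPU may dispatch a given qdisc. */
	static void example_qdisc_dispatch(struct Qdisc *q)
	{
		if (!qdisc_run_begin(q))	/* another CPU already owns it */
			return;
		/* ... dequeue skbs and hand them to the driver ... */
		qdisc_run_end(q);		/* release ownership */
	}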
diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h
new file mode 100644
index 000000000..657e234b1
--- /dev/null
+++ b/include/soc/at91/atmel_tcb.h
@@ -0,0 +1,183 @@
+//SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Microchip */
+
+#ifndef __SOC_ATMEL_TCB_H
+#define __SOC_ATMEL_TCB_H
+
+/* Channel registers */
+#define ATMEL_TC_COFFS(c) ((c) * 0x40)
+#define ATMEL_TC_CCR(c) ATMEL_TC_COFFS(c)
+#define ATMEL_TC_CMR(c) (ATMEL_TC_COFFS(c) + 0x4)
+#define ATMEL_TC_SMMR(c) (ATMEL_TC_COFFS(c) + 0x8)
+#define ATMEL_TC_RAB(c) (ATMEL_TC_COFFS(c) + 0xc)
+#define ATMEL_TC_CV(c) (ATMEL_TC_COFFS(c) + 0x10)
+#define ATMEL_TC_RA(c) (ATMEL_TC_COFFS(c) + 0x14)
+#define ATMEL_TC_RB(c) (ATMEL_TC_COFFS(c) + 0x18)
+#define ATMEL_TC_RC(c) (ATMEL_TC_COFFS(c) + 0x1c)
+#define ATMEL_TC_SR(c) (ATMEL_TC_COFFS(c) + 0x20)
+#define ATMEL_TC_IER(c) (ATMEL_TC_COFFS(c) + 0x24)
+#define ATMEL_TC_IDR(c) (ATMEL_TC_COFFS(c) + 0x28)
+#define ATMEL_TC_IMR(c) (ATMEL_TC_COFFS(c) + 0x2c)
+#define ATMEL_TC_EMR(c) (ATMEL_TC_COFFS(c) + 0x30)
+
+/* Block registers */
+#define ATMEL_TC_BCR 0xc0
+#define ATMEL_TC_BMR 0xc4
+#define ATMEL_TC_QIER 0xc8
+#define ATMEL_TC_QIDR 0xcc
+#define ATMEL_TC_QIMR 0xd0
+#define ATMEL_TC_QISR 0xd4
+#define ATMEL_TC_FMR 0xd8
+#define ATMEL_TC_WPMR 0xe4
+
+/* CCR fields */
+#define ATMEL_TC_CCR_CLKEN BIT(0)
+#define ATMEL_TC_CCR_CLKDIS BIT(1)
+#define ATMEL_TC_CCR_SWTRG BIT(2)
+
+/* Common CMR fields */
+#define ATMEL_TC_CMR_TCLKS_MSK GENMASK(2, 0)
+#define ATMEL_TC_CMR_TCLK(x) (x)
+#define ATMEL_TC_CMR_XC(x) ((x) + 5)
+#define ATMEL_TC_CMR_CLKI BIT(3)
+#define ATMEL_TC_CMR_BURST_MSK GENMASK(5, 4)
+#define ATMEL_TC_CMR_BURST_XC(x) (((x) + 1) << 4)
+#define ATMEL_TC_CMR_WAVE BIT(15)
+
+/* Capture mode CMR fields */
+#define ATMEL_TC_CMR_LDBSTOP BIT(6)
+#define ATMEL_TC_CMR_LDBDIS BIT(7)
+#define ATMEL_TC_CMR_ETRGEDG_MSK GENMASK(9, 8)
+#define ATMEL_TC_CMR_ETRGEDG_NONE (0 << 8)
+#define ATMEL_TC_CMR_ETRGEDG_RISING (1 << 8)
+#define ATMEL_TC_CMR_ETRGEDG_FALLING (2 << 8)
+#define ATMEL_TC_CMR_ETRGEDG_BOTH (3 << 8)
+#define ATMEL_TC_CMR_ABETRG BIT(10)
+#define ATMEL_TC_CMR_CPCTRG BIT(14)
+#define ATMEL_TC_CMR_LDRA_MSK GENMASK(17, 16)
+#define ATMEL_TC_CMR_LDRA_NONE (0 << 16)
+#define ATMEL_TC_CMR_LDRA_RISING (1 << 16)
+#define ATMEL_TC_CMR_LDRA_FALLING (2 << 16)
+#define ATMEL_TC_CMR_LDRA_BOTH (3 << 16)
+#define ATMEL_TC_CMR_LDRB_MSK GENMASK(19, 18)
+#define ATMEL_TC_CMR_LDRB_NONE (0 << 18)
+#define ATMEL_TC_CMR_LDRB_RISING (1 << 18)
+#define ATMEL_TC_CMR_LDRB_FALLING (2 << 18)
+#define ATMEL_TC_CMR_LDRB_BOTH (3 << 18)
+#define ATMEL_TC_CMR_SBSMPLR_MSK GENMASK(22, 20)
+#define ATMEL_TC_CMR_SBSMPLR(x) ((x) << 20)
+
+/* Waveform mode CMR fields */
+#define ATMEL_TC_CMR_CPCSTOP BIT(6)
+#define ATMEL_TC_CMR_CPCDIS BIT(7)
+#define ATMEL_TC_CMR_EEVTEDG_MSK GENMASK(9, 8)
+#define ATMEL_TC_CMR_EEVTEDG_NONE (0 << 8)
+#define ATMEL_TC_CMR_EEVTEDG_RISING (1 << 8)
+#define ATMEL_TC_CMR_EEVTEDG_FALLING (2 << 8)
+#define ATMEL_TC_CMR_EEVTEDG_BOTH (3 << 8)
+#define ATMEL_TC_CMR_EEVT_MSK GENMASK(11, 10)
+#define ATMEL_TC_CMR_EEVT_XC(x) (((x) + 1) << 10)
+#define ATMEL_TC_CMR_ENETRG BIT(12)
+#define ATMEL_TC_CMR_WAVESEL_MSK GENMASK(14, 13)
+#define ATMEL_TC_CMR_WAVESEL_UP (0 << 13)
+#define ATMEL_TC_CMR_WAVESEL_UPDOWN (1 << 13)
+#define ATMEL_TC_CMR_WAVESEL_UPRC (2 << 13)
+#define ATMEL_TC_CMR_WAVESEL_UPDOWNRC (3 << 13)
+#define ATMEL_TC_CMR_ACPA_MSK GENMASK(17, 16)
+#define ATMEL_TC_CMR_ACPA(a) (ATMEL_TC_CMR_ACTION_##a << 16)
+#define ATMEL_TC_CMR_ACPC_MSK GENMASK(19, 18)
+#define ATMEL_TC_CMR_ACPC(a) (ATMEL_TC_CMR_ACTION_##a << 18)
+#define ATMEL_TC_CMR_AEEVT_MSK GENMASK(21, 20)
+#define ATMEL_TC_CMR_AEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 20)
+#define ATMEL_TC_CMR_ASWTRG_MSK GENMASK(23, 22)
+#define ATMEL_TC_CMR_ASWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 22)
+#define ATMEL_TC_CMR_BCPB_MSK GENMASK(25, 24)
+#define ATMEL_TC_CMR_BCPB(a) (ATMEL_TC_CMR_ACTION_##a << 24)
+#define ATMEL_TC_CMR_BCPC_MSK GENMASK(27, 26)
+#define ATMEL_TC_CMR_BCPC(a) (ATMEL_TC_CMR_ACTION_##a << 26)
+#define ATMEL_TC_CMR_BEEVT_MSK GENMASK(29, 28)
+#define ATMEL_TC_CMR_BEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 28)
+#define ATMEL_TC_CMR_BSWTRG_MSK GENMASK(31, 30)
+#define ATMEL_TC_CMR_BSWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 30)
+#define ATMEL_TC_CMR_ACTION_NONE 0
+#define ATMEL_TC_CMR_ACTION_SET 1
+#define ATMEL_TC_CMR_ACTION_CLEAR 2
+#define ATMEL_TC_CMR_ACTION_TOGGLE 3
+
+/* SMMR fields */
+#define ATMEL_TC_SMMR_GCEN BIT(0)
+#define ATMEL_TC_SMMR_DOWN BIT(1)
+
+/* SR/IER/IDR/IMR fields */
+#define ATMEL_TC_COVFS BIT(0)
+#define ATMEL_TC_LOVRS BIT(1)
+#define ATMEL_TC_CPAS BIT(2)
+#define ATMEL_TC_CPBS BIT(3)
+#define ATMEL_TC_CPCS BIT(4)
+#define ATMEL_TC_LDRAS BIT(5)
+#define ATMEL_TC_LDRBS BIT(6)
+#define ATMEL_TC_ETRGS BIT(7)
+#define ATMEL_TC_CLKSTA BIT(16)
+#define ATMEL_TC_MTIOA BIT(17)
+#define ATMEL_TC_MTIOB BIT(18)
+
+/* EMR fields */
+#define ATMEL_TC_EMR_TRIGSRCA_MSK GENMASK(1, 0)
+#define ATMEL_TC_EMR_TRIGSRCA_TIOA 0
+#define ATMEL_TC_EMR_TRIGSRCA_PWMX 1
+#define ATMEL_TC_EMR_TRIGSRCB_MSK GENMASK(5, 4)
+#define ATMEL_TC_EMR_TRIGSRCB_TIOB (0 << 4)
+#define ATMEL_TC_EMR_TRIGSRCB_PWM (1 << 4)
+#define ATMEL_TC_EMR_NOCLKDIV BIT(8)
+
+/* BCR fields */
+#define ATMEL_TC_BCR_SYNC BIT(0)
+
+/* BMR fields */
+#define ATMEL_TC_BMR_TCXC_MSK(c) GENMASK(((c) * 2) + 1, (c) * 2)
+#define ATMEL_TC_BMR_TCXC(x, c) ((x) << (2 * (c)))
+#define ATMEL_TC_BMR_QDEN BIT(8)
+#define ATMEL_TC_BMR_POSEN BIT(9)
+#define ATMEL_TC_BMR_SPEEDEN BIT(10)
+#define ATMEL_TC_BMR_QDTRANS BIT(11)
+#define ATMEL_TC_BMR_EDGPHA BIT(12)
+#define ATMEL_TC_BMR_INVA BIT(13)
+#define ATMEL_TC_BMR_INVB BIT(14)
+#define ATMEL_TC_BMR_INVIDX BIT(15)
+#define ATMEL_TC_BMR_SWAP BIT(16)
+#define ATMEL_TC_BMR_IDXPHB BIT(17)
+#define ATMEL_TC_BMR_AUTOC BIT(18)
+#define ATMEL_TC_MAXFILT_MSK GENMASK(25, 20)
+#define ATMEL_TC_MAXFILT(x) (((x) - 1) << 20)
+#define ATMEL_TC_MAXCMP_MSK GENMASK(29, 26)
+#define ATMEL_TC_MAXCMP(x) ((x) << 26)
+
+/* QEDC fields */
+#define ATMEL_TC_QEDC_IDX BIT(0)
+#define ATMEL_TC_QEDC_DIRCHG BIT(1)
+#define ATMEL_TC_QEDC_QERR BIT(2)
+#define ATMEL_TC_QEDC_MPE BIT(3)
+#define ATMEL_TC_QEDC_DIR BIT(8)
+
+/* FMR fields */
+#define ATMEL_TC_FMR_ENCF(x) BIT(x)
+
+/* WPMR fields */
+#define ATMEL_TC_WPMR_WPKEY (0x54494d << 8)
+#define ATMEL_TC_WPMR_WPEN BIT(0)
+
+static const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, };
+
+static const struct of_device_id atmel_tcb_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-tcb",
+ .data = (void *)16,
+ }, {
+ .compatible = "atmel,at91sam9x5-tcb",
+ .data = (void *)32,
+ }, {
+ /* sentinel */
+ }
+};
+
+#endif /* __SOC_ATMEL_TCB_H */
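A hypothetical composition of a waveform-mode CMR value from the field macros in the new header; the clock selection and output actions are illustrative only, not taken from any driver in this patch.

	/* Toggle TIOA on both RA and RC compare, counting up, reset on RC. */
	u32 cmr = ATMEL_TC_CMR_TCLK(2) |	/* internal clock selection 2 */
		  ATMEL_TC_CMR_WAVE |		/* waveform mode */
		  ATMEL_TC_CMR_WAVESEL_UPRC |	/* count up, reset on RC compare */
		  ATMEL_TC_CMR_ACPA(TOGGLE) |	/* RA compare toggles TIOA */
		  ATMEL_TC_CMR_ACPC(TOGGLE);	/* RC compare toggles TIOA */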
diff --git a/init/Kconfig b/init/Kconfig
index ef1768d36..1c433fc50 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -814,6 +814,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
+ depends on !PREEMPT_RT_FULL
default n
help
This feature lets you explicitly allocate real CPU bandwidth
@@ -1735,6 +1736,7 @@ choice

config SLAB
bool "SLAB"
+ depends on !PREEMPT_RT_FULL
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
@@ -1755,6 +1757,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
+ depends on !PREEMPT_RT_FULL
help
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
@@ -1796,7 +1799,7 @@ config SLAB_FREELIST_HARDENED

config SLUB_CPU_PARTIAL
default y
- depends on SLUB && SMP
+ depends on SLUB && SMP && !PREEMPT_RT_FULL
bool "SLUB per cpu partial cache"
help
Per cpu partial caches accellerate objects allocation and freeing
diff --git a/init/Makefile b/init/Makefile
index a3e5ce2bc..777923256 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -34,4 +34,4 @@ silent_chk_compile.h = :
include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
diff --git a/init/init_task.c b/init/init_task.c
index b312a045f..788784cd7 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -50,6 +50,12 @@ static struct sighand_struct init_sighand = {
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
};

+#if defined(CONFIG_POSIX_TIMERS) && defined(CONFIG_PREEMPT_RT_BASE)
+# define INIT_TIMER_LIST .posix_timer_list = NULL,
+#else
+# define INIT_TIMER_LIST
+#endif
+
/*
* Set up the first task table, touch at your own risk!. Base=0,
* limit=0x1fffff (=2MB)
@@ -71,8 +77,13 @@ struct task_struct init_task
.static_prio = MAX_PRIO - 20,
.normal_prio = MAX_PRIO - 20,
.policy = SCHED_NORMAL,
- .cpus_allowed = CPU_MASK_ALL,
+ .cpus_ptr = &init_task.cpus_mask,
+ .cpus_mask = CPU_MASK_ALL,
.nr_cpus_allowed= NR_CPUS,
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) && \
+ defined(CONFIG_SCHED_DEBUG)
+ .pinned_on_cpu = -1,
+#endif
.mm = NULL,
.active_mm = &init_mm,
.restart_block = {
@@ -118,6 +129,7 @@ struct task_struct init_task
INIT_CPU_TIMERS(init_task)
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
.timer_slack_ns = 50000, /* 50 usec default slack */
+ INIT_TIMER_LIST
.thread_pid = &init_struct_pid,
.thread_group = LIST_HEAD_INIT(init_task.thread_group),
.thread_node = LIST_HEAD_INIT(init_signals.thread_head),
diff --git a/init/main.c b/init/main.c
index 50af60ff0..04710896e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -560,6 +560,7 @@ asmlinkage __visible void __init start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
+ softirq_early_init();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
boot_cpu_hotplug_init();

diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 84d882f3e..af27c4000 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW

config MUTEX_SPIN_ON_OWNER
def_bool y
- depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
+ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL

config RWSEM_SPIN_ON_OWNER
def_bool y
- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL

config LOCK_SPIN_ON_OWNER
def_bool y
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index cd1655122..fd924c0bc 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,3 +1,19 @@
+config PREEMPT
+ bool
+ select PREEMPT_COUNT
+
+config PREEMPT_RT_BASE
+ bool
+ select PREEMPT
+
+config PREEMPT_RT
+ bool
+
+config HAVE_PREEMPT_LAZY
+ bool
+
+config PREEMPT_LAZY
+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL

choice
prompt "Preemption Model"
@@ -34,10 +50,10 @@ config PREEMPT_VOLUNTARY

Select this if you are building a kernel for a desktop system.

-config PREEMPT
+config PREEMPT__LL
bool "Preemptible Kernel (Low-Latency Desktop)"
depends on !ARCH_NO_PREEMPT
- select PREEMPT_COUNT
+ select PREEMPT
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
help
This option reduces the latency of the kernel by making
@@ -54,7 +70,24 @@ config PREEMPT
embedded system with latency requirements in the milliseconds
range.

+config PREEMPT_RTB
+ bool "Preemptible Kernel (Basic RT)"
+ select PREEMPT_RT_BASE
+ help
+ This option is basically the same as (Low-Latency Desktop) but
+ enables changes which are preliminary for the full preemptible
+ RT kernel.
+
+config PREEMPT_RT_FULL
+ bool "Fully Preemptible Kernel (RT)"
+ depends on IRQ_FORCED_THREADING
+ select PREEMPT_RT_BASE
+ select PREEMPT_RCU
+ select PREEMPT_RT
+ help
+ All and everything
+
endchoice

config PREEMPT_COUNT
- bool
\ No newline at end of file
+ bool
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 1339b93f7..81dee3a6a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4821,10 +4821,10 @@ static void css_free_rwork_fn(struct work_struct *work)
}
}

-static void css_release_work_fn(struct work_struct *work)
+static void css_release_work_fn(struct swork_event *sev)
{
struct cgroup_subsys_state *css =
- container_of(work, struct cgroup_subsys_state, destroy_work);
+ container_of(sev, struct cgroup_subsys_state, destroy_swork);
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;

@@ -4886,8 +4886,8 @@ static void css_release(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);

- INIT_WORK(&css->destroy_work, css_release_work_fn);
- queue_work(cgroup_destroy_wq, &css->destroy_work);
+ INIT_SWORK(&css->destroy_swork, css_release_work_fn);
+ swork_queue(&css->destroy_swork);
}

static void init_and_link_css(struct cgroup_subsys_state *css,
@@ -5612,6 +5612,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
+ BUG_ON(swork_get());
return 0;
}
core_initcall(cgroup_wq_init);
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index ae2b1ad23..1afc006a3 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -291,7 +291,7 @@ static struct cpuset top_cpuset = {
*/

static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_SPINLOCK(callback_lock);
+static DEFINE_RAW_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

@@ -628,9 +628,9 @@ static int update_prefer_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (!cpumask_empty(trialcs->prefer_cpus))
dynamic_affinity_enable();

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->prefer_cpus, trialcs->prefer_cpus);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

return 0;
}
@@ -1009,9 +1009,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
continue;
rcu_read_unlock();

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cp->effective_cpus, new_cpus);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -1076,9 +1076,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
return retval;

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

/* use trialcs->cpus_allowed as a temp variable */
update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1262,9 +1262,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
continue;
rcu_read_unlock();

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cp->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1332,9 +1332,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
goto done;

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->mems_allowed = trialcs->mems_allowed;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1425,9 +1425,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->flags = trialcs->flags;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
@@ -1866,7 +1866,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);

switch (type) {
case FILE_CPULIST:
@@ -1890,7 +1890,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
ret = -EINVAL;
}

- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
return ret;
}

@@ -2125,12 +2125,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)

cpuset_inc();

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
}
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
@@ -2157,7 +2157,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
}
rcu_read_unlock();

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
@@ -2165,7 +2165,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
cpumask_copy(cs->prefer_cpus, parent->prefer_cpus);
#endif
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
out_unlock:
mutex_unlock(&cpuset_mutex);
put_online_cpus();
@@ -2210,7 +2210,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);

if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2221,7 +2221,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}

- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
mutex_unlock(&cpuset_mutex);
}

@@ -2235,7 +2235,7 @@ static void cpuset_fork(struct task_struct *task)
if (task_css_is_root(task, cpuset_cgrp_id))
return;

- set_cpus_allowed_ptr(task, &current->cpus_allowed);
+ set_cpus_allowed_ptr(task, current->cpus_ptr);
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
set_prefer_cpus_ptr(task, current->prefer_cpus);
#endif
@@ -2334,12 +2334,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
#endif
bool is_empty;

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, new_cpus);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->mems_allowed = *new_mems;
cs->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2383,10 +2383,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;

- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);

if (cpus_updated)
update_tasks_cpumask(cs);
@@ -2479,21 +2479,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)

/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/* we don't mess with cpumasks of tasks in top_cpuset */
}

/* synchronize mems_allowed to N_MEMORY */
if (mems_updated) {
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (!on_dfl)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
update_tasks_nodemask(&top_cpuset);
}

@@ -2592,11 +2592,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;

- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_cpus(task_cs(tsk), pmask);
rcu_read_unlock();
- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
}

/**
@@ -2657,11 +2657,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
nodemask_t mask;
unsigned long flags;

- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_mems(task_cs(tsk), &mask);
rcu_read_unlock();
- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);

return mask;
}
@@ -2753,14 +2753,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
return true;

/* Not hardwall and node outside mems_allowed: scan up cpusets */
- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);

rcu_read_lock();
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
rcu_read_unlock();

- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
return allowed;
}

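The callback_lock conversion above follows the usual RT rule: a spinlock_t becomes a sleeping lock under PREEMPT_RT_FULL, so it must not be taken where sleeping is forbidden; a raw_spinlock_t keeps the classic busy-wait semantics. A generic sketch of the resulting pattern, with hypothetical names:

	static DEFINE_RAW_SPINLOCK(example_lock);

	static void example_update(void)
	{
		unsigned long flags;

		/* Never sleeps, even on RT, so it stays legal in atomic
		 * context; the critical section must stay short and bounded. */
		raw_spin_lock_irqsave(&example_lock, flags);
		/* ... update the protected masks ... */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}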
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index d0ed410b4..3c949c46c 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -149,8 +149,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
cpu);
struct cgroup *pos = NULL;
+ unsigned long flags;

- raw_spin_lock(cpu_lock);
+ raw_spin_lock_irqsave(cpu_lock, flags);
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
struct cgroup_subsys_state *css;

@@ -162,7 +163,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
}
- raw_spin_unlock(cpu_lock);
+ raw_spin_unlock_irqrestore(cpu_lock, flags);

/* if @may_sleep, play nice and yield if necessary */
if (may_sleep && (need_resched() ||
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c943454b7..460126cdd 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -832,6 +832,15 @@ static int take_cpu_down(void *_param)
int err, cpu = smp_processor_id();
int ret;

+#ifdef CONFIG_PREEMPT_RT_BASE
+ /*
+ * If any tasks disabled migration before we got here,
+ * go back and sleep again.
+ */
+ if (cpu_nr_pinned(cpu))
+ return -EAGAIN;
+#endif
+
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
@@ -861,6 +870,10 @@ static int take_cpu_down(void *_param)
return 0;
}

+#ifdef CONFIG_PREEMPT_RT_BASE
+struct task_struct *takedown_cpu_task;
+#endif
+
static int takedown_cpu(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -875,11 +888,39 @@ static int takedown_cpu(unsigned int cpu)
*/
irq_lock_sparse();

+#ifdef CONFIG_PREEMPT_RT_BASE
+ WARN_ON_ONCE(takedown_cpu_task);
+ takedown_cpu_task = current;
+
+again:
+ /*
+ * If a task pins this CPU after we pass this check, take_cpu_down
+ * will return -EAGAIN.
+ */
+ for (;;) {
+ int nr_pinned;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ nr_pinned = cpu_nr_pinned(cpu);
+ if (nr_pinned == 0)
+ break;
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
+#endif
+
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
+#ifdef CONFIG_PREEMPT_RT_BASE
+ if (err == -EAGAIN)
+ goto again;
+#endif
if (err) {
+#ifdef CONFIG_PREEMPT_RT_BASE
+ takedown_cpu_task = NULL;
+#endif
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
@@ -898,6 +939,9 @@ static int takedown_cpu(unsigned int cpu)
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

+#ifdef CONFIG_PREEMPT_RT_BASE
+ takedown_cpu_task = NULL;
+#endif
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();

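The takedown loop above is the classic sleep-until-condition idiom. A distilled sketch, where condition_holds() is a stand-in for cpu_nr_pinned(cpu) == 0:

	/* Set the task state before testing the condition so a wakeup that
	 * arrives between the test and schedule() is not lost. */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (condition_holds())
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);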
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 6a4b41484..197cb422f 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -857,9 +857,11 @@ int kdb_printf(const char *fmt, ...)
va_list ap;
int r;

+ kdb_trap_printk++;
va_start(ap, fmt);
r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
va_end(ap);
+ kdb_trap_printk--;

return r;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a49860879..2e28519ea 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1108,7 +1108,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

raw_spin_lock_init(&cpuctx->hrtimer_lock);
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
timer->function = perf_mux_hrtimer_handler;
}

@@ -9368,7 +9368,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
if (!is_sampling_event(event))
return;

- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
hwc->hrtimer.function = perf_swevent_hrtimer;

/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 2a32d32bd..6b38a0490 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -151,7 +151,7 @@ static void __exit_signal(struct task_struct *tsk)
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
- flush_sigqueue(&tsk->pending);
+ flush_task_sigqueue(tsk);
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);

diff --git a/kernel/fork.c b/kernel/fork.c
index bfc4534ff..0f8b3e146 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -41,6 +41,7 @@
#include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/kprobes.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
@@ -690,6 +691,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);

+#ifdef CONFIG_PREEMPT_RT_BASE
+/*
+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
+ * want another facility to make this work.
+ */
+void __mmdrop_delayed(struct rcu_head *rhp)
+{
+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+ __mmdrop(mm);
+}
+#endif
+
static void mmdrop_async_fn(struct work_struct *work)
{
struct mm_struct *mm;
@@ -724,13 +738,24 @@ static inline void put_signal_struct(struct signal_struct *sig)
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
-
+#ifdef CONFIG_PREEMPT_RT_BASE
+static
+#endif
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);

+ /*
+ * Remove function-return probe instances associated with this
+ * task and put them back on the free list.
+ */
+ kprobe_flush_task(tsk);
+
+ /* Task is done with its stack. */
+ put_task_stack(tsk);
+
cgroup_free(tsk);
task_numa_free(tsk, true);
security_task_free(tsk);
@@ -743,7 +768,18 @@ void __put_task_struct(struct task_struct *tsk)
if (!profile_handoff_task(tsk))
free_task(tsk);
}
+#ifndef CONFIG_PREEMPT_RT_BASE
EXPORT_SYMBOL_GPL(__put_task_struct);
+#else
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
+
+ __put_task_struct(tsk);
+
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+#endif

void __init __weak arch_task_cache_init(void) { }

@@ -908,6 +944,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#ifdef CONFIG_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
#endif
+ if (orig->cpus_ptr == &orig->cpus_mask)
+ tsk->cpus_ptr = &tsk->cpus_mask;

/*
* One for the user space visible state that goes away when reaped.
@@ -922,6 +960,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->splice_pipe = NULL;
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
+ tsk->wake_q_sleeper.next = NULL;

account_kernel_stack(tsk, 1);

@@ -1670,6 +1709,9 @@ static void rt_mutex_init_task(struct task_struct *p)
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ tsk->posix_timer_list = NULL;
+#endif
tsk->cputime_expires.prof_exp = 0;
tsk->cputime_expires.virt_exp = 0;
tsk->cputime_expires.sched_exp = 0;
@@ -1905,6 +1947,7 @@ static __latent_entropy struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);

init_sigpending(&p->pending);
+ p->sigqueue_cache = NULL;

p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
diff --git a/kernel/futex.c b/kernel/futex.c
index a36a006e5..b1c8d5866 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -967,7 +967,9 @@ static void exit_pi_state_list(struct task_struct *curr)
if (head->next != next) {
/* retain curr->pi_lock for the loop invariant */
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
spin_unlock(&hb->lock);
+ raw_spin_lock_irq(&curr->pi_lock);
put_pi_state(pi_state);
continue;
}
@@ -1578,6 +1580,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_sleeper_q);
int ret = 0;

new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
@@ -1627,14 +1630,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
* not fail.
*/
pi_state_update_owner(pi_state, new_owner);
- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
+ &wake_sleeper_q);
}

out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

if (postunlock)
- rt_mutex_postunlock(&wake_q);
+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);

return ret;
}
@@ -2258,6 +2262,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
+ } else if (ret == -EAGAIN) {
+ /*
+ * Waiter was woken by timeout or
+ * signal and has set pi_blocked_on to
+ * PI_WAKEUP_INPROGRESS before we
+ * tried to enqueue it on the rtmutex.
+ */
+ this->pi_state = NULL;
+ put_pi_state(pi_state);
+ continue;
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
@@ -2821,10 +2835,9 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
if (abs_time) {
to = &timeout;

- hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
- CLOCK_REALTIME : CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- hrtimer_init_sleeper(to, current);
+ hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ?
+ CLOCK_REALTIME : CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
@@ -2922,9 +2935,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,

if (time) {
to = &timeout;
- hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
- HRTIMER_MODE_ABS);
- hrtimer_init_sleeper(to, current);
+ hrtimer_init_sleeper_on_stack(to, CLOCK_REALTIME,
+ HRTIMER_MODE_ABS, current);
hrtimer_set_expires(&to->timer, *time);
}

@@ -2987,7 +2999,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
goto no_block;
}

- rt_mutex_init_waiter(&rt_waiter);
+ rt_mutex_init_waiter(&rt_waiter, false);

/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
@@ -3003,6 +3015,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
* before __rt_mutex_start_proxy_lock() is done.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
+ /*
+ * the migrate_disable() here disables migration in the in_atomic() fast
+ * path which is enabled again in the following spin_unlock(). We have
+ * one migrate_disable() pending in the slow-path which is reversed
+ * after the raw_spin_unlock_irq() where we leave the atomic context.
+ */
+ migrate_disable();
+
spin_unlock(q.lock_ptr);
/*
* __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
@@ -3011,6 +3031,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
*/
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
+ migrate_enable();

if (ret) {
if (ret == 1)
@@ -3145,10 +3166,19 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* rt_waiter. Also see the WARN in wake_futex_pi().
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ /*
+ * Magic trickery for now to make the RT migrate disable
+ * logic happy. The following spin_unlock() happens with
+ * interrupts disabled so the internal migrate_enable()
+ * won't undo the migrate_disable() which was issued when
+ * locking hb->lock.
+ */
+ migrate_disable();
spin_unlock(&hb->lock);

/* drops pi_state->pi_mutex.wait_lock */
ret = wake_futex_pi(uaddr, uval, pi_state);
+ migrate_enable();

put_pi_state(pi_state);

@@ -3319,7 +3349,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
- struct futex_hash_bucket *hb;
+ struct futex_hash_bucket *hb, *hb2;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
@@ -3335,10 +3365,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,

if (abs_time) {
to = &timeout;
- hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
- CLOCK_REALTIME : CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- hrtimer_init_sleeper(to, current);
+ hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ?
+ CLOCK_REALTIME : CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
@@ -3347,7 +3376,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
- rt_mutex_init_waiter(&rt_waiter);
+ rt_mutex_init_waiter(&rt_waiter, false);

ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
if (unlikely(ret != 0))
@@ -3378,20 +3407,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);

- spin_lock(&hb->lock);
- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
- spin_unlock(&hb->lock);
- if (ret)
- goto out_put_keys;
+ /*
+ * On RT we must avoid races with requeue and trying to block
+ * on two mutexes (hb->lock and uaddr2's rtmutex) by
+ * serializing access to pi_blocked_on with pi_lock.
+ */
+ raw_spin_lock_irq(&current->pi_lock);
+ if (current->pi_blocked_on) {
+ /*
+ * We have been requeued or are in the process of
+ * being requeued.
+ */
+ raw_spin_unlock_irq(&current->pi_lock);
+ } else {
+ /*
+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
+ * prevents a concurrent requeue from moving us to the
+ * uaddr2 rtmutex. After that we can safely acquire
+ * (and possibly block on) hb->lock.
+ */
+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
+ raw_spin_unlock_irq(&current->pi_lock);
+
+ spin_lock(&hb->lock);
+
+ /*
+ * Clean up pi_blocked_on. We might leak it otherwise
+ * when we succeeded with the hb->lock in the fast
+ * path.
+ */
+ raw_spin_lock_irq(&current->pi_lock);
+ current->pi_blocked_on = NULL;
+ raw_spin_unlock_irq(&current->pi_lock);
+
+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+ spin_unlock(&hb->lock);
+ if (ret)
+ goto out_put_keys;
+ }

/*
- * In order for us to be here, we know our q.key == key2, and since
- * we took the hb->lock above, we also know that futex_requeue() has
- * completed and we no longer have to concern ourselves with a wakeup
- * race with the atomic proxy lock acquisition by the requeue code. The
- * futex_requeue dropped our key1 reference and incremented our key2
- * reference count.
+ * In order to be here, we have either been requeued, are in
+ * the process of being requeued, or requeue successfully
+ * acquired uaddr2 on our behalf. If pi_blocked_on was
+ * non-null above, we may be racing with a requeue. Do not
+ * rely on q->lock_ptr to be hb2->lock until after blocking on
+ * hb->lock or hb2->lock. The futex_requeue dropped our key1
+ * reference and incremented our key2 reference count.
*/
+ hb2 = hash_futex(&key2);

/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
@@ -3400,14 +3464,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
- spin_lock(q.lock_ptr);
+ spin_lock(&hb2->lock);
+ BUG_ON(&hb2->lock != q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
- spin_unlock(q.lock_ptr);
+ spin_unlock(&hb2->lock);
/*
* Adjust the return value. It's either -EFAULT or
* success (1) but the caller expects 0 for success.
@@ -3426,7 +3491,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

- spin_lock(q.lock_ptr);
+ spin_lock(&hb2->lock);
+ BUG_ON(&hb2->lock != q.lock_ptr);
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
ret = 0;

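The futex hunks above switch to hrtimer_init_sleeper_on_stack(), which merges the two-step init they replace. A sketch of the call sequence as used in the patch; the expiry value is hypothetical:

	struct hrtimer_sleeper to;

	/* One call now initializes both the on-stack timer and its sleeper. */
	hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC, HRTIMER_MODE_ABS, current);
	hrtimer_set_expires(&to.timer, ns_to_ktime(100 * NSEC_PER_MSEC));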
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 38554bc35..06a80bbf7 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -185,10 +185,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval;
unsigned int flags = 0;
+ struct pt_regs *regs = get_irq_regs();
+ u64 ip = regs ? instruction_pointer(regs) : 0;

retval = __handle_irq_event_percpu(desc, &flags);

- add_interrupt_randomness(desc->irq_data.irq, flags);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ desc->random_ip = ip;
+#else
+ add_interrupt_randomness(desc->irq_data.irq, flags, ip);
+#endif

if (!noirqdebug)
note_interrupt(desc, retval);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 163712c76..e2353e775 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -24,6 +24,7 @@
#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

@@ -33,6 +34,7 @@ static int __init setup_forced_irqthreads(char *arg)
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
+# endif
#endif

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
@@ -1158,6 +1160,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);

+#ifdef CONFIG_PREEMPT_RT_FULL
+ migrate_disable();
+ add_interrupt_randomness(action->irq, 0,
+ desc->random_ip ^ (unsigned long) action);
+ migrate_enable();
+#endif
wake_threads_waitq(desc);
}

@@ -1609,6 +1617,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}

+ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
+ irq_settings_set_no_softirq_call(desc);
+
if (irq_settings_can_autoenable(desc)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
} else {
@@ -2733,7 +2744,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
- * This function should be called with preemption disabled if the
+ * This function should be called with migration disabled if the
* interrupt controller has per-cpu registers.
*/
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index e43795cd2..47e2f9e23 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -17,6 +17,7 @@ enum {
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQ_IS_POLLED = IRQ_IS_POLLED,
_IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};

@@ -31,6 +32,7 @@ enum {
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#define IRQ_IS_POLLED GOT_YOU_MORON
#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON

@@ -41,6 +43,16 @@
irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}

+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
+}
+
+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
+}
+
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
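Driver side, the new setting is reached through the IRQF_NO_SOFTIRQ_CALL flag (defined in an earlier hunk of this patch and consumed by __setup_irq() above). A hypothetical request, with my_handler/dev as placeholders:

	/* Ask that this interrupt's handler never be deferred through the
	 * softirq machinery on RT. */
	ret = request_irq(irq, my_handler, IRQF_NO_SOFTIRQ_CALL, "mydev", dev);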
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index d867d6dda..cd12ee86c 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
+ return 1;
+#endif
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
@@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
+ return 1;
+#endif
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 73288914e..b6d9d3594 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
+#include <linux/interrupt.h>
#include <asm/processor.h>


@@ -57,29 +58,40 @@ void __weak arch_irq_work_raise(void)
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
-static void __irq_work_queue_local(struct irq_work *work)
+static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list)
{
- /* If the work is "lazy", handle it from next tick if any */
- if (work->flags & IRQ_WORK_LAZY) {
- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
- tick_nohz_tick_stopped())
- arch_irq_work_raise();
- } else {
- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
- arch_irq_work_raise();
- }
+ bool empty;
+
+ empty = llist_add(&work->llnode, list);
+
+ if (empty &&
+ (!(work->flags & IRQ_WORK_LAZY) ||
+ tick_nohz_tick_stopped()))
+ arch_irq_work_raise();
+}
+
+static inline bool use_lazy_list(struct irq_work *work)
+{
+ return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+ || (work->flags & IRQ_WORK_LAZY);
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
+ struct llist_head *list;
+
/* Only queue if not already pending */
if (!irq_work_claim(work))
return false;

/* Queue the entry and raise the IPI if needed. */
preempt_disable();
- __irq_work_queue_local(work);
+ if (use_lazy_list(work))
+ list = this_cpu_ptr(&lazy_list);
+ else
+ list = this_cpu_ptr(&raised_list);
+ __irq_work_queue_local(work, list);
preempt_enable();

return true;
@@ -98,6 +110,8 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
return irq_work_queue(work);

#else /* CONFIG_SMP: */
+ struct llist_head *list;
+
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(cpu));

@@ -106,13 +120,18 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
return false;

preempt_disable();
+ if (use_lazy_list(work))
+ list = &per_cpu(lazy_list, cpu);
+ else
+ list = &per_cpu(raised_list, cpu);
+
if (cpu != smp_processor_id()) {
/* Arch remote IPI send/receive backend aren't NMI safe */
WARN_ON_ONCE(in_nmi());
- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+ if (llist_add(&work->llnode, list))
arch_send_call_function_single_ipi(cpu);
} else {
- __irq_work_queue_local(work);
+ __irq_work_queue_local(work, list);
}
preempt_enable();

@@ -128,9 +147,8 @@ bool irq_work_needs_cpu(void)
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);

- if (llist_empty(raised) || arch_irq_work_has_interrupt())
- if (llist_empty(lazy))
- return false;
+ if (llist_empty(raised) && llist_empty(lazy))
+ return false;

/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@@ -144,8 +162,12 @@ static void irq_work_run_list(struct llist_head *list)
struct llist_node *llnode;
unsigned long flags;

+#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * nort: On RT IRQ-work may run in SOFTIRQ context.
+ */
BUG_ON(!irqs_disabled());
-
+#endif
if (llist_empty(list))
return;

@@ -177,7 +199,16 @@ static void irq_work_run_list(struct llist_head *list)
void irq_work_run(void)
{
irq_work_run_list(this_cpu_ptr(&raised_list));
- irq_work_run_list(this_cpu_ptr(&lazy_list));
+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
+ /*
+ * NOTE: we raise softirq via IPI for safety,
+ * and execute in irq_work_tick() to move the
+ * overhead from hard to soft irq context.
+ */
+ if (!llist_empty(this_cpu_ptr(&lazy_list)))
+ raise_softirq(TIMER_SOFTIRQ);
+ } else
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

@@ -187,8 +218,17 @@ void irq_work_tick(void)

if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void)
+{
irq_work_run_list(this_cpu_ptr(&lazy_list));
}
+#endif

/*
* Synchronize against the irq_work @entry, ensures the entry is not
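With use_lazy_list() above, only work explicitly flagged IRQ_WORK_HARD_IRQ still runs from hard interrupt context on RT; everything else is deferred to the softirq-driven lazy list. A hypothetical declaration:

	static void example_fn(struct irq_work *w)
	{
		/* runs from hard irq context even on PREEMPT_RT_FULL */
	}

	static struct irq_work example_work = {
		.flags = IRQ_WORK_HARD_IRQ,	/* keep off the lazy list on RT */
		.func  = example_fn,
	};

	/* irq_work_queue(&example_work); from NMI or hardirq context */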
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 46ba85365..9a23632b6 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -140,6 +140,15 @@ KERNEL_ATTR_RO(vmcoreinfo);

#endif /* CONFIG_CRASH_CORE */

+#if defined(CONFIG_PREEMPT_RT_FULL)
+static ssize_t realtime_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", 1);
+}
+KERNEL_ATTR_RO(realtime);
+#endif
+
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -230,6 +239,9 @@ static struct attribute * kernel_attrs[] = {
#ifndef CONFIG_TINY_RCU
&rcu_expedited_attr.attr,
&rcu_normal_attr.attr,
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ &realtime_attr.attr,
#endif
NULL
};
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4191fed62..c02e3a406 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -181,7 +181,7 @@ void *kthread_data(struct task_struct *task)
  */
 void set_kthreadd_affinity(void)
 {
-	set_cpus_allowed_ptr(current, &kthreadd_task->cpus_allowed);
+	set_cpus_allowed_ptr(current, kthreadd_task->cpus_ptr);
 }

 /**
@@ -653,7 +653,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
 				struct lock_class_key *key)
 {
 	memset(worker, 0, sizeof(struct kthread_worker));
-	spin_lock_init(&worker->lock);
+	raw_spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -695,21 +695,21 @@ int kthread_worker_fn(void *worker_ptr)

 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
-		spin_lock_irq(&worker->lock);
+		raw_spin_lock_irq(&worker->lock);
 		worker->task = NULL;
-		spin_unlock_irq(&worker->lock);
+		raw_spin_unlock_irq(&worker->lock);
 		return 0;
 	}

 	work = NULL;
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	if (!list_empty(&worker->work_list)) {
 		work = list_first_entry(&worker->work_list,
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
 	worker->current_work = work;
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);

 	if (work) {
 		__set_current_state(TASK_RUNNING);
@@ -866,12 +866,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	bool ret = false;
 	unsigned long flags;

-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -897,7 +897,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;

-	spin_lock(&worker->lock);
+	raw_spin_lock(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);

@@ -907,7 +907,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (!work->canceling)
 		kthread_insert_work(worker, work, &worker->work_list);

-	spin_unlock(&worker->lock);
+	raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

@@ -963,14 +963,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	bool ret = false;

-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);

 	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}

-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -1006,7 +1006,7 @@ void kthread_flush_work(struct kthread_work *work)
 	if (!worker)
 		return;

-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);

@@ -1018,7 +1018,7 @@ void kthread_flush_work(struct kthread_work *work)
 	else
 		noop = true;

-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);

 	if (!noop)
 		wait_for_completion(&fwork.done);
@@ -1046,9 +1046,9 @@ static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
 	 * any queuing is blocked by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, *flags);
+	raw_spin_unlock_irqrestore(&worker->lock, *flags);
 	del_timer_sync(&dwork->timer);
-	spin_lock_irqsave(&worker->lock, *flags);
+	raw_spin_lock_irqsave(&worker->lock, *flags);
 	work->canceling--;
 }

@@ -1110,7 +1110,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	int ret = false;

-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);

 	/* Do not bother with canceling when never queued. */
 	if (!work->worker)
@@ -1139,7 +1139,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1153,7 +1153,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	if (!worker)
 		goto out;

-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);

@@ -1170,13 +1170,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	 * In the meantime, block any queuing by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	kthread_flush_work(work);
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	work->canceling--;

 out_fast:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
 	return ret;
 }
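
The kthread_worker conversion above follows the usual PREEMPT_RT rule: on RT a spinlock_t is itself a sleeping lock, so a lock that must be taken from contexts that may not sleep (such as a driver's hard interrupt handler queueing work) has to be a raw_spinlock_t. A minimal sketch of the usage pattern this keeps legal on RT -- illustrative only, not part of the patch; all demo_* names are hypothetical:

#include <linux/kthread.h>
#include <linux/interrupt.h>

static struct kthread_worker *demo_worker;	/* hypothetical */
static struct kthread_work demo_work;		/* hypothetical */

static void demo_work_fn(struct kthread_work *work)
{
	/* Runs in the worker kthread; may sleep freely. */
}

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	/*
	 * Hard-irq context: on RT only raw locks may be taken here,
	 * which is exactly what worker->lock now is, so queueing
	 * work from an interrupt handler stays valid.
	 */
	kthread_queue_work(demo_worker, &demo_work);
	return IRQ_HANDLED;
}
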
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 392c7f23a..c0bf04b6b 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -3,7 +3,7 @@
 # and is generally not a function of system call inputs.
 KCOV_INSTRUMENT		:= n

-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+obj-y += semaphore.o percpu-rwsem.o

 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
@@ -12,7 +12,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
 endif

+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+obj-y += mutex.o
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+endif
+obj-y += rwsem.o
 obj-$(CONFIG_LOCKDEP) += lockdep.o
 ifeq ($(CONFIG_PROC_FS),y)
 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -25,8 +29,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
+endif
+obj-$(CONFIG_PREEMPT_RT_FULL) += mutex-rt.o rwsem-rt.o rwlock-rt.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
 obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1e272f6a0..1938b4bfb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3826,6 +3826,7 @@ static void check_flags(unsigned long flags)
 		}
 	}

+#ifndef CONFIG_PREEMPT_RT_FULL
 	/*
 	 * We dont accurately track softirq state in e.g.
 	 * hardirq contexts (such as on 4KSTACKS), so only
@@ -3840,6 +3841,7 @@ static void check_flags(unsigned long flags)
 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
 		}
 	}
+#endif

 	if (!debug_locks)
 		print_irqtrace_events(current);
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 7d0b0ed74..a81e6ef33 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -29,7 +29,6 @@
 #include <linux/kthread.h>
 #include <linux/sched/rt.h>
 #include <linux/spinlock.h>
-#include <linux/rwlock.h>
 #include <linux/mutex.h>
 #include <linux/rwsem.h>
 #include <linux/smp.h>
diff --git a/kernel/locking/mutex-rt.c b/kernel/locking/mutex-rt.c
new file mode 100644
index 000000000..4f81595c0
--- /dev/null
+++ b/kernel/locking/mutex-rt.c
@@ -0,0 +1,223 @@
+/*
+ * kernel/rt.c
+ *
+ * Real-Time Preemption Support
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * historic credit for proving that Linux spinlocks can be implemented via
+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
+ * and others) who prototyped it on 2.4 and did lots of comparative
+ * research and analysis; TimeSys, for proving that you can implement a
+ * fully preemptible kernel via the use of IRQ threading and mutexes;
+ * Bill Huey for persuasively arguing on lkml that the mutex model is the
+ * right one; and to MontaVista, who ported pmutexes to 2.6.
+ *
+ * This code is a from-scratch implementation and is not based on pmutexes,
+ * but the idea of converting spinlocks to mutexes is used here too.
+ *
+ * lock debugging, locking tree, deadlock detection:
+ *
+ *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ *  Released under the General Public License (GPL).
+ *
+ * Includes portions of the generic R/W semaphore implementation from:
+ *
+ *  Copyright (c) 2001   David Howells (dhowells@redhat.com).
+ *  - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
+ *  - Derived also from comments by Linus
+ *
+ * Pending ownership of locks and ownership stealing:
+ *
+ *  Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
+ *
+ *   (also by Steven Rostedt)
+ *    - Converted single pi_lock to individual task locks.
+ *
+ * By Esben Nielsen:
+ *    Doing priority inheritance with help of the scheduler.
+ *
+ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *  - major rework based on Esben Nielsens initial patch
+ *  - replaced thread_info references by task_struct refs
+ *  - removed task->pending_owner dependency
+ *  - BKL drop/reacquire for semaphore style locks to avoid deadlocks
+ *    in the scheduler return path as discussed with Steven Rostedt
+ *
+ *  Copyright (C) 2006, Kihon Technologies Inc.
+ *    Steven Rostedt <rostedt@goodmis.org>
+ *  - debugged and patched Thomas Gleixner's rework.
+ *  - added back the cmpxchg to the rework.
+ *  - turned atomic require back on for SMP.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+#include <linux/futex.h>
+#include <linux/hrtimer.h>
+
+#include "rtmutex_common.h"
+
+/*
+ * struct mutex functions
+ */
+void __mutex_do_init(struct mutex *mutex, const char *name,
+		     struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
+	lockdep_init_map(&mutex->dep_map, name, key, 0);
+#endif
+	mutex->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__mutex_do_init);
+
+void __lockfunc _mutex_lock(struct mutex *lock)
+{
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	__rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(_mutex_lock);
+
+void __lockfunc _mutex_lock_io(struct mutex *lock)
+{
+	int token;
+
+	token = io_schedule_prepare();
+	_mutex_lock(lock);
+	io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(_mutex_lock_io);
+
+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible);
+
+int __lockfunc _mutex_lock_killable(struct mutex *lock)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
+{
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+	__rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(_mutex_lock_nested);
+
+void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass)
+{
+	int token;
+
+	token = io_schedule_prepare();
+
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+	__rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
+
+	io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(_mutex_lock_io_nested);
+
+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+	mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
+	__rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(_mutex_lock_nest_lock);
+
+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
+{
+	int ret;
+
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+	ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
+
+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable_nested);
+#endif
+
+int __lockfunc _mutex_trylock(struct mutex *lock)
+{
+	int ret = __rt_mutex_trylock(&lock->lock);
+
+	if (ret)
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_trylock);
+
+void __lockfunc _mutex_unlock(struct mutex *lock)
+{
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	__rt_mutex_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_unlock);
+
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+	/* dec if we can't possibly hit 0 */
+	if (atomic_add_unless(cnt, -1, 1))
+		return 0;
+	/* we might hit 0, so take the lock */
+	mutex_lock(lock);
+	if (!atomic_dec_and_test(cnt)) {
+		/* when we actually did the dec, we didn't hit 0 */
+		mutex_unlock(lock);
+		return 0;
+	}
+	/* we hit 0, and we hold the lock */
+	return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
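
The atomic_dec_and_mutex_lock() at the end of the new file keeps the classic semantics on RT: the mutex is taken only when the counter might actually reach zero. A minimal usage sketch -- illustrative only, not part of the patch; names are hypothetical:

/* A refcounted object whose teardown is serialized by a mutex. */
static atomic_t demo_refs = ATOMIC_INIT(1);
static DEFINE_MUTEX(demo_lock);

static void demo_put(void)
{
	/* Fast path: just decrement while refs > 1; no locking at all. */
	if (!atomic_dec_and_mutex_lock(&demo_refs, &demo_lock))
		return;
	/* Slow path: we decremented to zero and now hold demo_lock. */
	/* ... tear down resources here ... */
	mutex_unlock(&demo_lock);
}
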
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a5ec4f685..fe5153fc7 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -7,6 +7,11 @@
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
+ * Adaptive Spinlocks:
+ *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
+ *				     and Peter Morreale,
+ * Adaptive Spinlocks simplification:
+ *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
 *
 * See Documentation/locking/rt-mutex-design.txt for details.
 */
@@ -18,6 +23,8 @@
 #include <linux/sched/wake_q.h>
 #include <linux/sched/debug.h>
 #include <linux/timer.h>
+#include <linux/ww_mutex.h>
+#include <linux/blkdev.h>

 #include "rtmutex_common.h"

@@ -135,6 +142,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }

+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+{
+	return waiter && waiter != PI_WAKEUP_INPROGRESS &&
+		waiter != PI_REQUEUE_INPROGRESS;
+}
+
 /*
  * We can speed up the acquire/release, if there's no debugging state to be
  * set up.
@@ -228,7 +241,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
  * Only use with rt_mutex_waiter_{less,equal}()
  */
 #define task_to_waiter(p)	\
-	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
+	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }

 static inline int
 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
@@ -268,6 +281,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
 	return 1;
 }

+#define STEAL_NORMAL  0
+#define STEAL_LATERAL 1
+
+static inline int
+rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
+{
+	struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
+
+	if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
+		return 1;
+
+	/*
+	 * Note that RT tasks are excluded from lateral-steals
+	 * to prevent the introduction of an unbounded latency.
+	 */
+	if (mode == STEAL_NORMAL || rt_task(waiter->task))
+		return 0;
+
+	return rt_mutex_waiter_equal(waiter, top_waiter);
+}
+
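
rt_mutex_steal() above encodes the lateral-steal policy: a strictly higher-priority waiter may always take the lock; an equal-priority one may take it only in STEAL_LATERAL mode, and never if it is an RT task, so real-time latencies stay bounded. A stand-alone model of that decision, ignoring the identity check for the current top waiter -- illustrative only, not kernel code:

#include <stdio.h>

/* Lower ->prio means higher priority, mirroring the kernel view. */
struct waiter { int prio; int is_rt; };

static int can_steal(const struct waiter *w, const struct waiter *top,
		     int lateral)
{
	if (w->prio < top->prio)	/* strictly higher priority wins */
		return 1;
	if (!lateral || w->is_rt)	/* RT tasks never steal laterally */
		return 0;
	return w->prio == top->prio;	/* equal priority, lateral allowed */
}

int main(void)
{
	struct waiter top = { .prio = 120, .is_rt = 0 };
	struct waiter w   = { .prio = 120, .is_rt = 0 };

	printf("normal: %d, lateral: %d\n",
	       can_steal(&w, &top, 0), can_steal(&w, &top, 1));
	return 0;	/* prints "normal: 0, lateral: 1" */
}
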
 static void
 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 {
@@ -372,6 +406,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
 	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
 }

+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
+{
+	if (waiter->savestate)
+		wake_up_lock_sleeper(waiter->task);
+	else
+		wake_up_process(waiter->task);
+}
+
 /*
  * Max number of times we'll walk the boosting chain:
  */
@@ -379,7 +421,8 @@ int max_lock_depth = 1024;

 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
 {
-	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+	return rt_mutex_real_waiter(p->pi_blocked_on) ?
+		p->pi_blocked_on->lock : NULL;
 }

 /*
@@ -515,7 +558,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * reached or the state of the chain has changed while we
 	 * dropped the locks.
 	 */
-	if (!waiter)
+	if (!rt_mutex_real_waiter(waiter))
 		goto out_unlock_pi;

 	/*
@@ -696,13 +739,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * follow here. This is the end of the chain we are walking.
 	 */
 	if (!rt_mutex_owner(lock)) {
+		struct rt_mutex_waiter *lock_top_waiter;
+
 		/*
 		 * If the requeue [7] above changed the top waiter,
 		 * then we need to wake the new top waiter up to try
 		 * to get the lock.
 		 */
-		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
-			wake_up_process(rt_mutex_top_waiter(lock)->task);
+		lock_top_waiter = rt_mutex_top_waiter(lock);
+		if (prerequeue_top_waiter != lock_top_waiter)
+			rt_mutex_wake_waiter(lock_top_waiter);
 		raw_spin_unlock_irq(&lock->wait_lock);
 		return 0;
 	}
@@ -804,9 +850,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
+ * @mode:   Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
 */
-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-				struct rt_mutex_waiter *waiter)
+static int __try_to_take_rt_mutex(struct rt_mutex *lock,
+				  struct task_struct *task,
+				  struct rt_mutex_waiter *waiter, int mode)
 {
 	lockdep_assert_held(&lock->wait_lock);

@@ -842,12 +890,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	 */
 	if (waiter) {
 		/*
-		 * If waiter is not the highest priority waiter of
-		 * @lock, give up.
+		 * If waiter is not the highest priority waiter of @lock,
+		 * or its peer when lateral steal is allowed, give up.
 		 */
-		if (waiter != rt_mutex_top_waiter(lock))
+		if (!rt_mutex_steal(lock, waiter, mode))
 			return 0;
-
 		/*
 		 * We can acquire the lock. Remove the waiter from the
 		 * lock waiters tree.
@@ -865,14 +912,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 		 */
 		if (rt_mutex_has_waiters(lock)) {
 			/*
-			 * If @task->prio is greater than or equal to
-			 * the top waiter priority (kernel view),
-			 * @task lost.
+			 * If @task->prio is greater than the top waiter
+			 * priority (kernel view), or equal to it when a
+			 * lateral steal is forbidden, @task lost.
 			 */
-			if (!rt_mutex_waiter_less(task_to_waiter(task),
-						  rt_mutex_top_waiter(lock)))
+			if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
 				return 0;
-
 			/*
 			 * The current top waiter stays enqueued. We
 			 * don't have to change anything in the lock
@@ -919,6 +964,344 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	return 1;
 }

+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * preemptible spin_lock functions:
+ */
+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
+					 void (*slowfn)(struct rt_mutex *lock))
+{
+	might_sleep_no_state_check();
+
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+		return;
+	else
+		slowfn(lock);
+}
+
+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+					   void (*slowfn)(struct rt_mutex *lock))
+{
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+		return;
+	else
+		slowfn(lock);
+}
+#ifdef CONFIG_SMP
+/*
+ * Note that owner is a speculative pointer and dereferencing relies
+ * on rcu_read_lock() and the check against the lock owner.
+ */
+static int adaptive_wait(struct rt_mutex *lock,
+			 struct task_struct *owner)
+{
+	int res = 0;
+
+	rcu_read_lock();
+	for (;;) {
+		if (owner != rt_mutex_owner(lock))
+			break;
+		/*
+		 * Ensure that owner->on_cpu is dereferenced _after_
+		 * checking the above to be valid.
+		 */
+		barrier();
+		if (!owner->on_cpu) {
+			res = 1;
+			break;
+		}
+		cpu_relax();
+	}
+	rcu_read_unlock();
+	return res;
+}
+#else
+static int adaptive_wait(struct rt_mutex *lock,
+			 struct task_struct *orig_owner)
+{
+	return 1;
+}
+#endif
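
adaptive_wait() above is the heart of adaptive spinning: as long as the owner is running on another CPU it returns 0 and the caller busy-waits, because the owner is likely to release the lock soon; once the owner goes off-CPU (or changes), spinning is wasted work and the caller schedules. A user-space model of the same trade-off -- illustrative only, not kernel code, hypothetical names:

#include <stdatomic.h>
#include <sched.h>

struct demo_lock {
	atomic_int held;		/* 0 = free, 1 = taken */
	atomic_int owner_running;	/* owner currently on a CPU? */
};

static void demo_adaptive_lock(struct demo_lock *l)
{
	while (atomic_exchange(&l->held, 1)) {
		if (atomic_load(&l->owner_running))
			;		/* owner makes progress: spin */
		else
			sched_yield();	/* owner is off-CPU: give up slice */
	}
}
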
+
+static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+				   struct rt_mutex_waiter *waiter,
+				   struct task_struct *task,
+				   enum rtmutex_chainwalk chwalk);
+/*
+ * Slow path lock function spin_lock style: this variant is very
+ * careful not to miss any non-lock wakeups.
+ *
+ * We store the current state under p->pi_lock in p->saved_state and
+ * the try_to_wake_up() code handles this accordingly.
+ */
+void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
+					  struct rt_mutex_waiter *waiter,
+					  unsigned long flags)
+{
+	struct task_struct *lock_owner, *self = current;
+	struct rt_mutex_waiter *top_waiter;
+	int ret;
+
+	if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL))
+		return;
+
+	BUG_ON(rt_mutex_owner(lock) == self);
+
+	/*
+	 * We save whatever state the task is in and we'll restore it
+	 * after acquiring the lock taking real wakeups into account
+	 * as well. We are serialized via pi_lock against wakeups. See
+	 * try_to_wake_up().
+	 */
+	raw_spin_lock(&self->pi_lock);
+	self->saved_state = self->state;
+	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+	raw_spin_unlock(&self->pi_lock);
+
+	ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK);
+	BUG_ON(ret);
+
+	for (;;) {
+		/* Try to acquire the lock again. */
+		if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL))
+			break;
+
+		top_waiter = rt_mutex_top_waiter(lock);
+		lock_owner = rt_mutex_owner(lock);
+
+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+		debug_rt_mutex_print_deadlock(waiter);
+
+		if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
+			schedule();
+
+		raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+		raw_spin_lock(&self->pi_lock);
+		__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+		raw_spin_unlock(&self->pi_lock);
+	}
+
+	/*
+	 * Restore the task state to current->saved_state. We set it
+	 * to the original state above and the try_to_wake_up() code
+	 * has possibly updated it when a real (non-rtmutex) wakeup
+	 * happened while we were blocked. Clear saved_state so
+	 * try_to_wake_up() does not get confused.
+	 */
+	raw_spin_lock(&self->pi_lock);
+	__set_current_state_no_track(self->saved_state);
+	self->saved_state = TASK_RUNNING;
+	raw_spin_unlock(&self->pi_lock);
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit
+	 * unconditionally. We might have to fix that up:
+	 */
+	fixup_rt_mutex_waiters(lock);
+
+	BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock));
+	BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry));
+}
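
The saved_state dance above exists because a task may block on a spinlock-converted rtmutex while it already has a sleeping state set (say TASK_INTERRUPTIBLE just before a schedule()). The lock wait itself must be uninterruptible, but a genuine wakeup arriving in the meantime must not be lost. A toy model of the idea -- illustrative only, hypothetical names, not the kernel's actual types:

enum demo_state { DEMO_RUNNING, DEMO_INTERRUPTIBLE, DEMO_UNINTERRUPTIBLE };

struct demo_task {
	enum demo_state state;		/* what the scheduler acts on */
	enum demo_state saved_state;	/* caller's state, parked here */
};

static void demo_lock_block(struct demo_task *t)
{
	t->saved_state = t->state;		/* may be DEMO_INTERRUPTIBLE */
	t->state = DEMO_UNINTERRUPTIBLE;	/* lock waits are not signalable */
	/* ... block until the lock is handed over ... */
	t->state = t->saved_state;		/* restore; a real wakeup may  */
	t->saved_state = DEMO_RUNNING;		/* have updated saved_state    */
}

static void demo_real_wakeup(struct demo_task *t)
{
	/* A non-lock wakeup arriving while blocked lands on the saved copy. */
	if (t->saved_state == DEMO_INTERRUPTIBLE)
		t->saved_state = DEMO_RUNNING;
}
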
+
+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
+{
+	struct rt_mutex_waiter waiter;
+	unsigned long flags;
+
+	rt_mutex_init_waiter(&waiter, true);
+
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	rt_spin_lock_slowlock_locked(lock, &waiter, flags);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	debug_rt_mutex_free_waiter(&waiter);
+}
+
+static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
+					     struct wake_q_head *wake_q,
+					     struct wake_q_head *wq_sleeper);
+/*
+ * Slow path to release a rt_mutex spin_lock style
+ */
+void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+	unsigned long flags;
+	DEFINE_WAKE_Q(wake_q);
+	DEFINE_WAKE_Q(wake_sleeper_q);
+	bool postunlock;
+
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+	if (postunlock)
+		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+}
+
+void __lockfunc rt_spin_lock(spinlock_t *lock)
+{
+	sleeping_lock_inc();
+	rcu_read_lock();
+	migrate_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+}
+EXPORT_SYMBOL(rt_spin_lock);
+
+void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+{
+	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
+	sleeping_lock_inc();
+	rcu_read_lock();
+	migrate_disable();
+	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+#endif
+
+void __lockfunc rt_spin_unlock(spinlock_t *lock)
+{
+	/* NOTE: we always pass in '1' for nested, for simplicity */
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+	migrate_enable();
+	rcu_read_unlock();
+	sleeping_lock_dec();
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
+{
+	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(__rt_spin_unlock);
+
+/*
+ * Wait for the lock to get unlocked: instead of polling for an unlock
+ * (like raw spinlocks do), we lock and unlock, to force the kernel to
+ * schedule if there's contention:
+ */
+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
+{
+	spin_lock(lock);
+	spin_unlock(lock);
+}
+EXPORT_SYMBOL(rt_spin_unlock_wait);
+
+int __lockfunc rt_spin_trylock(spinlock_t *lock)
+{
+	int ret;
+
+	sleeping_lock_inc();
+	migrate_disable();
+	ret = __rt_mutex_trylock(&lock->lock);
+	if (ret) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		rcu_read_lock();
+	} else {
+		migrate_enable();
+		sleeping_lock_dec();
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock);
+
+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = __rt_mutex_trylock(&lock->lock);
+	if (ret) {
+		sleeping_lock_inc();
+		rcu_read_lock();
+		migrate_disable();
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	} else
+		local_bh_enable();
+	return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_bh);
+
+int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+{
+	int ret;
+
+	*flags = 0;
+	ret = __rt_mutex_trylock(&lock->lock);
+	if (ret) {
+		sleeping_lock_inc();
+		rcu_read_lock();
+		migrate_disable();
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_irqsave);
+
+void
+__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+}
+EXPORT_SYMBOL(__rt_spin_lock_init);
+
+#endif /* PREEMPT_RT_FULL */
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	static inline int __sched
+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+
+	if (!hold_ctx)
+		return 0;
+
+	if (unlikely(ctx == hold_ctx))
+		return -EALREADY;
+
+	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+		ctx->contending_lock = ww;
+#endif
+		return -EDEADLK;
+	}
+
+	return 0;
+}
+#else
+	static inline int __sched
+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	BUG();
+	return 0;
+}
+
+#endif
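
The `ctx->stamp - hold_ctx->stamp <= LONG_MAX` test above is the standard wrap-safe way to ask whether our context's stamp is the newer (larger) one; equal stamps fall through to the pointer tie-break, and the newer context is the one that backs off with -EDEADLK. The arithmetic, as a stand-alone check -- illustrative only:

#include <stdio.h>
#include <limits.h>

/*
 * Stamps come from a monotonically increasing counter, so the unsigned
 * difference stays <= LONG_MAX exactly when a was assigned at or after
 * b, even if the counter wrapped in between.
 */
static int stamp_is_newer(unsigned long a, unsigned long b)
{
	return a - b <= LONG_MAX;
}

int main(void)
{
	printf("%d\n", stamp_is_newer(1000UL, 10UL));		/* 1: plain case */
	printf("%d\n", stamp_is_newer(5UL, ULONG_MAX - 2));	/* 1: 5 came after wrap */
	printf("%d\n", stamp_is_newer(10UL, 1000UL));		/* 0: older */
	return 0;
}
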
+
+static inline int
+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+		     struct rt_mutex_waiter *waiter)
+{
+	return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
+}
+
 /*
  * Task blocks on lock.
  *
@@ -951,6 +1334,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		return -EDEADLK;

 	raw_spin_lock(&task->pi_lock);
+	/*
+	 * In the case of futex requeue PI, this will be a proxy
+	 * lock. The task will wake unaware that it is enqueued on
+	 * this lock. Avoid blocking on two locks and corrupting
+	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
+	 * flag. futex_wait_requeue_pi() sets this when it wakes up
+	 * before requeue (due to a signal or timeout). Do not enqueue
+	 * the task if PI_WAKEUP_INPROGRESS is set.
+	 */
+	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
+		raw_spin_unlock(&task->pi_lock);
+		return -EAGAIN;
+	}
+
+	BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+
 	waiter->task = task;
 	waiter->lock = lock;
 	waiter->prio = task->prio;
@@ -974,7 +1373,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		rt_mutex_enqueue_pi(owner, waiter);

 		rt_mutex_adjust_prio(owner);
-		if (owner->pi_blocked_on)
+		if (rt_mutex_real_waiter(owner->pi_blocked_on))
 			chain_walk = 1;
 	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
 		chain_walk = 1;
@@ -1016,6 +1415,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 * Called with lock->wait_lock held and interrupts disabled.
 */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+				    struct wake_q_head *wake_sleeper_q,
 				    struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *waiter;
@@ -1055,7 +1455,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 	 * Pairs with preempt_enable() in rt_mutex_postunlock();
 	 */
 	preempt_disable();
-	wake_q_add(wake_q, waiter->task);
+	if (waiter->savestate)
+		wake_q_add_sleeper(wake_sleeper_q, waiter->task);
+	else
+		wake_q_add(wake_q, waiter->task);
 	raw_spin_unlock(&current->pi_lock);
 }

@@ -1070,7 +1473,7 @@ static void remove_waiter(struct rt_mutex *lock,
 {
 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
-	struct rt_mutex *next_lock;
+	struct rt_mutex *next_lock = NULL;

 	lockdep_assert_held(&lock->wait_lock);

@@ -1096,7 +1499,8 @@ static void remove_waiter(struct rt_mutex *lock,
 	rt_mutex_adjust_prio(owner);

 	/* Store the lock on which owner is blocked or NULL */
-	next_lock = task_blocked_on_lock(owner);
+	if (rt_mutex_real_waiter(owner->pi_blocked_on))
+		next_lock = task_blocked_on_lock(owner);

 	raw_spin_unlock(&owner->pi_lock);

@@ -1132,26 +1536,28 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	raw_spin_lock_irqsave(&task->pi_lock, flags);

 	waiter = task->pi_blocked_on;
-	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+	if (!rt_mutex_real_waiter(waiter) ||
+	    rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
 	next_lock = waiter->lock;
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);

+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
 				   next_lock, NULL, task);
 }

-void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
 {
 	debug_rt_mutex_init_waiter(waiter);
 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
 	RB_CLEAR_NODE(&waiter->tree_entry);
 	waiter->task = NULL;
+	waiter->savestate = savestate;
 }

 /**
@@ -1167,7 +1573,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct hrtimer_sleeper *timeout,
-		    struct rt_mutex_waiter *waiter)
+		    struct rt_mutex_waiter *waiter,
+		    struct ww_acquire_ctx *ww_ctx)
 {
 	int ret = 0;

@@ -1176,16 +1583,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		if (try_to_take_rt_mutex(lock, current, waiter))
 			break;

-		/*
-		 * TASK_INTERRUPTIBLE checks for signals and
-		 * timeout. Ignored otherwise.
-		 */
-		if (likely(state == TASK_INTERRUPTIBLE)) {
-			/* Signal pending? */
-			if (signal_pending(current))
-				ret = -EINTR;
-			if (timeout && !timeout->task)
-				ret = -ETIMEDOUT;
+		if (timeout && !timeout->task) {
+			ret = -ETIMEDOUT;
+			break;
+		}
+		if (signal_pending_state(state, current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (ww_ctx && ww_ctx->acquired > 0) {
+			ret = __mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				break;
 		}
@@ -1224,33 +1632,104 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	}
 }

-/*
- * Slow path lock function:
- */
-static int __sched
-rt_mutex_slowlock(struct rt_mutex *lock, int state,
-		  struct hrtimer_sleeper *timeout,
-		  enum rtmutex_chainwalk chwalk)
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+						   struct ww_acquire_ctx *ww_ctx)
 {
-	struct rt_mutex_waiter waiter;
-	unsigned long flags;
-	int ret = 0;
+#ifdef CONFIG_DEBUG_MUTEXES
+	/*
+	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+	 * but released with a normal mutex_unlock in this call.
+	 *
+	 * This should never happen, always use ww_mutex_unlock.
+	 */
+	DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+	/*
+	 * Not quite done after calling ww_acquire_done() ?
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

-	rt_mutex_init_waiter(&waiter);
+	if (ww_ctx->contending_lock) {
+		/*
+		 * After -EDEADLK you tried to
+		 * acquire a different ww_mutex? Bad!
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+		/*
+		 * You called ww_mutex_lock after receiving -EDEADLK,
+		 * but 'forgot' to unlock everything else first?
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+		ww_ctx->contending_lock = NULL;
+	}

 	/*
-	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
-	 * be called in early boot if the cmpxchg() fast path is disabled
-	 * (debug, no architecture support). In this case we will acquire the
-	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
-	 * enable interrupts in that early boot case. So we need to use the
-	 * irqsave/restore variants.
+	 * Naughty, using a different class will lead to undefined behavior!
 	 */
-	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+	ww_ctx->acquired++;
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void ww_mutex_account_lock(struct rt_mutex *lock,
+				  struct ww_acquire_ctx *ww_ctx)
+{
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+	struct rt_mutex_waiter *waiter, *n;
+
+	/*
+	 * This branch gets optimized out for the common case,
+	 * and is only important for ww_mutex_lock.
+	 */
+	ww_mutex_lock_acquired(ww, ww_ctx);
+	ww->ctx = ww_ctx;
+
+	/*
+	 * Give any possible sleeping processes the chance to wake up,
+	 * so they can recheck if they have to back off.
+	 */
+	rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root,
+					     tree_entry) {
+		/* XXX debug rt mutex waiter wakeup */
+
+		BUG_ON(waiter->lock != lock);
+		rt_mutex_wake_waiter(waiter);
+	}
+}
+
+#else
+
+static void ww_mutex_account_lock(struct rt_mutex *lock,
+				  struct ww_acquire_ctx *ww_ctx)
+{
+	BUG();
+}
+#endif
+
+int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+				     struct hrtimer_sleeper *timeout,
+				     enum rtmutex_chainwalk chwalk,
+				     struct ww_acquire_ctx *ww_ctx,
+				     struct rt_mutex_waiter *waiter)
+{
+	int ret;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (ww_ctx) {
+		struct ww_mutex *ww;
+
+		ww = container_of(lock, struct ww_mutex, base.lock);
+		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+			return -EALREADY;
+	}
+#endif

 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+		if (ww_ctx)
+			ww_mutex_account_lock(lock, ww_ctx);
 		return 0;
 	}

@@ -1260,16 +1739,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	if (unlikely(timeout))
 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);

-	if (likely(!ret))
+	if (likely(!ret)) {
 		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+		ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
+					  ww_ctx);
+	} else if (ww_ctx) {
+		/* ww_mutex received EDEADLK, let it become EALREADY */
+		ret = __mutex_lock_check_stamp(lock, ww_ctx);
+		BUG_ON(!ret);
+	}

 	if (unlikely(ret)) {
 		__set_current_state(TASK_RUNNING);
-		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		remove_waiter(lock, waiter);
+		/* ww_mutex wants to report EDEADLK/EALREADY, let it */
+		if (!ww_ctx)
+			rt_mutex_handle_deadlock(ret, chwalk, waiter);
+	} else if (ww_ctx) {
+		ww_mutex_account_lock(lock, ww_ctx);
 	}

 	/*
@@ -1277,6 +1766,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	 * unconditionally. We might have to fix that up.
 	 */
 	fixup_rt_mutex_waiters(lock);
+	return ret;
+}
+
+/*
+ * Slow path lock function:
+ */
+static int __sched
+rt_mutex_slowlock(struct rt_mutex *lock, int state,
+		  struct hrtimer_sleeper *timeout,
+		  enum rtmutex_chainwalk chwalk,
+		  struct ww_acquire_ctx *ww_ctx)
+{
+	struct rt_mutex_waiter waiter;
+	unsigned long flags;
+	int ret = 0;
+
+	rt_mutex_init_waiter(&waiter, false);
+
+	/*
+	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+	 * be called in early boot if the cmpxchg() fast path is disabled
+	 * (debug, no architecture support). In this case we will acquire the
+	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
+	 * enable interrupts in that early boot case. So we need to use the
+	 * irqsave/restore variants.
+	 */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+	ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
+				       &waiter);

 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

@@ -1337,7 +1856,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 * Return whether the current task needs to call rt_mutex_postunlock().
 */
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
-					struct wake_q_head *wake_q)
+					struct wake_q_head *wake_q,
+					struct wake_q_head *wake_sleeper_q)
 {
 	unsigned long flags;

@@ -1391,7 +1911,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	 *
 	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	mark_wakeup_next_waiter(wake_q, lock);
+	mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

 	return true; /* call rt_mutex_postunlock() */
@@ -1405,29 +1925,45 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
+		  struct ww_acquire_ctx *ww_ctx,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				enum rtmutex_chainwalk chwalk))
+				enum rtmutex_chainwalk chwalk,
+				struct ww_acquire_ctx *ww_ctx))
 {
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;

-	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	/*
+	 * If rt_mutex blocks, the function sched_submit_work will not call
+	 * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
+	 * We must call blk_schedule_flush_plug here, if we don't call it,
+	 * a deadlock in I/O may happen.
+	 */
+	if (unlikely(blk_needs_flush_plug(current)))
+		blk_schedule_flush_plug(current);
+
+	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
 }

 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			enum rtmutex_chainwalk chwalk,
+			struct ww_acquire_ctx *ww_ctx,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
-				      enum rtmutex_chainwalk chwalk))
+				      enum rtmutex_chainwalk chwalk,
+				      struct ww_acquire_ctx *ww_ctx))
 {
 	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

-	return slowfn(lock, state, timeout, chwalk);
+	if (unlikely(blk_needs_flush_plug(current)))
+		blk_schedule_flush_plug(current);
+
+	return slowfn(lock, state, timeout, chwalk, ww_ctx);
 }

 static inline int
@@ -1443,9 +1979,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
 /*
 * Performs the wakeup of the the top-waiter and re-enables preemption.
 */
-void rt_mutex_postunlock(struct wake_q_head *wake_q)
+void rt_mutex_postunlock(struct wake_q_head *wake_q,
+			 struct wake_q_head *wake_sleeper_q)
 {
 	wake_up_q(wake_q);
+	wake_up_q_sleeper(wake_sleeper_q);

 	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
 	preempt_enable();
@@ -1454,23 +1992,46 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q)
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
		    bool (*slowfn)(struct rt_mutex *lock,
-				   struct wake_q_head *wqh))
+				   struct wake_q_head *wqh,
+				   struct wake_q_head *wq_sleeper))
 {
	DEFINE_WAKE_Q(wake_q);
+	DEFINE_WAKE_Q(wake_sleeper_q);

	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

-	if (slowfn(lock, &wake_q))
-		rt_mutex_postunlock(&wake_q);
+	if (slowfn(lock, &wake_q, &wake_sleeper_q))
+		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
 }

-static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
 {
	might_sleep();
+	return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
+}
+
+/**
+ * rt_mutex_lock_state - lock a rt_mutex with a given state
+ *
+ * @lock:	The rt_mutex to be locked
+ * @state:	The state to set when blocking on the rt_mutex
+ */
+static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,
+					      unsigned int subclass, int state)
+{
+	int ret;

	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	ret = __rt_mutex_lock_state(lock, state);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+	rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);
 }

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1511,16 +2072,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
 */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, 1, _RET_IP_);
-
-	return ret;
+	return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

@@ -1537,6 +2089,22 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
	return __rt_mutex_slowtrylock(lock);
 }

+/**
+ * rt_mutex_lock_killable - lock a rt_mutex killable
+ *
+ * @lock:		the rt_mutex to be locked
+ * @detect_deadlock:	deadlock detection on/off
+ *
+ * Returns:
+ *  0		on success
+ * -EINTR	when interrupted by a signal
+ */
+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
+{
+	return rt_mutex_lock_state(lock, 0, TASK_KILLABLE);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+
 /**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			the timeout structure is provided
@@ -1560,6 +2128,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				      RT_MUTEX_MIN_CHAINWALK,
+				      NULL,
				      rt_mutex_slowlock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);
@@ -1568,6 +2137,18 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

+int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (WARN_ON_ONCE(in_irq() || in_nmi()))
+#else
+	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+#endif
+		return 0;
+
+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+}
+
 /**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
@@ -1583,10 +2164,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
	int ret;

-	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
-		return 0;
-
-	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = __rt_mutex_trylock(lock);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

@@ -1594,6 +2172,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);

+void __sched __rt_mutex_unlock(struct rt_mutex *lock)
+{
+	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+}
+
 /**
 * rt_mutex_unlock - unlock a rt_mutex
 *
@@ -1602,16 +2185,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
	mutex_release(&lock->dep_map, 1, _RET_IP_);
-	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+	__rt_mutex_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);

-/**
- * Futex variant, that since futex variants do not use the fast-path, can be
- * simple and will not need to retry.
- */
-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
-				     struct wake_q_head *wake_q)
+static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
+					     struct wake_q_head *wake_q,
+					     struct wake_q_head *wq_sleeper)
 {
	lockdep_assert_held(&lock->wait_lock);

@@ -1628,23 +2208,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
	 * avoid inversion prior to the wakeup. preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
-	mark_wakeup_next_waiter(wake_q, lock);
+	mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);

	return true; /* call postunlock() */
 }

+/**
+ * Futex variant, that since futex variants do not use the fast-path, can be
+ * simple and will not need to retry.
+ */
+bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+				     struct wake_q_head *wake_q,
+				     struct wake_q_head *wq_sleeper)
+{
+	return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);
+}
+
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
	DEFINE_WAKE_Q(wake_q);
+	DEFINE_WAKE_Q(wake_sleeper_q);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
+	postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
-		rt_mutex_postunlock(&wake_q);
+		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
 }

 /**
@@ -1683,7 +2275,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name,
	if (name && key)
		debug_rt_mutex_init(lock, name, key);
 }
-EXPORT_SYMBOL_GPL(__rt_mutex_init);
+EXPORT_SYMBOL(__rt_mutex_init);

 /**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
@@ -1703,6 +2295,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
 {
	__rt_mutex_init(lock, NULL, NULL);
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/*
+	 * get another key class for the wait_lock. LOCK_PI and UNLOCK_PI is
+	 * holding the ->wait_lock of the proxy_lock while unlocking a sleeping
+	 * lock.
+	 */
+	raw_spin_lock_init(&lock->wait_lock);
+#endif
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
 }
@@ -1725,6 +2325,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock)
	rt_mutex_set_owner(lock, NULL);
 }

+static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
+{
+	struct task_struct *tsk = current;
+	/*
+	 * RT has a problem here when the wait got interrupted by a timeout
+	 * or a signal. task->pi_blocked_on is still set. The task must
+	 * acquire the hash bucket lock when returning from this function.
+	 *
+	 * If the hash bucket lock is contended then the
+	 * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
+	 * task_blocks_on_rt_mutex() will trigger. This can be avoided by
+	 * clearing task->pi_blocked_on which removes the task from the
+	 * boosting chain of the rtmutex. That's correct because the task
+	 * is no longer blocked on it.
+	 */
+	raw_spin_lock(&tsk->pi_lock);
+	tsk->pi_blocked_on = NULL;
+	raw_spin_unlock(&tsk->pi_lock);
+}
+
 /**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
@@ -1755,6 +2375,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

+#ifdef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * In PREEMPT_RT there's an added race.
+	 * If the task, that we are about to requeue, times out,
+	 * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
+	 * to skip this task. But right after the task sets
+	 * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
+	 * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
+	 * This will replace the PI_WAKEUP_INPROGRESS with the actual
+	 * lock that it blocks on. We *must not* place this task
+	 * on this proxy lock in that case.
+	 *
+	 * To prevent this race, we first take the task's pi_lock
+	 * and check if it has updated its pi_blocked_on. If it has,
+	 * we assume that it woke up and we return -EAGAIN.
+	 * Otherwise, we set the task's pi_blocked_on to
+	 * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+	 * it will know that we are in the process of requeuing it.
+	 */
+	raw_spin_lock(&task->pi_lock);
+	if (task->pi_blocked_on) {
+		raw_spin_unlock(&task->pi_lock);
+		return -EAGAIN;
+	}
+	task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+	raw_spin_unlock(&task->pi_lock);
+#endif
+
	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);
@@ -1769,6 +2417,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
		ret = 0;
	}

+	if (ret)
+		fixup_rt_mutex_blocked(lock);
+
	debug_rt_mutex_print_deadlock(waiter);

	return ret;
@@ -1854,12 +2505,15 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
+	if (ret)
+		fixup_rt_mutex_blocked(lock);
+
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
@@ -1921,3 +2575,99 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,

	return cleanup;
 }
+
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned tmp;
+
+	if (ctx->deadlock_inject_countdown-- == 0) {
+		tmp = ctx->deadlock_inject_interval;
+		if (tmp > UINT_MAX/4)
+			tmp = UINT_MAX;
+		else
+			tmp = tmp*2 + tmp + tmp/2;
+
+		ctx->deadlock_inject_interval = tmp;
+		ctx->deadlock_inject_countdown = tmp;
+		ctx->contending_lock = lock;
+
+		ww_mutex_unlock(lock);
+
+		return -EDEADLK;
+	}
+#endif
+
+	return 0;
+}
+
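When CONFIG_DEBUG_WW_MUTEX_SLOWPATH is enabled, the helper above injects an artificial -EDEADLK and then grows the injection interval by a factor of 3.5 (tmp*2 + tmp + tmp/2), saturating near UINT_MAX, so injections become progressively rarer. The arithmetic as a stand-alone program -- illustrative only; the starting interval is hypothetical:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned tmp = 4;	/* hypothetical starting interval */
	int i;

	for (i = 0; i < 5; i++) {
		if (tmp > UINT_MAX / 4)
			tmp = UINT_MAX;
		else
			tmp = tmp * 2 + tmp + tmp / 2;	/* 3.5x growth */
		printf("next injection after %u acquisitions\n", tmp);
	}
	return 0;	/* prints 14, 49, 171, 598, 2093 */
}
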
+#ifdef CONFIG_PREEMPT_RT_FULL
|
|
+int __sched
|
|
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ might_sleep();
|
|
+
|
|
+ mutex_acquire_nest(&lock->base.dep_map, 0, 0,
|
|
+ ctx ? &ctx->dep_map : NULL, _RET_IP_);
|
|
+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
|
|
+ ctx);
|
|
+ if (ret)
|
|
+ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
|
|
+ else if (!ret && ctx && ctx->acquired > 1)
|
|
+ return ww_mutex_deadlock_injection(lock, ctx);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
|
|
+
|
|
+int __sched
|
|
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ might_sleep();
|
|
+
|
|
+ mutex_acquire_nest(&lock->base.dep_map, 0, 0,
|
|
+ ctx ? &ctx->dep_map : NULL, _RET_IP_);
|
|
+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
|
|
+ ctx);
|
|
+ if (ret)
|
|
+ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
|
|
+ else if (!ret && ctx && ctx->acquired > 1)
|
|
+ return ww_mutex_deadlock_injection(lock, ctx);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ww_mutex_lock);
|
|
+
|
|
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
|
|
+{
|
|
+ int nest = !!lock->ctx;
|
|
+
|
|
+ /*
|
|
+ * The unlocking fastpath is the 0->1 transition from 'locked'
|
|
+ * into 'unlocked' state:
|
|
+ */
|
|
+ if (nest) {
|
|
+#ifdef CONFIG_DEBUG_MUTEXES
|
|
+ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
|
|
+#endif
|
|
+ if (lock->ctx->acquired > 0)
|
|
+ lock->ctx->acquired--;
|
|
+ lock->ctx = NULL;
|
|
+ }
|
|
+
|
|
+ mutex_release(&lock->base.dep_map, nest, _RET_IP_);
|
|
+ __rt_mutex_unlock(&lock->base.lock);
|
|
+}
|
|
+EXPORT_SYMBOL(ww_mutex_unlock);
|
|
+
|
|
+int __rt_mutex_owner_current(struct rt_mutex *lock)
|
|
+{
|
|
+ return rt_mutex_owner(lock) == current;
|
|
+}
|
|
+EXPORT_SYMBOL(__rt_mutex_owner_current);
|
|
+#endif
|
|
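The CONFIG_PREEMPT_RT_FULL block above reroutes the wound/wait mutex API through rt_mutex_slowlock(). For orientation, here is a minimal sketch of the acquire/backoff protocol these entry points serve, assuming the stock in-tree ww_mutex API; demo_ww_class and demo_lock_pair are hypothetical names, not part of the patch.

/* Illustrative sketch only -- not patch code. */
static DEFINE_WW_CLASS(demo_ww_class);

static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_ww_class);
retry:
	if (ww_mutex_lock(a, &ctx) == -EDEADLK) {
		/* An older context owns @a: sleep until it is released. */
		ww_mutex_lock_slow(a, &ctx);
	}
	if (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		/* Back off completely, then retry with @b taken first. */
		ww_mutex_unlock(a);
		swap(a, b);
		goto retry;
	}
	ww_acquire_done(&ctx);

	/* ... critical section with both locks held ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}

The -EDEADLK return that ww_mutex_deadlock_injection() fakes under CONFIG_DEBUG_WW_MUTEX_SLOWPATH exercises exactly the backoff branch of this loop.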
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index ca6fb4890..8e0c59227 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -15,6 +15,7 @@
 
 #include <linux/rtmutex.h>
 #include <linux/sched/wake_q.h>
+#include <linux/sched/debug.h>
 
 /*
  * This is the control structure for tasks blocked on a rt_mutex,
@@ -29,6 +30,7 @@ struct rt_mutex_waiter {
 	struct rb_node		pi_tree_entry;
 	struct task_struct	*task;
 	struct rt_mutex		*lock;
+	bool			savestate;
#ifdef CONFIG_DEBUG_RT_MUTEXES
 	unsigned long		ip;
 	struct pid		*deadlock_task_pid;
@@ -130,11 +132,14 @@ enum rtmutex_chainwalk {
 /*
  * PI-futex support (proxy locking functions, etc.):
  */
+#define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *) 1)
+#define PI_REQUEUE_INPROGRESS	((struct rt_mutex_waiter *) 2)
+
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				       struct task_struct *proxy_owner);
 extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
-extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate);
 extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 				       struct rt_mutex_waiter *waiter,
 				       struct task_struct *task);
@@ -152,9 +157,27 @@ extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
 
 extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
-				    struct wake_q_head *wqh);
-
-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+				    struct wake_q_head *wqh,
+				    struct wake_q_head *wq_sleeper);
+
+extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
+				struct wake_q_head *wake_sleeper_q);
+
+/* RW semaphore special interface */
+struct ww_acquire_ctx;
+
+extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
+extern int __rt_mutex_trylock(struct rt_mutex *lock);
+extern void __rt_mutex_unlock(struct rt_mutex *lock);
+int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+				     struct hrtimer_sleeper *timeout,
+				     enum rtmutex_chainwalk chwalk,
+				     struct ww_acquire_ctx *ww_ctx,
+				     struct rt_mutex_waiter *waiter);
+void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
+					  struct rt_mutex_waiter *waiter,
+					  unsigned long flags);
+void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock);
 
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
new file mode 100644
index 000000000..0ae8c62ea
--- /dev/null
+++ b/kernel/locking/rwlock-rt.c
@@ -0,0 +1,384 @@
+/*
+ */
+#include <linux/sched/debug.h>
+#include <linux/export.h>
+
+#include "rtmutex_common.h"
+#include <linux/rwlock_types_rt.h>
+
+/*
+ * RT-specific reader/writer locks
+ *
+ * write_lock()
+ *  1) Lock lock->rtmutex
+ *  2) Remove the reader BIAS to force readers into the slow path
+ *  3) Wait until all readers have left the critical region
+ *  4) Mark it write locked
+ *
+ * write_unlock()
+ *  1) Remove the write locked marker
+ *  2) Set the reader BIAS so readers can use the fast path again
+ *  3) Unlock lock->rtmutex to release blocked readers
+ *
+ * read_lock()
+ *  1) Try fast path acquisition (reader BIAS is set)
+ *  2) Take lock->rtmutex.wait_lock which protects the writelocked flag
+ *  3) If !writelocked, acquire it for read
+ *  4) If writelocked, block on lock->rtmutex
+ *  5) unlock lock->rtmutex, goto 1)
+ *
+ * read_unlock()
+ *  1) Try fast path release (reader count != 1)
+ *  2) Wake the writer waiting in write_lock()#3
+ *
+ * read_lock()#3 has the consequence, that rw locks on RT are not writer
+ * fair, but writers, which should be avoided in RT tasks (think tasklist
+ * lock), are subject to the rtmutex priority/DL inheritance mechanism.
+ *
+ * It's possible to make the rw locks writer fair by keeping a list of
+ * active readers. A blocked writer would force all newly incoming readers
+ * to block on the rtmutex, but the rtmutex would have to be proxy locked
+ * for one reader after the other. We can't use multi-reader inheritance
+ * because there is no way to support that with
+ * SCHED_DEADLINE. Implementing the one by one reader boosting/handover
+ * mechanism is a major surgery for a very dubious value.
+ *
+ * The risk of writer starvation is there, but the pathological use cases
+ * which trigger it are not necessarily the typical RT workloads.
+ */
+
+void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name,
+			     struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+	atomic_set(&lock->readers, READER_BIAS);
+	rt_mutex_init(&lock->rtmutex);
+	lock->rtmutex.save_state = 1;
+}
+
+int __read_rt_trylock(struct rt_rw_lock *lock)
+{
+	int r, old;
+
+	/*
+	 * Increment reader count, if lock->readers < 0, i.e. READER_BIAS is
+	 * set.
+	 */
+	for (r = atomic_read(&lock->readers); r < 0;) {
+		old = atomic_cmpxchg(&lock->readers, r, r + 1);
+		if (likely(old == r))
+			return 1;
+		r = old;
+	}
+	return 0;
+}
+
+void __sched __read_rt_lock(struct rt_rw_lock *lock)
+{
+	struct rt_mutex *m = &lock->rtmutex;
+	struct rt_mutex_waiter waiter;
+	unsigned long flags;
+
+	if (__read_rt_trylock(lock))
+		return;
+
+	raw_spin_lock_irqsave(&m->wait_lock, flags);
+	/*
+	 * Allow readers as long as the writer has not completely
+	 * acquired the semaphore for write.
+	 */
+	if (atomic_read(&lock->readers) != WRITER_BIAS) {
+		atomic_inc(&lock->readers);
+		raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+		return;
+	}
+
+	/*
+	 * Call into the slow lock path with the rtmutex->wait_lock
+	 * held, so this can't result in the following race:
+	 *
+	 * Reader1    Reader2    Writer
+	 *   read_lock()
+	 *                       write_lock()
+	 *                       rtmutex_lock(m)
+	 *                       swait()
+	 *              read_lock()
+	 *              unlock(m->wait_lock)
+	 *   read_unlock()
+	 *              swake()
+	 *                       lock(m->wait_lock)
+	 *                       lock->writelocked=true
+	 *                       unlock(m->wait_lock)
+	 *
+	 *                       write_unlock()
+	 *                       lock->writelocked=false
+	 *              read_lock()
+	 *                       rtmutex_unlock(m)
+	 *                                  write_lock()
+	 *                                  rtmutex_lock(m)
+	 *                                  swait()
+	 *              rtmutex_lock(m)
+	 *
+	 * That would put Reader1 behind the writer waiting on
+	 * Reader2 to call read_unlock() which might be unbound.
+	 */
+	rt_mutex_init_waiter(&waiter, true);
+	rt_spin_lock_slowlock_locked(m, &waiter, flags);
+	/*
+	 * The slowlock() above is guaranteed to return with the rtmutex
+	 * now held, so there can't be a writer active. Increment the reader
+	 * count and immediately drop the rtmutex again.
+	 */
+	atomic_inc(&lock->readers);
+	raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+	rt_spin_lock_slowunlock(m);
+
+	debug_rt_mutex_free_waiter(&waiter);
+}
+
+void __read_rt_unlock(struct rt_rw_lock *lock)
+{
+	struct rt_mutex *m = &lock->rtmutex;
+	struct task_struct *tsk;
+
+	/*
+	 * sem->readers can only hit 0 when a writer is waiting for the
+	 * active readers to leave the critical region.
+	 */
+	if (!atomic_dec_and_test(&lock->readers))
+		return;
+
+	raw_spin_lock_irq(&m->wait_lock);
+	/*
+	 * Wake the writer, i.e. the rtmutex owner. It might release the
+	 * rtmutex concurrently in the fast path, but to clean up the rw
+	 * lock it needs to acquire m->wait_lock. The worst case which can
+	 * happen is a spurious wakeup.
+	 */
+	tsk = rt_mutex_owner(m);
+	if (tsk)
+		wake_up_process(tsk);
+
+	raw_spin_unlock_irq(&m->wait_lock);
+}
+
+static void __write_unlock_common(struct rt_rw_lock *lock, int bias,
+				  unsigned long flags)
+{
+	struct rt_mutex *m = &lock->rtmutex;
+
+	atomic_add(READER_BIAS - bias, &lock->readers);
+	raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+	rt_spin_lock_slowunlock(m);
+}
+
+void __sched __write_rt_lock(struct rt_rw_lock *lock)
+{
+	struct rt_mutex *m = &lock->rtmutex;
+	struct task_struct *self = current;
+	unsigned long flags;
+
+	/* Take the rtmutex as a first step */
+	__rt_spin_lock(m);
+
+	/* Force readers into slow path */
+	atomic_sub(READER_BIAS, &lock->readers);
+
+	raw_spin_lock_irqsave(&m->wait_lock, flags);
+
+	raw_spin_lock(&self->pi_lock);
+	self->saved_state = self->state;
+	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+	raw_spin_unlock(&self->pi_lock);
+
+	for (;;) {
+		/* Have all readers left the critical region? */
+		if (!atomic_read(&lock->readers)) {
+			atomic_set(&lock->readers, WRITER_BIAS);
+			raw_spin_lock(&self->pi_lock);
+			__set_current_state_no_track(self->saved_state);
+			self->saved_state = TASK_RUNNING;
+			raw_spin_unlock(&self->pi_lock);
+			raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+			return;
+		}
+
+		raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+
+		if (atomic_read(&lock->readers) != 0)
+			schedule();
+
+		raw_spin_lock_irqsave(&m->wait_lock, flags);
+
+		raw_spin_lock(&self->pi_lock);
+		__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+		raw_spin_unlock(&self->pi_lock);
+	}
+}
+
+int __write_rt_trylock(struct rt_rw_lock *lock)
+{
+	struct rt_mutex *m = &lock->rtmutex;
+	unsigned long flags;
+
+	if (!__rt_mutex_trylock(m))
+		return 0;
+
+	atomic_sub(READER_BIAS, &lock->readers);
+
+	raw_spin_lock_irqsave(&m->wait_lock, flags);
+	if (!atomic_read(&lock->readers)) {
+		atomic_set(&lock->readers, WRITER_BIAS);
+		raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+		return 1;
+	}
+	__write_unlock_common(lock, 0, flags);
+	return 0;
+}
+
+void __write_rt_unlock(struct rt_rw_lock *lock)
+{
+	struct rt_mutex *m = &lock->rtmutex;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&m->wait_lock, flags);
+	__write_unlock_common(lock, WRITER_BIAS, flags);
+}
+
+/* Map the reader biased implementation */
+static inline int do_read_rt_trylock(rwlock_t *rwlock)
+{
+	return __read_rt_trylock(rwlock);
+}
+
+static inline int do_write_rt_trylock(rwlock_t *rwlock)
+{
+	return __write_rt_trylock(rwlock);
+}
+
+static inline void do_read_rt_lock(rwlock_t *rwlock)
+{
+	__read_rt_lock(rwlock);
+}
+
+static inline void do_write_rt_lock(rwlock_t *rwlock)
+{
+	__write_rt_lock(rwlock);
+}
+
+static inline void do_read_rt_unlock(rwlock_t *rwlock)
+{
+	__read_rt_unlock(rwlock);
+}
+
+static inline void do_write_rt_unlock(rwlock_t *rwlock)
+{
+	__write_rt_unlock(rwlock);
+}
+
+static inline void do_rwlock_rt_init(rwlock_t *rwlock, const char *name,
+				     struct lock_class_key *key)
+{
+	__rwlock_biased_rt_init(rwlock, name, key);
+}
+
+int __lockfunc rt_read_can_lock(rwlock_t *rwlock)
+{
+	return atomic_read(&rwlock->readers) < 0;
+}
+
+int __lockfunc rt_write_can_lock(rwlock_t *rwlock)
+{
+	return atomic_read(&rwlock->readers) == READER_BIAS;
+}
+
+/*
+ * The common functions which get wrapped into the rwlock API.
+ */
+int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+{
+	int ret;
+
+	sleeping_lock_inc();
+	migrate_disable();
+	ret = do_read_rt_trylock(rwlock);
+	if (ret) {
+		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+		rcu_read_lock();
+	} else {
+		migrate_enable();
+		sleeping_lock_dec();
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+{
+	int ret;
+
+	sleeping_lock_inc();
+	migrate_disable();
+	ret = do_write_rt_trylock(rwlock);
+	if (ret) {
+		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+		rcu_read_lock();
+	} else {
+		migrate_enable();
+		sleeping_lock_dec();
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+void __lockfunc rt_read_lock(rwlock_t *rwlock)
+{
+	sleeping_lock_inc();
+	rcu_read_lock();
+	migrate_disable();
+	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+	do_read_rt_lock(rwlock);
+}
+EXPORT_SYMBOL(rt_read_lock);
+
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
+	sleeping_lock_inc();
+	rcu_read_lock();
+	migrate_disable();
+	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+	do_write_rt_lock(rwlock);
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+{
+	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+	do_read_rt_unlock(rwlock);
+	migrate_enable();
+	rcu_read_unlock();
+	sleeping_lock_dec();
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+{
+	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+	do_write_rt_unlock(rwlock);
+	migrate_enable();
+	rcu_read_unlock();
+	sleeping_lock_dec();
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+{
+	do_rwlock_rt_init(rwlock, name, key);
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
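A stand-alone model of the reader-bias arithmetic that __read_rt_trylock() implements above. This is a user-space sketch, assuming READER_BIAS occupies the sign bit (1U << 31), as in the matching RT rwlock/rwsem type headers; none of these names come from the patch itself.

/* Compile with: cc -std=c11 demo.c */
#include <stdatomic.h>
#include <stdio.h>

#define READER_BIAS	(1U << 31)	/* assumed value; sign bit = "no writer" */

static atomic_int readers = (int)READER_BIAS;	/* unlocked: bias set, negative */

static int read_trylock(void)
{
	int r = atomic_load(&readers);

	while (r < 0) {		/* negative: bias still set, no writer owns it */
		if (atomic_compare_exchange_weak(&readers, &r, r + 1))
			return 1;	/* one more reader accounted */
	}
	return 0;		/* a writer subtracted the bias: slow path */
}

int main(void)
{
	printf("trylock: %d, active readers: %d\n",
	       read_trylock(), atomic_load(&readers) - (int)READER_BIAS);
	return 0;
}

A writer's atomic_sub(READER_BIAS, ...) flips the counter non-negative, which simultaneously closes this fast path and exposes the remaining reader count the writer waits to drain.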
diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c
new file mode 100644
index 000000000..966946454
--- /dev/null
+++ b/kernel/locking/rwsem-rt.c
@@ -0,0 +1,312 @@
+/*
+ */
+#include <linux/blkdev.h>
+#include <linux/rwsem.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <linux/export.h>
+
+#include "rtmutex_common.h"
+
+/*
+ * RT-specific reader/writer semaphores
+ *
+ * down_write()
+ *  1) Lock sem->rtmutex
+ *  2) Remove the reader BIAS to force readers into the slow path
+ *  3) Wait until all readers have left the critical region
+ *  4) Mark it write locked
+ *
+ * up_write()
+ *  1) Remove the write locked marker
+ *  2) Set the reader BIAS so readers can use the fast path again
+ *  3) Unlock sem->rtmutex to release blocked readers
+ *
+ * down_read()
+ *  1) Try fast path acquisition (reader BIAS is set)
+ *  2) Take sem->rtmutex.wait_lock which protects the writelocked flag
+ *  3) If !writelocked, acquire it for read
+ *  4) If writelocked, block on sem->rtmutex
+ *  5) unlock sem->rtmutex, goto 1)
+ *
+ * up_read()
+ *  1) Try fast path release (reader count != 1)
+ *  2) Wake the writer waiting in down_write()#3
+ *
+ * down_read()#3 has the consequence, that rw semaphores on RT are not writer
+ * fair, but writers, which should be avoided in RT tasks (think mmap_sem),
+ * are subject to the rtmutex priority/DL inheritance mechanism.
+ *
+ * It's possible to make the rw semaphores writer fair by keeping a list of
+ * active readers. A blocked writer would force all newly incoming readers to
+ * block on the rtmutex, but the rtmutex would have to be proxy locked for one
+ * reader after the other. We can't use multi-reader inheritance because there
+ * is no way to support that with SCHED_DEADLINE. Implementing the one by one
+ * reader boosting/handover mechanism is a major surgery for a very dubious
+ * value.
+ *
+ * The risk of writer starvation is there, but the pathological use cases
+ * which trigger it are not necessarily the typical RT workloads.
+ */
+
+void __rwsem_init(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
+	atomic_set(&sem->readers, READER_BIAS);
+}
+EXPORT_SYMBOL(__rwsem_init);
+
+int __down_read_trylock(struct rw_semaphore *sem)
+{
+	int r, old;
+
+	/*
+	 * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
+	 * set.
+	 */
+	for (r = atomic_read(&sem->readers); r < 0;) {
+		old = atomic_cmpxchg(&sem->readers, r, r + 1);
+		if (likely(old == r))
+			return 1;
+		r = old;
+	}
+	return 0;
+}
+
+static int __sched __down_read_common(struct rw_semaphore *sem, int state)
+{
+	struct rt_mutex *m = &sem->rtmutex;
+	struct rt_mutex_waiter waiter;
+	int ret;
+
+	if (__down_read_trylock(sem))
+		return 0;
+	/*
+	 * If rt_mutex blocks, the function sched_submit_work will not call
+	 * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
+	 * We must call blk_schedule_flush_plug() here; if we don't, an I/O
+	 * deadlock may happen.
+	 */
+	if (unlikely(blk_needs_flush_plug(current)))
+		blk_schedule_flush_plug(current);
+
+	might_sleep();
+	raw_spin_lock_irq(&m->wait_lock);
+	/*
+	 * Allow readers as long as the writer has not completely
+	 * acquired the semaphore for write.
+	 */
+	if (atomic_read(&sem->readers) != WRITER_BIAS) {
+		atomic_inc(&sem->readers);
+		raw_spin_unlock_irq(&m->wait_lock);
+		return 0;
+	}
+
+	/*
+	 * Call into the slow lock path with the rtmutex->wait_lock
+	 * held, so this can't result in the following race:
+	 *
+	 * Reader1    Reader2    Writer
+	 *   down_read()
+	 *                       down_write()
+	 *                       rtmutex_lock(m)
+	 *                       swait()
+	 *              down_read()
+	 *              unlock(m->wait_lock)
+	 *   up_read()
+	 *              swake()
+	 *                       lock(m->wait_lock)
+	 *                       sem->writelocked=true
+	 *                       unlock(m->wait_lock)
+	 *
+	 *                       up_write()
+	 *                       sem->writelocked=false
+	 *              down_read()
+	 *                       rtmutex_unlock(m)
+	 *                                  down_write()
+	 *                                  rtmutex_lock(m)
+	 *                                  swait()
+	 *              rtmutex_lock(m)
+	 *
+	 * That would put Reader1 behind the writer waiting on
+	 * Reader2 to call up_read() which might be unbound.
+	 */
+	rt_mutex_init_waiter(&waiter, false);
+	ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK,
+				       NULL, &waiter);
+	/*
+	 * The slowlock() above is guaranteed to return with the rtmutex (for
+	 * ret = 0) now held, so there can't be a writer active. Increment
+	 * the reader count and immediately drop the rtmutex again.
+	 * For ret != 0 we don't hold the rtmutex and need to unlock the
+	 * wait_lock. We don't own the lock then.
+	 */
+	if (!ret)
+		atomic_inc(&sem->readers);
+	raw_spin_unlock_irq(&m->wait_lock);
+	if (!ret)
+		__rt_mutex_unlock(m);
+
+	debug_rt_mutex_free_waiter(&waiter);
+	return ret;
+}
+
+void __down_read(struct rw_semaphore *sem)
+{
+	int ret;
+
+	ret = __down_read_common(sem, TASK_UNINTERRUPTIBLE);
+	WARN_ON_ONCE(ret);
+}
+
+int __down_read_interruptible(struct rw_semaphore *sem)
+{
+	int ret;
+
+	ret = __down_read_common(sem, TASK_INTERRUPTIBLE);
+	if (likely(!ret))
+		return ret;
+	WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret);
+	return -EINTR;
+}
+
+int __down_read_killable(struct rw_semaphore *sem)
+{
+	int ret;
+
+	ret = __down_read_common(sem, TASK_KILLABLE);
+	if (likely(!ret))
+		return ret;
+	WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret);
+	return -EINTR;
+}
+
+void __up_read(struct rw_semaphore *sem)
+{
+	struct rt_mutex *m = &sem->rtmutex;
+	struct task_struct *tsk;
+
+	/*
+	 * sem->readers can only hit 0 when a writer is waiting for the
+	 * active readers to leave the critical region.
+	 */
+	if (!atomic_dec_and_test(&sem->readers))
+		return;
+
+	raw_spin_lock_irq(&m->wait_lock);
+	/*
+	 * Wake the writer, i.e. the rtmutex owner. It might release the
+	 * rtmutex concurrently in the fast path (due to a signal), but to
+	 * clean up the rwsem it needs to acquire m->wait_lock. The worst
+	 * case which can happen is a spurious wakeup.
+	 */
+	tsk = rt_mutex_owner(m);
+	if (tsk)
+		wake_up_process(tsk);
+
+	raw_spin_unlock_irq(&m->wait_lock);
+}
+
+static void __up_write_unlock(struct rw_semaphore *sem, int bias,
+			      unsigned long flags)
+{
+	struct rt_mutex *m = &sem->rtmutex;
+
+	atomic_add(READER_BIAS - bias, &sem->readers);
+	raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+	__rt_mutex_unlock(m);
+}
+
+static int __sched __down_write_common(struct rw_semaphore *sem, int state)
+{
+	struct rt_mutex *m = &sem->rtmutex;
+	unsigned long flags;
+
+	/* Take the rtmutex as a first step */
+	if (__rt_mutex_lock_state(m, state))
+		return -EINTR;
+
+	/* Force readers into slow path */
+	atomic_sub(READER_BIAS, &sem->readers);
+	might_sleep();
+
+	set_current_state(state);
+	for (;;) {
+		raw_spin_lock_irqsave(&m->wait_lock, flags);
+		/* Have all readers left the critical region? */
+		if (!atomic_read(&sem->readers)) {
+			atomic_set(&sem->readers, WRITER_BIAS);
+			__set_current_state(TASK_RUNNING);
+			raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+			return 0;
+		}
+
+		if (signal_pending_state(state, current)) {
+			__set_current_state(TASK_RUNNING);
+			__up_write_unlock(sem, 0, flags);
+			return -EINTR;
+		}
+		raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+
+		if (atomic_read(&sem->readers) != 0) {
+			schedule();
+			set_current_state(state);
+		}
+	}
+}
+
+void __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+int __sched __down_write_killable(struct rw_semaphore *sem)
+{
+	return __down_write_common(sem, TASK_KILLABLE);
+}
+
+int __down_write_trylock(struct rw_semaphore *sem)
+{
+	struct rt_mutex *m = &sem->rtmutex;
+	unsigned long flags;
+
+	if (!__rt_mutex_trylock(m))
+		return 0;
+
+	atomic_sub(READER_BIAS, &sem->readers);
+
+	raw_spin_lock_irqsave(&m->wait_lock, flags);
+	if (!atomic_read(&sem->readers)) {
+		atomic_set(&sem->readers, WRITER_BIAS);
+		raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+		return 1;
+	}
+	__up_write_unlock(sem, 0, flags);
+	return 0;
+}
+
+void __up_write(struct rw_semaphore *sem)
+{
+	struct rt_mutex *m = &sem->rtmutex;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&m->wait_lock, flags);
+	__up_write_unlock(sem, WRITER_BIAS, flags);
+}
+
+void __downgrade_write(struct rw_semaphore *sem)
+{
+	struct rt_mutex *m = &sem->rtmutex;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&m->wait_lock, flags);
+	/* Release it and account current as reader */
+	__up_write_unlock(sem, WRITER_BIAS - 1, flags);
+}
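For reference, a sketch of how a caller consumes the killable write path implemented by __down_write_common() above; demo_sem and demo_update are hypothetical names, while down_write_killable() is the stock kernel wrapper that reaches __down_write_killable() on RT.

static DECLARE_RWSEM(demo_sem);

static int demo_update(void)
{
	/* Blocks in __down_write_common(); a fatal signal yields -EINTR. */
	if (down_write_killable(&demo_sem))
		return -EINTR;

	/* All readers have drained here: sem->readers == WRITER_BIAS. */

	up_write(&demo_sem);
	return 0;
}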
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 936f3d14d..e89b70f47 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -117,8 +117,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)	\
  *         __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
+#endif
 
 #endif
 
@@ -202,6 +205,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
@@ -346,6 +351,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
 
+#endif /* !PREEMPT_RT_FULL */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 03595c29c..d63df281b 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void __rwlock_init(rwlock_t *lock, const char *name,
 		   struct lock_class_key *key)
 {
@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 }
 
 EXPORT_SYMBOL(__rwlock_init);
+#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
 	arch_spin_unlock(&lock->raw_lock);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
 	if (!debug_locks_off())
@@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock)
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
 }
+
+#endif
diff --git a/kernel/panic.c b/kernel/panic.c
index ebdb58e76..82955af2b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -534,9 +534,11 @@ static u64 oops_id;
 
 static int init_oops_id(void)
 {
+#ifndef CONFIG_PREEMPT_RT_FULL
 	if (!oops_id)
 		get_random_bytes(&oops_id, sizeof(oops_id));
 	else
+#endif
 		oops_id++;
 
 	return 0;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index fc6466351..8f90096cb 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -699,6 +699,10 @@ static int load_image_and_restore(void)
 	return error;
 }
 
+#ifndef CONFIG_SUSPEND
+bool pm_in_action;
+#endif
+
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
@@ -712,6 +716,8 @@ int hibernate(void)
 		return -EPERM;
 	}
 
+	pm_in_action = true;
+
 	lock_system_sleep();
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
@@ -791,6 +797,7 @@ int hibernate(void)
 	atomic_inc(&snapshot_device_available);
 Unlock:
 	unlock_system_sleep();
+	pm_in_action = false;
 	carry_out_hibernation = false;
 	pr_info("hibernation exit\n");
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 07a32e99a..5b28b0ac8 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -603,6 +603,8 @@ static int enter_state(suspend_state_t state)
 	return error;
 }
 
+bool pm_in_action;
+
 /**
  * pm_suspend - Externally visible function for suspending the system.
  * @state: System sleep state to enter.
@@ -617,6 +619,7 @@ int pm_suspend(suspend_state_t state)
 	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
 		return -EINVAL;
 
+	pm_in_action = true;
 	pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
 	error = enter_state(state);
 	if (error) {
@@ -626,6 +629,7 @@ int pm_suspend(suspend_state_t state)
 		suspend_stats.success++;
 	}
 	pr_info("suspend exit\n");
+	pm_in_action = false;
 	return error;
 }
 EXPORT_SYMBOL(pm_suspend);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index f52eceb3c..903fa8f2e 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -418,6 +418,65 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
 		printk_safe_exit_irqrestore(flags);	\
 	} while (0)
 
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
+
+static void early_vprintk(const char *fmt, va_list ap)
+{
+	if (early_console) {
+		char buf[512];
+		int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+
+		early_console->write(early_console, buf, n);
+	}
+}
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	early_vprintk(fmt, ap);
+	va_end(ap);
+}
+
+/*
+ * This is independent of any log levels - a global
+ * kill switch that turns off all of printk.
+ *
+ * Used by the NMI watchdog if early-printk is enabled.
+ */
+static bool __read_mostly printk_killswitch;
+
+static int __init force_early_printk_setup(char *str)
+{
+	printk_killswitch = true;
+	return 0;
+}
+early_param("force_early_printk", force_early_printk_setup);
+
+void printk_kill(void)
+{
+	printk_killswitch = true;
+}
+
+#ifdef CONFIG_PRINTK
+static int forced_early_printk(const char *fmt, va_list ap)
+{
+	if (!printk_killswitch)
+		return 0;
+	early_vprintk(fmt, ap);
+	return 1;
+}
+#endif
+
+#else
+static inline int forced_early_printk(const char *fmt, va_list ap)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PRINTK
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
@@ -1418,6 +1477,8 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 	u64 next_seq;
 	u64 seq;
 	u32 idx;
+	int attempts = 0;
+	int num_msg;
 	bool time;
 
 	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
@@ -1426,6 +1487,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 
 	time = printk_time;
 	logbuf_lock_irq();
+
+try_again:
+	attempts++;
+	if (attempts > 10) {
+		len = -EBUSY;
+		goto out;
+	}
+	num_msg = 0;
+
 	/*
 	 * Find first record that fits, including all following records,
 	 * into the user-provided buffer for this dump.
@@ -1438,6 +1508,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		len += msg_print_text(msg, true, time, NULL, 0);
 		idx = log_next(idx);
 		seq++;
+		num_msg++;
+		if (num_msg > 5) {
+			num_msg = 0;
+			logbuf_unlock_irq();
+			logbuf_lock_irq();
+			if (clear_seq < log_first_seq)
+				goto try_again;
+		}
 	}
 
 	/* move first record forward until length fits into the buffer */
@@ -1449,6 +1527,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		len -= msg_print_text(msg, true, time, NULL, 0);
 		idx = log_next(idx);
 		seq++;
+		num_msg++;
+		if (num_msg > 5) {
+			num_msg = 0;
+			logbuf_unlock_irq();
+			logbuf_lock_irq();
+			if (clear_seq < log_first_seq)
+				goto try_again;
+		}
 	}
 
 	/* last message fitting into this dump */
@@ -1481,6 +1567,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		clear_seq = log_next_seq;
 		clear_idx = log_next_idx;
 	}
+out:
 	logbuf_unlock_irq();
 
 	kfree(text);
@@ -1615,6 +1702,7 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
 	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Special console_lock variants that help to reduce the risk of soft-lockups.
  * They allow to pass console_lock to another printk() call using a busy wait.
@@ -1779,6 +1867,17 @@ static int console_trylock_spinning(void)
 	return 1;
 }
 
+#else
+
+void zap_locks(void) {}
+
+static int console_trylock_spinning(void)
+{
+	return console_trylock();
+}
+
+#endif
+
 /*
  * Call the console drivers, asking them to write out
  * log_buf[start] to log_buf[end - 1].
@@ -1794,6 +1893,12 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
 	if (!console_drivers)
 		return;
 
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
+		if (in_irq() || in_nmi())
+			return;
+	}
+
+	migrate_disable();
 	for_each_console(con) {
 		if (exclusive_console && con != exclusive_console)
 			continue;
@@ -1809,6 +1914,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
 		else
 			con->write(con, text, len);
 	}
+	migrate_enable();
 }
 
 int printk_delay_msec __read_mostly;
@@ -1978,7 +2084,12 @@ asmlinkage int vprintk_emit(int facility, int level,
 	bool in_sched = false, pending_output;
 	unsigned long flags;
 	u64 curr_log_seq;
-
+	/*
+	 * Fall back to early_printk if a debugging subsystem has
+	 * killed printk output
+	 */
+	if (unlikely(forced_early_printk(fmt, args)))
+		return 1;
 	if (unlikely(suppress_panic_printk) &&
 	    atomic_read(&panic_cpu) != raw_smp_processor_id())
 		return 0;
@@ -2000,20 +2111,30 @@ asmlinkage int vprintk_emit(int facility, int level,
 
 	/* If called from the scheduler, we can not call up(). */
 	if (!in_sched && pending_output) {
+		int may_trylock = 1;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+		/*
+		 * we can't take a sleeping lock with IRQs or preemption disabled
+		 * so we can't print in these contexts
+		 */
+		if (!(preempt_count() == 0 && !irqs_disabled()))
+			may_trylock = 0;
+#endif
 		/*
 		 * Disable preemption to avoid being preempted while holding
 		 * console_sem which would prevent anyone from printing to
 		 * console
 		 */
-		preempt_disable();
+		migrate_disable();
 		/*
 		 * Try to acquire and then immediately release the console
 		 * semaphore. The release will print out buffers and wake up
 		 * /dev/kmsg and syslog() users.
 		 */
-		if (console_trylock_spinning())
+		if (may_trylock && console_trylock_spinning())
 			console_unlock();
-		preempt_enable();
+		migrate_enable();
 	}
 
 	if (pending_output)
@@ -2128,26 +2249,6 @@ static bool suppress_message_printing(int level) { return false; }
 
 #endif /* CONFIG_PRINTK */
 
-#ifdef CONFIG_EARLY_PRINTK
-struct console *early_console;
-
-asmlinkage __visible void early_printk(const char *fmt, ...)
-{
-	va_list ap;
-	char buf[512];
-	int n;
-
-	if (!early_console)
-		return;
-
-	va_start(ap, fmt);
-	n = vscnprintf(buf, sizeof(buf), fmt, ap);
-	va_end(ap);
-
-	early_console->write(early_console, buf, n);
-}
-#endif
-
 static int __add_preferred_console(char *name, int idx, char *options,
 				   char *brl_options)
 {
@@ -2522,6 +2623,10 @@ void console_unlock(void)
 		console_seq++;
 		raw_spin_unlock(&logbuf_lock);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+		printk_safe_exit_irqrestore(flags);
+		call_console_drivers(ext_text, ext_len, text, len);
+#else
 		/*
 		 * While actively printing out messages, if another printk()
 		 * were to occur on another CPU, it may wait for this one to
@@ -2540,7 +2645,7 @@ void console_unlock(void)
 		}
 
 		printk_safe_exit_irqrestore(flags);
-
+#endif
 		/* Allow panic_cpu to take over the consoles safely */
 		if (abandon_console_lock_in_panic())
 			break;
@@ -2591,6 +2696,11 @@ void console_unblank(void)
 {
 	struct console *c;
 
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
+		if (in_irq() || in_nmi())
+			return;
+	}
+
 	/*
 	 * console_unblank can no longer be called in interrupt context unless
 	 * oops_in_progress is set to 1..
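The syslog_print_all() hunks above bound the time logbuf_lock is held by releasing it every five records and revalidating, giving up after ten invalidated scans. As a sketch, the generic shape of that bounded lock-dropping pattern looks like this; every name below is hypothetical, not printk code.

static DEFINE_SPINLOCK(demo_lock);

static bool demo_have_work(void);	/* hypothetical helpers */
static void demo_process_one(void);
static bool demo_state_moved(void);

static int demo_scan(void)
{
	int attempts = 0, batch;

	spin_lock_irq(&demo_lock);
try_again:
	if (++attempts > 10) {		/* writers kept invalidating the scan */
		spin_unlock_irq(&demo_lock);
		return -EBUSY;
	}
	batch = 0;
	while (demo_have_work()) {
		demo_process_one();
		if (++batch > 5) {
			batch = 0;
			/* Drop and retake the lock: RT latency relief. */
			spin_unlock_irq(&demo_lock);
			spin_lock_irq(&demo_lock);
			if (demo_state_moved())	/* records lost meanwhile? */
				goto try_again;
		}
	}
	spin_unlock_irq(&demo_lock);
	return 0;
}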
diff --git a/kernel/printk/printk.c.rej b/kernel/printk/printk.c.rej
new file mode 100644
index 000000000..5b6a53cda
--- /dev/null
+++ b/kernel/printk/printk.c.rej
@@ -0,0 +1,23 @@
+diff a/kernel/printk/printk.c b/kernel/printk/printk.c	(rejected hunks)
+@@ -1958,6 +2064,13 @@ asmlinkage int vprintk_emit(int facility, int level,
+ 	unsigned long flags;
+ 	u64 curr_log_seq;
+ 
++	/*
++	 * Fall back to early_printk if a debugging subsystem has
++	 * killed printk output
++	 */
++	if (unlikely(forced_early_printk(fmt, args)))
++		return 1;
++
+ 	if (level == LOGLEVEL_SCHED) {
+ 		level = LOGLEVEL_DEFAULT;
+ 		in_sched = true;
+@@ -2491,6 +2598,7 @@ void console_unlock(void)
+ 	}
+ 
+ 	printk_safe_exit_irqrestore(flags);
++#endif
+ 
+ 	if (do_cond_resched)
+ 		cond_resched();
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 424a04cdd..7378cf3ad 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -190,7 +190,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
 	spin_lock_irq(&task->sighand->siglock);
 	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
 	    !__fatal_signal_pending(task)) {
-		task->state = __TASK_TRACED;
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
+		if (task->state & __TASK_TRACED)
+			task->state = __TASK_TRACED;
+		else
+			task->saved_state = __TASK_TRACED;
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		ret = true;
 	}
 	spin_unlock_irq(&task->sighand->siglock);
@@ -200,8 +207,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
 
 static void ptrace_unfreeze_traced(struct task_struct *task)
 {
-	if (task->state != __TASK_TRACED)
-		return;
+	unsigned long flags;
+	bool frozen = true;
 
 	WARN_ON(!task->ptrace || task->parent != current);
 
@@ -210,12 +217,19 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
 	 * Recheck state under the lock to close this race.
 	 */
 	spin_lock_irq(&task->sighand->siglock);
-	if (task->state == __TASK_TRACED) {
-		if (__fatal_signal_pending(task))
-			wake_up_state(task, __TASK_TRACED);
-		else
-			task->state = TASK_TRACED;
-	}
+
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	if (task->state == __TASK_TRACED)
+		task->state = TASK_TRACED;
+	else if (task->saved_state == __TASK_TRACED)
+		task->saved_state = TASK_TRACED;
+	else
+		frozen = false;
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+	if (frozen && __fatal_signal_pending(task))
+		wake_up_state(task, __TASK_TRACED);
+
 	spin_unlock_irq(&task->sighand->siglock);
 }
 
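The ptrace hunks rely on the RT convention that a task blocking on a sleeping spinlock parks its real state in ->saved_state. A condensed sketch of the rule both hunks follow under pi_lock; demo_force_state is a hypothetical helper, not patch code.

static void demo_force_state(struct task_struct *p, long state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (p->state & state)
		p->state = state;	/* the task really sleeps in this state */
	else
		p->saved_state = state;	/* state was stashed by an RT spinlock */
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

Whichever field currently carries the "real" state is the one the tracer must update; unfreezing applies the inverse check.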
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index 9210379c0..a243a78ff 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -172,7 +172,7 @@ config RCU_FANOUT_LEAF
 
 config RCU_FAST_NO_HZ
 	bool "Accelerate last non-dyntick-idle CPU's grace periods"
-	depends on NO_HZ_COMMON && SMP && RCU_EXPERT
+	depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL
 	default n
 	help
 	  This option permits CPUs to enter dynticks-idle state even if
@@ -190,8 +190,8 @@ config RCU_FAST_NO_HZ
 
 config RCU_BOOST
 	bool "Enable RCU priority boosting"
-	depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
-	default n
+	depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT_FULL
+	default y if PREEMPT_RT_FULL
 	help
 	  This option boosts the priority of preempted RCU readers that
 	  block the current preemptible RCU grace period for too long.
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 4d04683c3..808cce9a5 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -528,7 +528,6 @@ static inline void show_rcu_gp_kthreads(void) { }
 static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
 #else /* #ifdef CONFIG_TINY_RCU */
 unsigned long rcu_get_gp_seq(void);
-unsigned long rcu_bh_get_gp_seq(void);
 unsigned long rcu_sched_get_gp_seq(void);
 unsigned long rcu_exp_batches_completed(void);
 unsigned long rcu_exp_batches_completed_sched(void);
@@ -536,10 +535,18 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void show_rcu_gp_kthreads(void);
 int rcu_get_gp_kthreads_prio(void);
 void rcu_force_quiescent_state(void);
-void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
 extern struct workqueue_struct *rcu_gp_wq;
 extern struct workqueue_struct *rcu_par_gp_wq;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define rcu_bh_get_gp_seq		rcu_get_gp_seq
+#define rcu_bh_force_quiescent_state	rcu_force_quiescent_state
+#else
+unsigned long rcu_bh_get_gp_seq(void);
+void rcu_bh_force_quiescent_state(void);
+#endif
+
 #endif /* #else #ifdef CONFIG_TINY_RCU */
 
 #ifdef CONFIG_RCU_NOCB_CPU
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 0b7af7e2b..e95d121ef 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -434,6 +434,7 @@ static struct rcu_torture_ops rcu_ops = {
 	.name		= "rcu"
 };
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Definitions for rcu_bh torture testing.
  */
@@ -475,6 +476,12 @@ static struct rcu_torture_ops rcu_bh_ops = {
 	.name		= "rcu_bh"
 };
 
+#else
+static struct rcu_torture_ops rcu_bh_ops = {
+	.ttype		= INVALID_RCU_FLAVOR,
+};
+#endif
+
 /*
  * Don't even think about trying any of these in real life!!!
  * The names includes "busted", and they really means it!
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index a276bbe90..73ec7635b 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -38,6 +38,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/srcu.h>
+#include <linux/cpu.h>
+#include <linux/locallock.h>
 
 #include "rcu.h"
 #include "rcu_segcblist.h"
@@ -462,21 +464,6 @@ static void srcu_gp_start(struct srcu_struct *sp)
 	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
 }
 
-/*
- * Track online CPUs to guide callback workqueue placement.
- */
-DEFINE_PER_CPU(bool, srcu_online);
-
-void srcu_online_cpu(unsigned int cpu)
-{
-	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
-}
-
-void srcu_offline_cpu(unsigned int cpu)
-{
-	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
-}
-
 /*
  * Place the workqueue handler on the specified CPU if online, otherwise
  * just run it whereever. This is useful for placing workqueue handlers
@@ -488,12 +475,12 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 {
 	bool ret;
 
-	preempt_disable();
-	if (READ_ONCE(per_cpu(srcu_online, cpu)))
+	cpus_read_lock();
+	if (cpu_online(cpu))
 		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
 	else
 		ret = queue_delayed_work(wq, dwork, delay);
-	preempt_enable();
+	cpus_read_unlock();
 	return ret;
 }
 
@@ -776,6 +763,8 @@ static void srcu_flip(struct srcu_struct *sp)
 * negligible when amoritized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
+static DEFINE_LOCAL_IRQ_LOCK(sp_llock);
+
 static bool srcu_might_be_idle(struct srcu_struct *sp)
 {
 	unsigned long curseq;
@@ -785,13 +774,13 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
 	unsigned long tlast;
 
 	/* If the local srcu_data structure has callbacks, not idle.  */
-	local_irq_save(flags);
+	local_lock_irqsave(sp_llock, flags);
 	sdp = this_cpu_ptr(sp->sda);
 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
-		local_irq_restore(flags);
+		local_unlock_irqrestore(sp_llock, flags);
 		return false; /* Callbacks already present, so not idle. */
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(sp_llock, flags);
 
 	/*
 	 * No local callbacks, so probabalistically probe global state.
@@ -871,7 +860,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	}
 	rhp->func = func;
 	idx = srcu_read_lock(sp);
-	local_irq_save(flags);
+	local_lock_irqsave(sp_llock, flags);
 	sdp = this_cpu_ptr(sp->sda);
 	spin_lock_rcu_node(sdp);
 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
@@ -887,7 +876,8 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 		sdp->srcu_gp_seq_needed_exp = s;
 		needexp = true;
 	}
-	spin_unlock_irqrestore_rcu_node(sdp, flags);
+	spin_unlock_rcu_node(sdp);
+	local_unlock_irqrestore(sp_llock, flags);
 	if (needgp)
 		srcu_funnel_gp_start(sp, sdp, s, do_norm);
 	else if (needexp)
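The srcutree conversion above replaces bare local_irq_save() with a named local lock so the section stays preemptible (and lockdep-annotated) on RT, while still compiling down to plain IRQ-disable on !RT. The general shape of the pattern, with hypothetical names:

static DEFINE_LOCAL_IRQ_LOCK(demo_llock);
static DEFINE_PER_CPU(int, demo_count);

static void demo_inc(void)
{
	unsigned long flags;

	/* !RT: local_irq_save(); RT: per-CPU rt_mutex based lock */
	local_lock_irqsave(demo_llock, flags);
	this_cpu_inc(demo_count);
	local_unlock_irqrestore(demo_llock, flags);
}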
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a0dc5385b..b176b9d03 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -61,6 +61,13 @@
 #include <linux/trace_events.h>
 #include <linux/suspend.h>
 #include <linux/ftrace.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/oom.h>
+#include <linux/smpboot.h>
+#include <linux/jiffies.h>
+#include <linux/sched/isolation.h>
+#include "../time/tick-internal.h"
 
 #include "tree.h"
 #include "rcu.h"
@@ -244,6 +251,19 @@ void rcu_sched_qs(void)
 			   this_cpu_ptr(&rcu_sched_data), true);
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(void);
+
+void rcu_bh_qs(void)
+{
+	unsigned long flags;
+
+	/* Callers to this function, rcu_preempt_qs(), must disable irqs. */
+	local_irq_save(flags);
+	rcu_preempt_qs();
+	local_irq_restore(flags);
+}
+#else
 void rcu_bh_qs(void)
 {
 	RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
@@ -254,6 +274,7 @@ void rcu_bh_qs(void)
 		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
 	}
 }
+#endif
 
 /*
  * Steal a bit from the bottom of ->dynticks for idle entry/exit
@@ -569,6 +590,7 @@ unsigned long rcu_sched_get_gp_seq(void)
 }
 EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Return the number of RCU-bh GPs completed thus far for debug & stats.
 */
@@ -577,6 +599,7 @@ unsigned long rcu_bh_get_gp_seq(void)
 	return READ_ONCE(rcu_bh_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
+#endif
 
 /*
 * Return the number of RCU expedited batches completed thus far for
@@ -600,6 +623,7 @@ unsigned long rcu_exp_batches_completed_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
 * Force a quiescent state.
 */
@@ -618,6 +642,13 @@ void rcu_bh_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
+#else
+void rcu_force_quiescent_state(void)
+{
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+#endif
+
 /*
 * Force a quiescent state for RCU-sched.
 */
@@ -675,9 +706,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 	case RCU_FLAVOR:
 		rsp = rcu_state_p;
 		break;
+#ifndef CONFIG_PREEMPT_RT_FULL
 	case RCU_BH_FLAVOR:
 		rsp = &rcu_bh_state;
 		break;
+#endif
 	case RCU_SCHED_FLAVOR:
 		rsp = &rcu_sched_state;
 		break;
@@ -1264,6 +1297,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 	    (rnp->ffmask & rdp->grpmask)) {
 		init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
+		rdp->rcu_iw.flags = IRQ_WORK_HARD_IRQ;
 		rdp->rcu_iw_pending = true;
 		rdp->rcu_iw_gp_seq = rnp->gp_seq;
 		irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
@@ -2870,18 +2904,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 /*
 * Do RCU core processing for the current CPU.
 */
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(void)
 {
 	struct rcu_state *rsp;
 
 	if (cpu_is_offline(smp_processor_id()))
 		return;
-	trace_rcu_utilization(TPS("Start RCU core"));
 	for_each_rcu_flavor(rsp)
 		__rcu_process_callbacks(rsp);
-	trace_rcu_utilization(TPS("End RCU core"));
 }
 
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 /*
 * Schedule RCU callback invocation.  If the specified type of RCU
 * does not support RCU priority boosting, just do a direct call,
@@ -2893,19 +2926,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
-	if (likely(!rsp->boost)) {
-		rcu_do_batch(rsp, rdp);
-		return;
-	}
-	invoke_rcu_callbacks_kthread();
+	rcu_do_batch(rsp, rdp);
 }
 
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+	/*
+	 * If the thread is yielding, only wake it when this
+	 * is invoked from idle
+	 */
+	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
+		wake_up_process(t);
+}
+
+/*
+ * Wake up this CPU's rcuc kthread to do RCU core processing.
+ */
 static void invoke_rcu_core(void)
 {
-	if (cpu_online(smp_processor_id()))
-		raise_softirq(RCU_SOFTIRQ);
+	unsigned long flags;
+	struct task_struct *t;
+
+	if (!cpu_online(smp_processor_id()))
+		return;
+	local_irq_save(flags);
+	__this_cpu_write(rcu_cpu_has_work, 1);
+	t = __this_cpu_read(rcu_cpu_kthread_task);
+	if (t != NULL && current != t)
+		rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
+	local_irq_restore(flags);
+}
+
+static void rcu_cpu_kthread_park(unsigned int cpu)
+{
+	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+	return __this_cpu_read(rcu_cpu_has_work);
 }
 
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
+ */
+static void rcu_cpu_kthread(unsigned int cpu)
+{
+	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
+	int spincnt;
+
+	for (spincnt = 0; spincnt < 10; spincnt++) {
+		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+		local_bh_disable();
+		*statusp = RCU_KTHREAD_RUNNING;
+		this_cpu_inc(rcu_cpu_kthread_loops);
+		local_irq_disable();
+		work = *workp;
+		*workp = 0;
+		local_irq_enable();
+		if (work)
+			rcu_process_callbacks();
+		local_bh_enable();
+		if (*workp == 0) {
+			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+			*statusp = RCU_KTHREAD_WAITING;
+			return;
+		}
+	}
+	*statusp = RCU_KTHREAD_YIELDING;
+	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+	schedule_timeout_interruptible(2);
+	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+	*statusp = RCU_KTHREAD_WAITING;
+}
+
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+	.store			= &rcu_cpu_kthread_task,
+	.thread_should_run	= rcu_cpu_kthread_should_run,
+	.thread_fn		= rcu_cpu_kthread,
+	.thread_comm		= "rcuc/%u",
+	.setup			= rcu_cpu_kthread_setup,
+	.park			= rcu_cpu_kthread_park,
+};
+
+/*
+ * Spawn per-CPU RCU core processing kthreads.
+ */
+static int __init rcu_spawn_core_kthreads(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(rcu_cpu_has_work, cpu) = 0;
+	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+	return 0;
+}
+early_initcall(rcu_spawn_core_kthreads);
+
 /*
 * Handle any core-RCU processing required by a call_rcu() invocation.
 */
@@ -3057,6 +3177,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
 * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
@@ -3084,6 +3205,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
 
 /*
 * Queue an RCU callback for lazy invocation after a grace period.
@@ -3169,6 +3291,7 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
@@ -3195,6 +3318,7 @@ void synchronize_rcu_bh(void)
 	wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
 
 /**
 * get_state_synchronize_rcu - Snapshot current RCU state
@@ -3502,6 +3626,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	mutex_unlock(&rsp->barrier_mutex);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
@@ -3510,6 +3635,7 @@ void rcu_barrier_bh(void)
 	_rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
 
 /**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
@@ -3659,8 +3785,6 @@ int rcutree_online_cpu(unsigned int cpu)
 		rnp->ffmask |= rdp->grpmask;
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
-	if (IS_ENABLED(CONFIG_TREE_SRCU))
-		srcu_online_cpu(cpu);
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 		return 0; /* Too early in boot for scheduler work. */
 	sync_sched_exp_online_cleanup(cpu);
@@ -3688,8 +3812,6 @@ int rcutree_offline_cpu(unsigned int cpu)
 	}
 
 	rcutree_affinity_setting(cpu, cpu);
-	if (IS_ENABLED(CONFIG_TREE_SRCU))
-		srcu_offline_cpu(cpu);
 	return 0;
 }
 
@@ -4158,12 +4280,13 @@ void __init rcu_init(void)
 
 	rcu_bootup_announce();
 	rcu_init_geometry();
+#ifndef CONFIG_PREEMPT_RT_FULL
 	rcu_init_one(&rcu_bh_state);
+#endif
 	rcu_init_one(&rcu_sched_state);
 	if (dump_tree)
 		rcu_dump_rcu_node_tree(&rcu_sched_state);
 	__rcu_init_preempt();
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
 	/*
 	 * We don't need protection against CPU-hotplug here because
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 4e74df768..98257d20f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -413,7 +413,9 @@ extern struct list_head rcu_struct_flavors;
 */
 extern struct rcu_state rcu_sched_state;
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 extern struct rcu_state rcu_bh_state;
+#endif
 
 #ifdef CONFIG_PREEMPT_RCU
 extern struct rcu_state rcu_preempt_state;
@@ -421,12 +423,10 @@ extern struct rcu_state rcu_preempt_state;
 
 int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
 
-#ifdef CONFIG_RCU_BOOST
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DECLARE_PER_CPU(char, rcu_cpu_has_work);
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #ifndef RCU_TREE_NONCORE
 
@@ -449,8 +449,8 @@ static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
 			    int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
-static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
+static void rcu_cpu_kthread_setup(unsigned int cpu);
 #ifdef CONFIG_RCU_BOOST
 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 				       struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 7f8cb91aa..59745ada4 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -470,7 +470,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
smp_call_func_t func)
{
- int cpu;
struct rcu_node *rnp;

trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -492,13 +491,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
continue;
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
- preempt_disable();
- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
- /* If all offline, queue the work on an unbound CPU. */
- if (unlikely(cpu > rnp->grphi))
- cpu = WORK_CPU_UNBOUND;
- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
- preempt_enable();
+ queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
rnp->exp_need_flush = true;
}

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 568818bef..a9fdfa392 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -24,41 +24,16 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/

-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/oom.h>
-#include <linux/sched/debug.h>
-#include <linux/smpboot.h>
-#include <linux/sched/isolation.h>
-#include <uapi/linux/sched/types.h>
-#include "../time/tick-internal.h"
-
-#ifdef CONFIG_RCU_BOOST
-
#include "../locking/rtmutex_common.h"

/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
*/
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

-#else /* #ifdef CONFIG_RCU_BOOST */
-
-/*
- * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
- * all uses are in dead code. Provide a definition to keep the compiler
- * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
- * This probably needs to be excluded from -rt builds.
- */
-#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
-#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)
-
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
-
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */
@@ -337,9 +312,13 @@ static void rcu_preempt_note_context_switch(bool preempt)
struct task_struct *t = current;
struct rcu_data *rdp;
struct rcu_node *rnp;
+ int sleeping_l = 0;

lockdep_assert_irqs_disabled();
- WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
+#if defined(CONFIG_PREEMPT_RT_FULL)
+ sleeping_l = t->sleeping_lock;
+#endif
+ WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l);
if (t->rcu_read_lock_nesting > 0 &&
!t->rcu_read_unlock_special.b.blocked) {

@@ -520,7 +499,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
}

/* Hardware IRQ handlers cannot block, complain if they get here. */
- if (in_irq() || in_serving_softirq()) {
+ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
lockdep_rcu_suspicious(__FILE__, __LINE__,
"rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
@@ -1023,18 +1002,21 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

+/*
+ * If boosting, set rcuc kthreads to realtime priority.
+ */
+static void rcu_cpu_kthread_setup(unsigned int cpu)
+{
#ifdef CONFIG_RCU_BOOST
+ struct sched_param sp;

-static void rcu_wake_cond(struct task_struct *t, int status)
-{
- /*
- * If the thread is yielding, only wake it when this
- * is invoked from idle
- */
- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
- wake_up_process(t);
+ sp.sched_priority = kthread_prio;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+#endif /* #ifdef CONFIG_RCU_BOOST */
}

+#ifdef CONFIG_RCU_BOOST
+
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
@@ -1172,23 +1154,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
}
}

-/*
- * Wake up the per-CPU kthread to invoke RCU callbacks.
- */
-static void invoke_rcu_callbacks_kthread(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __this_cpu_write(rcu_cpu_has_work, 1);
- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
- current != __this_cpu_read(rcu_cpu_kthread_task)) {
- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
- __this_cpu_read(rcu_cpu_kthread_status));
- }
- local_irq_restore(flags);
-}
-
/*
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
@@ -1243,67 +1208,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
return 0;
}

-static void rcu_kthread_do_work(void)
-{
- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
- rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
-}
-
-static void rcu_cpu_kthread_setup(unsigned int cpu)
-{
- struct sched_param sp;
-
- sp.sched_priority = kthread_prio;
- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-}
-
-static void rcu_cpu_kthread_park(unsigned int cpu)
-{
- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-}
-
-static int rcu_cpu_kthread_should_run(unsigned int cpu)
-{
- return __this_cpu_read(rcu_cpu_has_work);
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * RCU softirq used in flavors and configurations of RCU that do not
- * support RCU priority boosting.
- */
-static void rcu_cpu_kthread(unsigned int cpu)
-{
- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
- int spincnt;
-
- for (spincnt = 0; spincnt < 10; spincnt++) {
- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
- local_bh_disable();
- *statusp = RCU_KTHREAD_RUNNING;
- this_cpu_inc(rcu_cpu_kthread_loops);
- local_irq_disable();
- work = *workp;
- *workp = 0;
- local_irq_enable();
- if (work)
- rcu_kthread_do_work();
- local_bh_enable();
- if (*workp == 0) {
- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
- *statusp = RCU_KTHREAD_WAITING;
- return;
- }
- }
- *statusp = RCU_KTHREAD_YIELDING;
- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
- schedule_timeout_interruptible(2);
- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
- *statusp = RCU_KTHREAD_WAITING;
-}
-
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
@@ -1334,26 +1238,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
free_cpumask_var(cm);
}

-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
- .store = &rcu_cpu_kthread_task,
- .thread_should_run = rcu_cpu_kthread_should_run,
- .thread_fn = rcu_cpu_kthread,
- .thread_comm = "rcuc/%u",
- .setup = rcu_cpu_kthread_setup,
- .park = rcu_cpu_kthread_park,
-};
-
/*
* Spawn boost kthreads -- called as soon as the scheduler is running.
*/
static void __init rcu_spawn_boost_kthreads(void)
{
struct rcu_node *rnp;
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(rcu_cpu_has_work, cpu) = 0;
- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
@@ -1376,11 +1266,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

-static void invoke_rcu_callbacks_kthread(void)
-{
- WARN_ON_ONCE(1);
-}
-
static bool rcu_is_callbacks_kthread(void)
{
return false;
@@ -1404,7 +1289,7 @@ static void rcu_prepare_kthreads(int cpu)

#endif /* #else #ifdef CONFIG_RCU_BOOST */

-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)

/*
* Check to see if any future RCU-related work will need to be done
@@ -1420,7 +1305,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
*nextevt = KTIME_MAX;
return rcu_cpu_has_callbacks(NULL);
}
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */

+#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
@@ -1517,6 +1404,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
return cbs_ready;
}

+#ifndef CONFIG_PREEMPT_RT_FULL
+
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
@@ -1559,6 +1448,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
*nextevt = basemono + dj * TICK_NSEC;
return 0;
}
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */

/*
* Prepare a CPU for idle from an RCU perspective. The first major task
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 759ea6881..b5ba34afd 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -68,8 +68,10 @@ extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
-static int rcu_normal_after_boot;
+static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+#ifndef CONFIG_PREEMPT_RT_FULL
module_param(rcu_normal_after_boot, int, 0);
+#endif
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -288,6 +290,7 @@ int rcu_read_lock_held(void)
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
@@ -314,6 +317,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+#endif

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 0612af002..9150fc850 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -18,7 +18,7 @@ endif

obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle.o fair.o rt.o deadline.o
-obj-y += wait.o wait_bit.o swait.o completion.o
+obj-y += wait.o wait_bit.o swait.o swork.o completion.o

obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index a1ad5b7d5..49c141379 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -29,12 +29,12 @@ void complete(struct completion *x)
{
unsigned long flags;

- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);

if (x->done != UINT_MAX)
x->done++;
- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ swake_up_locked(&x->wait);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);

@@ -58,10 +58,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;

- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
x->done = UINT_MAX;
- __wake_up_locked(&x->wait, TASK_NORMAL, 0);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ swake_up_all_locked(&x->wait);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);

@@ -70,20 +70,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
- DECLARE_WAITQUEUE(wait, current);
+ DECLARE_SWAITQUEUE(wait);

- __add_wait_queue_entry_tail_exclusive(&x->wait, &wait);
do {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
+ __prepare_to_swait(&x->wait, &wait);
__set_current_state(state);
- spin_unlock_irq(&x->wait.lock);
+ raw_spin_unlock_irq(&x->wait.lock);
timeout = action(timeout);
- spin_lock_irq(&x->wait.lock);
+ raw_spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
- __remove_wait_queue(&x->wait, &wait);
+ __finish_swait(&x->wait, &wait);
if (!x->done)
return timeout;
}
@@ -100,9 +100,9 @@ __wait_for_common(struct completion *x,

complete_acquire(x);

- spin_lock_irq(&x->wait.lock);
+ raw_spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, action, timeout, state);
- spin_unlock_irq(&x->wait.lock);
+ raw_spin_unlock_irq(&x->wait.lock);

complete_release(x);

@@ -291,12 +291,12 @@ bool try_wait_for_completion(struct completion *x)
if (!READ_ONCE(x->done))
return false;

- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = false;
else if (x->done != UINT_MAX)
x->done--;
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
@@ -322,8 +322,8 @@ bool completion_done(struct completion *x)
* otherwise we can end up freeing the completion before complete()
* is done referencing it.
*/
- spin_lock_irqsave(&x->wait.lock, flags);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return true;
}
EXPORT_SYMBOL(completion_done);
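
For context, the completion API above keeps its external shape; only the wait machinery underneath moves to simple waitqueues under a raw spinlock, which is what keeps complete() usable from hard interrupt context on PREEMPT_RT_FULL. A minimal usage sketch follows, not part of the patch; the names data_ready, consumer_thread and producer_irq are invented for illustration.

#include <linux/completion.h>
#include <linux/interrupt.h>

/* Illustrative sketch only -- not part of the patch. */
static DECLARE_COMPLETION(data_ready);

static int consumer_thread(void *unused)
{
	/* Sleeps on x->wait, now a swait queue under a raw lock. */
	wait_for_completion(&data_ready);
	/* ... consume the data ... */
	return 0;
}

static irqreturn_t producer_irq(int irq, void *dev)
{
	/*
	 * Still safe in hard IRQ context on RT: complete() only takes
	 * raw_spin_lock_irqsave() and wakes a single swait waiter.
	 */
	complete(&data_ready);
	return IRQ_HANDLED;
}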
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7825ceaae..3ae20bb64 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -45,7 +45,11 @@ const_debug unsigned int sysctl_sched_features =
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+const_debug unsigned int sysctl_sched_nr_migrate = 8;
+#else
const_debug unsigned int sysctl_sched_nr_migrate = 32;
+#endif

/*
* period over which we measure -rt task CPU usage in us.
@@ -329,7 +333,7 @@ static void hrtick_rq_init(struct rq *rq)
rq->hrtick_csd.info = rq;
#endif

- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
@@ -423,9 +427,16 @@ static bool set_nr_if_polling(struct task_struct *p)
* This function must be used as-if it were wake_up_process(); IOW the task
* must be ready to be woken at this location.
*/
-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
+ bool sleeper)
+
{
- struct wake_q_node *node = &task->wake_q;
+ struct wake_q_node *node;
+
+ if (sleeper)
+ node = &task->wake_q_sleeper;
+ else
+ node = &task->wake_q;

/*
* Atomically grab the task, if ->wake_q is !nil already it means
@@ -448,24 +459,32 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
head->lastp = &node->next;
}

-void wake_up_q(struct wake_q_head *head)
+void __wake_up_q(struct wake_q_head *head, bool sleeper)
{
struct wake_q_node *node = head->first;

while (node != WAKE_Q_TAIL) {
struct task_struct *task;

- task = container_of(node, struct task_struct, wake_q);
+ if (sleeper)
+ task = container_of(node, struct task_struct, wake_q_sleeper);
+ else
+ task = container_of(node, struct task_struct, wake_q);
BUG_ON(!task);
/* Task can safely be re-inserted now: */
node = node->next;
- task->wake_q.next = NULL;
-
+ if (sleeper)
+ task->wake_q_sleeper.next = NULL;
+ else
+ task->wake_q.next = NULL;
/*
* wake_up_process() executes a full barrier, which pairs with
* the queueing in wake_q_add() so as not to miss wakeups.
*/
- wake_up_process(task);
+ if (sleeper)
+ wake_up_lock_sleeper(task);
+ else
+ wake_up_process(task);
put_task_struct(task);
}
}
@@ -501,6 +520,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}

+#ifdef CONFIG_PREEMPT_LAZY
+
+static int tsk_is_polling(struct task_struct *p)
+{
+#ifdef TIF_POLLING_NRFLAG
+ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+#else
+ return 0;
+#endif
+}
+
+void resched_curr_lazy(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ int cpu;
+
+ if (!sched_feat(PREEMPT_LAZY)) {
+ resched_curr(rq);
+ return;
+ }
+
+ lockdep_assert_held(&rq->lock);
+
+ if (test_tsk_need_resched(curr))
+ return;
+
+ if (test_tsk_need_resched_lazy(curr))
+ return;
+
+ set_tsk_need_resched_lazy(curr);
+
+ cpu = cpu_of(rq);
+ if (cpu == smp_processor_id())
+ return;
+
+ /* NEED_RESCHED_LAZY must be visible before we test polling */
+ smp_mb();
+ if (!tsk_is_polling(curr))
+ smp_send_reschedule(cpu);
+}
+#endif
+
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -907,10 +968,10 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
- if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;

- if (is_per_cpu_kthread(p))
+ if (is_per_cpu_kthread(p) || __migrate_disabled(p))
return cpu_online(cpu);

return cpu_active(cpu);
@@ -959,6 +1020,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
struct migration_arg {
struct task_struct *task;
int dest_cpu;
+ bool done;
};

/*
@@ -994,6 +1056,11 @@ static int migration_cpu_stop(void *data)
struct task_struct *p = arg->task;
struct rq *rq = this_rq();
struct rq_flags rf;
+ int dest_cpu = arg->dest_cpu;
+
+ /* We don't look at arg after this point. */
+ smp_mb();
+ arg->done = true;

/*
* The original target CPU might have gone down and we might
@@ -1002,7 +1069,7 @@ static int migration_cpu_stop(void *data)
local_irq_disable();
/*
* We need to explicitly wake pending tasks before running
- * __migrate_task() such that we will not miss enforcing cpus_allowed
+ * __migrate_task() such that we will not miss enforcing cpus_ptr
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
sched_ttwu_pending();
@@ -1016,9 +1083,9 @@ static int migration_cpu_stop(void *data)
*/
if (task_rq(p) == rq) {
if (task_on_rq_queued(p))
- rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
+ rq = __migrate_task(rq, &rf, p, dest_cpu);
else
- p->wake_cpu = arg->dest_cpu;
+ p->wake_cpu = dest_cpu;
}
rq_unlock(rq, &rf);
raw_spin_unlock(&p->pi_lock);
@@ -1033,9 +1100,18 @@ static int migration_cpu_stop(void *data)
*/
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
- cpumask_copy(&p->cpus_allowed, new_mask);
- p->nr_cpus_allowed = cpumask_weight(new_mask);
+ cpumask_copy(&p->cpus_mask, new_mask);
+ if (p->cpus_ptr == &p->cpus_mask)
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+int __migrate_disabled(struct task_struct *p)
+{
+ return p->migrate_disable;
}
+EXPORT_SYMBOL_GPL(__migrate_disabled);
+#endif

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -1103,7 +1179,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
goto out;
}

- if (cpumask_equal(&p->cpus_allowed, new_mask))
+ if (cpumask_equal(&p->cpus_mask, new_mask))
goto out;

dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
@@ -1125,7 +1201,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
}

/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
+ if (cpumask_test_cpu(task_cpu(p), new_mask) ||
+ p->cpus_ptr != &p->cpus_mask)
goto out;

if (task_running(rq, p) || p->state == TASK_WAKING) {
@@ -1266,10 +1343,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;

- if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
+ if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
goto unlock;

- if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
+ if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
goto unlock;

__migrate_swap_task(arg->src_task, arg->dst_cpu);
@@ -1311,10 +1388,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;

- if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
+ if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
goto out;

- if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
+ if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
goto out;

trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
@@ -1325,6 +1402,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
}
#endif /* CONFIG_NUMA_BALANCING */

+static bool check_task_state(struct task_struct *p, long match_state)
+{
+ bool match = false;
+
+ raw_spin_lock_irq(&p->pi_lock);
+ if (p->state == match_state || p->saved_state == match_state)
+ match = true;
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ return match;
+}
+
/*
* wait_task_inactive - wait for a thread to unschedule.
*
@@ -1369,7 +1458,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && unlikely(p->state != match_state))
+ if (match_state && !check_task_state(p, match_state))
return 0;
cpu_relax();
}
@@ -1384,7 +1473,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
- if (!match_state || p->state == match_state)
+ if (!match_state || p->state == match_state ||
+ p->saved_state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);

@@ -1459,7 +1549,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);

/*
- * ->cpus_allowed is protected by both rq->lock and p->pi_lock
+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
*
* A few notes on cpu_active vs cpu_online:
*
@@ -1499,14 +1589,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
return dest_cpu;
}
}

for (;;) {
/* Any allowed, online CPU? */
- for_each_cpu(dest_cpu, &p->cpus_allowed) {
+ for_each_cpu(dest_cpu, p->cpus_ptr) {
if (!is_cpu_allowed(p, dest_cpu))
continue;

@@ -1550,7 +1640,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
}

/*
- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
+ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
@@ -1560,11 +1650,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
- cpu = cpumask_any(&p->cpus_allowed);
+ cpu = cpumask_any(p->cpus_ptr);

/*
* In order not to call set_task_cpu() on a blocking task we need
- * to rely on ttwu() to place the task on a valid ->cpus_allowed
+ * to rely on ttwu() to place the task on a valid ->cpus_ptr
* CPU.
*
* Since this is common to all placement strategies, this lives here.
@@ -1988,8 +2078,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
- if (!(p->state & state))
+ if (!(p->state & state)) {
+ /*
+ * The task might be running due to a spinlock sleeper
+ * wakeup. Check the saved state and set it to running
+ * if the wakeup condition is true.
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER)) {
+ if (p->saved_state & state) {
+ p->saved_state = TASK_RUNNING;
+ success = 1;
+ }
+ }
goto out;
+ }
+
+ /*
+ * If this is a regular wakeup, then we can unconditionally
+ * clear the saved state of a "lock sleeper".
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER))
+ p->saved_state = TASK_RUNNING;

trace_sched_waking(p);

@@ -2103,6 +2212,18 @@ int wake_up_process(struct task_struct *p)
}
EXPORT_SYMBOL(wake_up_process);

+/**
+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
+ * @p: The process to be woken up.
+ *
+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
+ * the nature of the wakeup.
+ */
+int wake_up_lock_sleeper(struct task_struct *p)
+{
+ return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
+}
+
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
@@ -2372,6 +2493,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
+ task_thread_info(p)->preempt_lazy_count = 0;
+#endif
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
@@ -2412,7 +2536,7 @@ void wake_up_new_task(struct task_struct *p)
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
- * - cpus_allowed can change in the fork path
+ * - cpus_ptr can change in the fork path
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
@@ -2701,23 +2825,18 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
+ /*
+ * We use mmdrop_delayed() here so we don't have to do the
+ * full __mmdrop() when we are the last user.
+ */
if (mm) {
membarrier_mm_sync_core_before_usermode(mm);
- mmdrop(mm);
+ mmdrop_delayed(mm);
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);

- /*
- * Remove function-return probe instances associated with this
- * task and put them back on the free list.
- */
- kprobe_flush_task(prev);
-
- /* Task is done with its stack. */
- put_task_stack(prev);
-
put_task_struct_rcu_user(prev);
}

@@ -3405,6 +3524,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
BUG();
}

+static void migrate_disabled_sched(struct task_struct *p);
+
/*
* __schedule() is the main scheduler function.
*
@@ -3475,6 +3596,9 @@ static void __sched notrace __schedule(bool preempt)
rq_lock(rq, &rf);
smp_mb__after_spinlock();

+ if (__migrate_disabled(prev))
+ migrate_disabled_sched(prev);
+
/* Promote REQ to ACT */
rq->clock_update_flags <<= 1;
update_rq_clock(rq);
@@ -3497,6 +3621,7 @@ static void __sched notrace __schedule(bool preempt)

next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
+ clear_tsk_need_resched_lazy(prev);
clear_preempt_need_resched();

if (likely(prev != next)) {
@@ -3552,7 +3677,7 @@ void __noreturn do_task_dead(void)

static inline void sched_submit_work(struct task_struct *tsk)
{
- if (!tsk->state || tsk_is_pi_blocked(tsk))
+ if (!tsk->state)
return;

/*
@@ -3571,6 +3696,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
preempt_enable_no_resched();
}

+ if (tsk_is_pi_blocked(tsk))
+ return;
+
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
@@ -3688,6 +3816,30 @@ static void __sched notrace preempt_schedule_common(void)
} while (need_resched());
}

+#ifdef CONFIG_PREEMPT_LAZY
+/*
+ * If TIF_NEED_RESCHED is set then we allow to be scheduled away, since it is
+ * set by an RT task. Otherwise we try to avoid being scheduled out as long
+ * as the preempt_lazy_count counter is > 0.
+ */
+static __always_inline int preemptible_lazy(void)
+{
+ if (test_thread_flag(TIF_NEED_RESCHED))
+ return 1;
+ if (current_thread_info()->preempt_lazy_count)
+ return 0;
+ return 1;
+}
+
+#else
+
+static inline int preemptible_lazy(void)
+{
+ return 1;
+}
+
+#endif
+
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
@@ -3702,7 +3854,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
*/
if (likely(!preemptible()))
return;
-
+ if (!preemptible_lazy())
+ return;
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
@@ -3729,6 +3882,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
if (likely(!preemptible()))
return;

+ if (!preemptible_lazy())
+ return;
+
do {
/*
* Because the function tracer can trace preempt_count_sub()
@@ -4370,7 +4526,7 @@ static int __sched_setscheduler(struct task_struct *p,
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
- if (!cpumask_subset(span, &p->cpus_allowed) ||
+ if (!cpumask_subset(span, p->cpus_ptr) ||
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
@@ -4969,7 +5125,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
goto out_unlock;

raw_spin_lock_irqsave(&p->pi_lock, flags);
- cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+ cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
@@ -5508,7 +5664,9 @@ void init_idle(struct task_struct *idle, int cpu)

/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
-
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
+ task_thread_info(idle)->preempt_lazy_count = 0;
+#endif
/*
* The idle tasks have their own, simple scheduling class:
*/
@@ -5547,7 +5705,7 @@ int task_can_attach(struct task_struct *p,
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
- * before cpus_allowed may be changed.
+ * before cpus_mask may be changed.
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
@@ -5574,7 +5732,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
if (curr_cpu == target_cpu)
return 0;

- if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
+ if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
return -EINVAL;

/* TODO: This is not properly updating schedstats */
@@ -5613,6 +5771,7 @@ void sched_setnuma(struct task_struct *p, int nid)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_HOTPLUG_CPU
+
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
@@ -5712,8 +5871,10 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
BUG_ON(!next);
put_prev_task(rq, next);

+ WARN_ON_ONCE(__migrate_disabled(next));
+
/*
- * Rules for changing task_struct::cpus_allowed are holding
+ * Rules for changing task_struct::cpus_mask are holding
* both pi_lock and rq->lock, such that holding either
* stabilizes the mask.
*
@@ -6200,7 +6361,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
- int nested = preempt_count() + rcu_preempt_depth();
+ int nested = preempt_count() + sched_rcu_preempt_depth();

return (nested == preempt_offset);
}
@@ -7433,7 +7594,7 @@ static int __set_prefer_cpus_ptr(struct task_struct *p,
if (cpumask_equal(p->prefer_cpus, new_mask))
goto out;

- if (!cpumask_subset(new_mask, &p->cpus_allowed)) {
+ if (!cpumask_subset(new_mask, p->cpus_ptr)) {
ret = -EINVAL;
goto out;
}
@@ -7569,3 +7730,171 @@ const u32 sched_prio_to_wmult[40] = {
};

#undef CREATE_TRACE_POINTS
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+static inline void
+update_nr_migratory(struct task_struct *p, long delta)
+{
+ if (unlikely((p->sched_class == &rt_sched_class ||
+ p->sched_class == &dl_sched_class) &&
+ p->nr_cpus_allowed > 1)) {
+ if (p->sched_class == &rt_sched_class)
+ task_rq(p)->rt.rt_nr_migratory += delta;
+ else
+ task_rq(p)->dl.dl_nr_migratory += delta;
+ }
+}
+
+static inline void
+migrate_disable_update_cpus_allowed(struct task_struct *p)
+{
+ p->cpus_ptr = cpumask_of(smp_processor_id());
+ update_nr_migratory(p, -1);
+ p->nr_cpus_allowed = 1;
+}
+
+static inline void
+migrate_enable_update_cpus_allowed(struct task_struct *p)
+{
+ struct rq *rq;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+ p->cpus_ptr = &p->cpus_mask;
+ p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
+ update_nr_migratory(p, 1);
+ task_rq_unlock(rq, p, &rf);
+}
+
+void migrate_disable(void)
+{
+ preempt_disable();
+
+ if (++current->migrate_disable == 1) {
+ this_rq()->nr_pinned++;
+ preempt_lazy_disable();
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(current->pinned_on_cpu >= 0);
+ current->pinned_on_cpu = smp_processor_id();
+#endif
+ }
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+static void migrate_disabled_sched(struct task_struct *p)
+{
+ if (p->migrate_disable_scheduled)
+ return;
+
+ migrate_disable_update_cpus_allowed(p);
+ p->migrate_disable_scheduled = 1;
+}
+
+static DEFINE_PER_CPU(struct cpu_stop_work, migrate_work);
+static DEFINE_PER_CPU(struct migration_arg, migrate_arg);
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+ struct rq *rq = this_rq();
+ int cpu = task_cpu(p);
+
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+ if (p->migrate_disable > 1) {
+ p->migrate_disable--;
+ return;
+ }
+
+ preempt_disable();
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(current->pinned_on_cpu != cpu);
+ current->pinned_on_cpu = -1;
+#endif
+
+ WARN_ON_ONCE(rq->nr_pinned < 1);
+
+ p->migrate_disable = 0;
+ rq->nr_pinned--;
+#ifdef CONFIG_HOTPLUG_CPU
+ if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) &&
+ takedown_cpu_task)
+ wake_up_process(takedown_cpu_task);
+#endif
+
+ if (!p->migrate_disable_scheduled)
+ goto out;
+
+ p->migrate_disable_scheduled = 0;
+
+ migrate_enable_update_cpus_allowed(p);
+
+ WARN_ON(smp_processor_id() != cpu);
+ if (!is_cpu_allowed(p, cpu)) {
+ struct migration_arg __percpu *arg;
+ struct cpu_stop_work __percpu *work;
+ struct rq_flags rf;
+
+ work = this_cpu_ptr(&migrate_work);
+ arg = this_cpu_ptr(&migrate_arg);
+ WARN_ON_ONCE(!arg->done && !work->disabled && work->arg);
+
+ arg->task = p;
+ arg->done = false;
+
+ rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+ arg->dest_cpu = select_fallback_rq(cpu, p);
+ task_rq_unlock(rq, p, &rf);
+
+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ arg, work);
+ tlb_migrate_finish(p->mm);
+ }
+
+out:
+ preempt_lazy_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+
+int cpu_nr_pinned(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return rq->nr_pinned;
+}
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+static void migrate_disabled_sched(struct task_struct *p)
+{
+}
+
+void migrate_disable(void)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ current->migrate_disable++;
+#endif
+ barrier();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ struct task_struct *p = current;
+
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+ p->migrate_disable--;
+#endif
+ barrier();
+}
+EXPORT_SYMBOL(migrate_enable);
+#else
+static void migrate_disabled_sched(struct task_struct *p)
+{
+}
+#endif
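
The migrate_disable()/migrate_enable() pair added above gives RT a way to pin a task to its current CPU while keeping it preemptible, since preempt_disable() would forbid taking sleeping spinlocks. A rough usage sketch follows, not part of the patch; demo_counter and demo_update are invented names.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Illustrative sketch only -- not part of the patch. */
static DEFINE_PER_CPU(int, demo_counter);

static void demo_update(void)
{
	int *cnt;

	migrate_disable();		/* pin the task to this CPU */
	cnt = this_cpu_ptr(&demo_counter);
	*cnt += 1;			/* may be preempted, never migrated */
	migrate_enable();		/* may kick a deferred migration */
}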
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 50316455e..d57fb2f8a 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -124,14 +124,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
const struct sched_dl_entity *dl_se = &p->dl;

if (later_mask &&
- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+ cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
return 1;
} else {
int best_cpu = cpudl_maximum(cp);

WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

- if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
+ if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
if (later_mask)
cpumask_set_cpu(best_cpu, later_mask);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939..f7d2c10b4 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -98,11 +98,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
if (skip)
continue;

- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+ if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
continue;

if (lowest_mask) {
- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+ cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);

/*
* We have to ensure that we have at least one bit
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 6c4f93af1..a074b84ed 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -287,7 +287,7 @@ static void task_non_contending(struct task_struct *p)

dl_se->dl_non_contending = 1;
get_task_struct(p);
- hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
+ hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}

static void task_contending(struct sched_dl_entity *dl_se, int flags)
@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
* If we cannot preempt any rq, fall back to pick any
* online CPU:
*/
- cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+ cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
if (cpu >= nr_cpu_ids) {
/*
* Failed to find any suitable CPU.
@@ -1086,7 +1086,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->dl_timer;

- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
timer->function = dl_task_timer;
}

@@ -1325,7 +1325,7 @@ void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->inactive_timer;

- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
timer->function = inactive_task_timer;
}

@@ -1857,7 +1857,7 @@ static void set_curr_task_dl(struct rq *rq)
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
- cpumask_test_cpu(cpu, &p->cpus_allowed))
+ cpumask_test_cpu(cpu, p->cpus_ptr))
return 1;
return 0;
}
@@ -2035,7 +2035,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
*/
next_task = pick_next_pushable_dl_task(rq);
if (unlikely(next_task != task ||
- !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed))) {
+ !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr))) {
double_unlock_balance(rq, later_rq);
later_rq = NULL;
break;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index bcdfdaae3..589f451f8 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1034,6 +1034,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(dl.runtime);
P(dl.deadline);
}
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ P(migrate_disable);
+#endif
+ P(nr_cpus_allowed);
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1c78e2f29..0d5f153e3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1714,7 +1714,7 @@ static void task_numa_compare(struct task_numa_env *env,
* be incurred if the tasks were swapped.
*/
/* Skip this swap candidate if cannot move to the source cpu */
- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
goto unlock;

/*
@@ -1784,7 +1784,7 @@ static void task_numa_compare(struct task_numa_env *env,
*/
local_irq_disable();
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
- env->p->select_cpus = &env->p->cpus_allowed;
+ env->p->select_cpus = env->p->cpus_ptr;
#endif
env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
env->dst_cpu);
@@ -1815,7 +1815,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,

for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
- if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+ if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
continue;

env->dst_cpu = cpu;
@@ -4254,7 +4254,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
@@ -4278,7 +4278,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;

if (delta > ideal_runtime)
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
}

static void
@@ -4420,7 +4420,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* validating it and just reschedule.
*/
if (queued) {
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
return;
}
/*
@@ -4554,7 +4554,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
}

static __always_inline
@@ -5330,7 +5330,7 @@ static inline struct cpumask *task_prefer_cpus(struct task_struct *p)
return p->prefer_cpus;

if (task_group(p)->auto_affinity->mode == 0)
- return &p->cpus_allowed;
+ return p->cpus_ptr;

ad = &task_group(p)->auto_affinity->ad;
return ad->domains[ad->curr_level];
@@ -5764,7 +5764,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)

if (delta < 0) {
if (rq->curr == p)
- resched_curr(rq);
+ resched_curr_lazy(rq);
return;
}
hrtick_start(rq, delta);
@@ -6489,7 +6489,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
p->select_cpus))
#else
if (!cpumask_intersects(sched_group_span(group),
- &p->cpus_allowed))
+ p->cpus_ptr))
#endif
continue;

@@ -6625,7 +6625,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
for_each_cpu_and(i, sched_group_span(group), p->select_cpus) {
#else
- for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
+ for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
#endif
if (sched_idle_cpu(i))
return i;
@@ -6672,7 +6672,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
if (!cpumask_intersects(sched_domain_span(sd), p->select_cpus))
#else
- if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+ if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
#endif
return prev_cpu;

@@ -6793,7 +6793,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
cpumask_and(cpus, sched_domain_span(sd), p->select_cpus);
#else
- cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
#endif

for_each_cpu_wrap(core, cpus, target) {
@@ -6832,7 +6832,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
if (!cpumask_test_cpu(cpu, p->select_cpus) ||
!cpumask_test_cpu(cpu, sched_domain_span(sd)))
#else
- if (!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
!cpumask_test_cpu(cpu, sched_domain_span(sd)))
#endif
continue;
@@ -6898,7 +6898,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
cpumask_and(cpus, sched_domain_span(sd), p->select_cpus);
#else
- cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
#endif

for_each_cpu_wrap(cpu, cpus, target) {
@@ -6972,7 +6972,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
cpumask_test_cpu(p->recent_used_cpu, p->select_cpus)) {
#else
- cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
+ cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
#endif
/*
* Replace recent_used_cpu with prev as it is a potential
@@ -7214,8 +7214,8 @@ static inline bool prefer_cpus_valid(struct task_struct *p)
struct cpumask *prefer_cpus = task_prefer_cpus(p);

return !cpumask_empty(prefer_cpus) &&
- !cpumask_equal(prefer_cpus, &p->cpus_allowed) &&
- cpumask_subset(prefer_cpus, &p->cpus_allowed);
+ !cpumask_equal(prefer_cpus, p->cpus_ptr) &&
+ cpumask_subset(prefer_cpus, p->cpus_ptr);
}

/*
@@ -7321,7 +7321,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
time = schedstat_start_time();

#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
- p->select_cpus = &p->cpus_allowed;
+ p->select_cpus = p->cpus_ptr;
if (dynamic_affinity_used() || smart_grid_used())
set_task_select_cpus(p, &idlest_cpu, sd_flag);
#endif
@@ -7332,7 +7332,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
&& cpumask_test_cpu(cpu, p->select_cpus);
#else
- && cpumask_test_cpu(cpu, &p->cpus_allowed);
+ && cpumask_test_cpu(cpu, p->cpus_ptr);
#endif
}

@@ -7605,7 +7605,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;

preempt:
- resched_curr(rq);
+ resched_curr_lazy(rq);
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
@@ -8345,7 +8345,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
- * 2) cannot be migrated to this CPU due to cpus_allowed, or
+ * 2) cannot be migrated to this CPU due to cpus_ptr, or
* 3) running (obviously), or
* 4) are cache-hot on their current CPU.
*/
@@ -8353,13 +8353,13 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
return 0;

#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
- p->select_cpus = &p->cpus_allowed;
+ p->select_cpus = p->cpus_ptr;
if (dynamic_affinity_used() || smart_grid_used())
set_task_select_cpus(p, NULL, 0);

if (!cpumask_test_cpu(env->dst_cpu, p->select_cpus)) {
#else
- if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
+ if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
#endif
int cpu;

@@ -8383,7 +8383,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
if (cpumask_test_cpu(cpu, p->select_cpus)) {
#else
- if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
+ if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
#endif
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
@@ -8441,7 +8441,7 @@ can_migrate_task_llc(struct task_struct *p, struct rq *rq, struct rq *dst_rq)
if (throttled_lb_pair(task_group(p), cpu_of(rq), dst_cpu))
return false;

- if (!cpumask_test_cpu(dst_cpu, &p->cpus_allowed)) {
+ if (!cpumask_test_cpu(dst_cpu, p->cpus_ptr)) {
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
return false;
}
@@ -9039,7 +9039,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)

/*
* Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to ->cpus_allowed constraints.
+ * groups is inadequate due to ->cpus_ptr constraints.
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
@@ -9654,7 +9654,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
- * isn't true due to cpus_allowed constraints and the like.
+ * isn't true due to cpus_ptr constraints and the like.
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
@@ -10050,7 +10050,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
- if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+ if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
env.flags |= LBF_ALL_PINNED;
@@ -11185,7 +11185,7 @@ static void task_fork_fair(struct task_struct *p)
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
- resched_curr(rq);
+ resched_curr_lazy(rq);
}

se->vruntime -= cfs_rq->min_vruntime;
@@ -11209,7 +11209,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (rq->curr == p) {
if (p->prio > oldprio)
- resched_curr(rq);
+ resched_curr_lazy(rq);
} else
check_preempt_curr(rq, p, 0);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 515bfbcc6..550e236f4 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -46,11 +46,19 @@ SCHED_FEAT(LB_BIAS, true)
*/
SCHED_FEAT(NONTASK_CAPACITY, true)

+#ifdef CONFIG_PREEMPT_RT_FULL
+SCHED_FEAT(TTWU_QUEUE, false)
+# ifdef CONFIG_PREEMPT_LAZY
+SCHED_FEAT(PREEMPT_LAZY, true)
+# endif
+#else
+
/*
* Queue remote wakeups on the target CPU and process them
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
SCHED_FEAT(TTWU_QUEUE, true)
+#endif

/*
* When doing wakeups, attempt to limit superfluous scans of the LLC domain.
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 5dff9a6fe..9caa6e1c5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)

raw_spin_lock_init(&rt_b->rt_runtime_lock);

- hrtimer_init(&rt_b->rt_period_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}

@@ -1622,7 +1622,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
- cpumask_test_cpu(cpu, &p->cpus_allowed))
+ cpumask_test_cpu(cpu, p->cpus_ptr))
return 1;

return 0;
@@ -1782,7 +1782,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
struct task_struct *next_task = pick_next_pushable_task(rq);
if (unlikely(next_task != task ||
!rt_task(task) ||
- !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed))) {
+ !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) {

double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1d882a2b8..2c94e9a24 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1019,6 +1019,9 @@ struct rq {
	struct cpuidle_state	*idle_state;
#endif

+#if defined(CONFIG_PREEMPT_RT_BASE) && defined(CONFIG_SMP)
+	int			nr_pinned;
+#endif
	KABI_RESERVE(1)
	KABI_RESERVE(2)
};
@@ -1571,6 +1574,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
#define WF_SYNC		0x01	/* Waker goes to sleep after wakeup */
#define WF_FORK		0x02	/* Child wakeup after fork */
#define WF_MIGRATED	0x4	/* Internal use, task got migrated */
+#define WF_LOCK_SLEEPER	0x08	/* wakeup spinlock "sleeper" */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1768,6 +1772,15 @@ extern void reweight_task(struct task_struct *p, int prio);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

+#ifdef CONFIG_PREEMPT_LAZY
+extern void resched_curr_lazy(struct rq *rq);
+#else
+static inline void resched_curr_lazy(struct rq *rq)
+{
+	resched_curr(rq);
+}
+#endif
+
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
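resched_curr_lazy() backs the fair.c changes earlier in this patch: under CONFIG_PREEMPT_LAZY a fair-class preemption request only marks a separate "lazy" need-resched flag, which is honored at the next preemption point rather than forcing an immediate switch, while RT-class wakeups keep using resched_curr(). A self-contained userspace model of the two-flag idea (all names are illustrative simplifications, not the kernel's TIF flag machinery):

	#include <stdbool.h>
	#include <stdio.h>

	/* Model: NEED_RESCHED forces a switch at the next interrupt return,
	 * NEED_RESCHED_LAZY only at the next voluntary preemption point
	 * (e.g. preempt_enable()), which is what keeps throughput up. */
	struct task {
		bool need_resched;
		bool need_resched_lazy;
	};

	static void resched_curr(struct task *t)      { t->need_resched = true; }
	static void resched_curr_lazy(struct task *t) { t->need_resched_lazy = true; }

	/* irq exit: only the hard flag preempts here */
	static bool preempt_on_irq_exit(struct task *t) { return t->need_resched; }

	/* preemption point: either flag triggers a switch */
	static bool preempt_at_point(struct task *t)
	{
		return t->need_resched || t->need_resched_lazy;
	}

	int main(void)
	{
		struct task t = { 0 };

		resched_curr_lazy(&t);
		printf("irq exit: %d, preempt point: %d\n",
		       preempt_on_irq_exit(&t), preempt_at_point(&t));
		return 0;
	}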
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index 66b59ac77..119a56d7f 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -32,6 +32,25 @@ void swake_up_locked(struct swait_queue_head *q)
}
EXPORT_SYMBOL(swake_up_locked);

+void swake_up_all_locked(struct swait_queue_head *q)
+{
+	struct swait_queue *curr;
+	int wakes = 0;
+
+	while (!list_empty(&q->task_list)) {
+
+		curr = list_first_entry(&q->task_list, typeof(*curr),
+					task_list);
+		wake_up_process(curr->task);
+		list_del_init(&curr->task_list);
+		wakes++;
+	}
+	if (pm_in_action)
+		return;
+	WARN(wakes > 2, "complete_all() with %d waiters\n", wakes);
+}
+EXPORT_SYMBOL(swake_up_all_locked);
+
void swake_up_one(struct swait_queue_head *q)
{
	unsigned long flags;
@@ -51,6 +70,7 @@ void swake_up_all(struct swait_queue_head *q)
	struct swait_queue *curr;
	LIST_HEAD(tmp);

+	WARN_ON(irqs_disabled());
	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
@@ -69,7 +89,7 @@ void swake_up_all(struct swait_queue_head *q)
}
EXPORT_SYMBOL(swake_up_all);

-static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c
new file mode 100644
index 000000000..c90d14b9b
--- /dev/null
+++ b/kernel/sched/swork.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
+ *
+ * Provides a framework for enqueuing callbacks from irq context
+ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
+ */
+
+#include <linux/swait.h>
+#include <linux/swork.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define SWORK_EVENT_PENDING	1
+
+static DEFINE_MUTEX(worker_mutex);
+static struct sworker *glob_worker;
+
+struct sworker {
+	struct list_head events;
+	struct swait_queue_head wq;
+
+	raw_spinlock_t lock;
+
+	struct task_struct *task;
+	int refs;
+};
+
+static bool swork_readable(struct sworker *worker)
+{
+	bool r;
+
+	if (kthread_should_stop())
+		return true;
+
+	raw_spin_lock_irq(&worker->lock);
+	r = !list_empty(&worker->events);
+	raw_spin_unlock_irq(&worker->lock);
+
+	return r;
+}
+
+static int swork_kthread(void *arg)
+{
+	struct sworker *worker = arg;
+
+	for (;;) {
+		swait_event_interruptible_exclusive(worker->wq,
+					swork_readable(worker));
+		if (kthread_should_stop())
+			break;
+
+		raw_spin_lock_irq(&worker->lock);
+		while (!list_empty(&worker->events)) {
+			struct swork_event *sev;
+
+			sev = list_first_entry(&worker->events,
+					struct swork_event, item);
+			list_del(&sev->item);
+			raw_spin_unlock_irq(&worker->lock);
+
+			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
+							 &sev->flags));
+			sev->func(sev);
+			raw_spin_lock_irq(&worker->lock);
+		}
+		raw_spin_unlock_irq(&worker->lock);
+	}
+	return 0;
+}
+
+static struct sworker *swork_create(void)
+{
+	struct sworker *worker;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	if (!worker)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&worker->events);
+	raw_spin_lock_init(&worker->lock);
+	init_swait_queue_head(&worker->wq);
+
+	worker->task = kthread_run(swork_kthread, worker, "kswork");
+	if (IS_ERR(worker->task)) {
+		kfree(worker);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return worker;
+}
+
+static void swork_destroy(struct sworker *worker)
+{
+	kthread_stop(worker->task);
+
+	WARN_ON(!list_empty(&worker->events));
+	kfree(worker);
+}
+
+/**
+ * swork_queue - queue swork
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
+ *
+ * The work is queued and processed on a random CPU
+ */
+bool swork_queue(struct swork_event *sev)
+{
+	unsigned long flags;
+
+	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
+		return false;
+
+	raw_spin_lock_irqsave(&glob_worker->lock, flags);
+	list_add_tail(&sev->item, &glob_worker->events);
+	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
+
+	swake_up_one(&glob_worker->wq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(swork_queue);
+
+/**
+ * swork_get - get an instance of the sworker
+ *
+ * Returns a negative error code if the initialization of the worker
+ * failed, %0 otherwise.
+ *
+ */
+int swork_get(void)
+{
+	struct sworker *worker;
+
+	mutex_lock(&worker_mutex);
+	if (!glob_worker) {
+		worker = swork_create();
+		if (IS_ERR(worker)) {
+			mutex_unlock(&worker_mutex);
+			return -ENOMEM;
+		}
+
+		glob_worker = worker;
+	}
+
+	glob_worker->refs++;
+	mutex_unlock(&worker_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(swork_get);
+
+/**
+ * swork_put - puts an instance of the sworker
+ *
+ * Will destroy the sworker thread. This function must not be called until all
+ * queued events have been completed.
+ */
+void swork_put(void)
+{
+	mutex_lock(&worker_mutex);
+
+	glob_worker->refs--;
+	if (glob_worker->refs > 0)
+		goto out;
+
+	swork_destroy(glob_worker);
+	glob_worker = NULL;
+out:
+	mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
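A usage sketch for the swork API above, assuming the include/linux/swork.h added elsewhere in this patch declares struct swork_event and an INIT_SWORK() initializer (function and variable names below are hypothetical driver code):

	/* Defer work from hard irq context to the kswork thread. */
	static struct swork_event my_event;

	static void my_event_fn(struct swork_event *sev)
	{
		/* runs in kthread context, may sleep */
	}

	static int my_driver_init(void)
	{
		int err = swork_get();	/* create or reference the global worker */

		if (err)
			return err;
		INIT_SWORK(&my_event, my_event_fn);
		return 0;
	}

	static irqreturn_t my_irq_handler(int irq, void *dev)
	{
		swork_queue(&my_event);	/* PREEMPT_RT_FULL safe from irq context */
		return IRQ_HANDLED;
	}

	static void my_driver_exit(void)
	{
		swork_put();		/* last caller destroys the kswork thread */
	}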
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index ad5591520..1952bc9c0 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -294,6 +294,7 @@ static int init_rootdomain(struct root_domain *rd)
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
+	rd->rto_push_work.flags |= IRQ_WORK_HARD_IRQ;
#endif

	init_dl_bw(&rd->dl_bw);
diff --git a/kernel/signal.c b/kernel/signal.c
index 69b9d8bff..8f0407c3a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -19,6 +19,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
+#include <linux/sched/rt.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
@@ -400,13 +401,30 @@ void task_join_group_stop(struct task_struct *task)
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+	struct sigqueue *q = t->sigqueue_cache;
+
+	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
+		return NULL;
+	return q;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
+		return 0;
+	return 1;
+}
+
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
+		    int override_rlimit, int fromslab)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
@@ -428,7 +446,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		q = kmem_cache_alloc(sigqueue_cachep, flags);
+		if (!fromslab)
+			q = get_task_cache(t);
+		if (!q)
+			q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}
@@ -445,6 +466,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
	return q;
}

+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
+		 int override_rlimit)
+{
+	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
+}
+
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
@@ -454,6 +482,21 @@ static void __sigqueue_free(struct sigqueue *q)
	kmem_cache_free(sigqueue_cachep, q);
}

+static void sigqueue_free_current(struct sigqueue *q)
+{
+	struct user_struct *up;
+
+	if (q->flags & SIGQUEUE_PREALLOC)
+		return;
+
+	up = q->user;
+	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
+		if (atomic_dec_and_test(&up->sigpending))
+			free_uid(up);
+	} else
+		__sigqueue_free(q);
+}
+
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;
@@ -466,6 +509,21 @@ void flush_sigqueue(struct sigpending *queue)
	}
}

+/*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
+void flush_task_sigqueue(struct task_struct *tsk)
+{
+	struct sigqueue *q;
+
+	flush_sigqueue(&tsk->pending);
+
+	q = get_task_cache(tsk);
+	if (q)
+		kmem_cache_free(sigqueue_cachep, q);
+}
+
/*
 * Flush all pending signals for this kthread.
 */
@@ -589,7 +647,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

-		__sigqueue_free(first);
+		sigqueue_free_current(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
@@ -626,6 +684,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
	bool resched_timer = false;
	int signr;

+	WARN_ON_ONCE(tsk != current);
+
	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
@@ -1289,8 +1349,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
-int
-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+static int
+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
@@ -1319,6 +1379,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
	return ret;
}

+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+/*
+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
+ * since it cannot enable preemption, and the signal code's spin_locks
+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
+ * send the signal on exit of the trap.
+ */
+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+	if (in_atomic()) {
+		if (WARN_ON_ONCE(t != current))
+			return 0;
+		if (WARN_ON_ONCE(t->forced_info.si_signo))
+			return 0;
+
+		if (is_si_special(info)) {
+			WARN_ON_ONCE(info != SEND_SIG_PRIV);
+			t->forced_info.si_signo = sig;
+			t->forced_info.si_errno = 0;
+			t->forced_info.si_code = SI_KERNEL;
+			t->forced_info.si_pid = 0;
+			t->forced_info.si_uid = 0;
+		} else {
+			t->forced_info = *info;
+		}
+
+		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+		return 0;
+	}
+#endif
+	return do_force_sig_info(sig, info, t);
+}
+
/*
 * Nuke all other threads in the group.
 */
@@ -1735,7 +1828,8 @@ EXPORT_SYMBOL(kill_pid);
 */
struct sigqueue *sigqueue_alloc(void)
{
-	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
+	/* Preallocated sigqueue objects always come from the slab cache! */
+	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;
@@ -2105,15 +2199,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

-		/*
-		 * Don't want to allow preemption here, because
-		 * sys_ptrace() needs this task to be inactive.
-		 *
-		 * XXX: implement read_unlock_no_resched().
-		 */
-		preempt_disable();
		read_unlock(&tasklist_lock);
-		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
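The get_task_cache()/put_task_cache() pair above implements a single-slot, lock-free per-task object cache: take and put race safely through compare-and-exchange, and a failed put simply falls back to the slab allocator. A self-contained userspace analogue of the same idiom (illustrative, C11 atomics instead of the kernel's cmpxchg()):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* One cached object; NULL means the slot is empty. */
	static _Atomic(void *) slot;

	static void *cache_get(void)
	{
		void *q = atomic_load(&slot);

		/* claim the cached object, tolerating concurrent takers */
		if (q && atomic_compare_exchange_strong(&slot, &q, NULL))
			return q;
		return NULL;
	}

	static int cache_put(void *q)
	{
		void *expect = NULL;

		/* 0 on success, 1 if the slot was already occupied */
		return atomic_compare_exchange_strong(&slot, &expect, q) ? 0 : 1;
	}

	int main(void)
	{
		void *obj = malloc(64);

		if (cache_put(obj))		/* slot occupied: free instead */
			free(obj);
		void *hit = cache_get();	/* fast path, no allocator call */
		printf("cache hit: %s\n", hit ? "yes" : "no");
		free(hit);
		return 0;
	}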
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 99a047f70..9ee6a8ab7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,11 +21,14 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
+#include <linux/locallock.h>
#include <linux/irq.h>
+#include <linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -56,12 +59,136 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define TIMER_SOFTIRQS	((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
+DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
+#endif

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

+#ifdef CONFIG_NO_HZ_COMMON
+# ifdef CONFIG_PREEMPT_RT_FULL
+
+struct softirq_runner {
+	struct task_struct *runner[NR_SOFTIRQS];
+};
+
+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
+
+static inline void softirq_set_runner(unsigned int sirq)
+{
+	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+
+	sr->runner[sirq] = current;
+}
+
+static inline void softirq_clr_runner(unsigned int sirq)
+{
+	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+
+	sr->runner[sirq] = NULL;
+}
+
+static bool softirq_check_runner_tsk(struct task_struct *tsk,
+				     unsigned int *pending)
+{
+	bool ret = false;
+
+	if (!tsk)
+		return ret;
+
+	/*
+	 * The wakeup code in rtmutex.c wakes up the task
+	 * _before_ it sets pi_blocked_on to NULL under
+	 * tsk->pi_lock. So we need to check for both: state
+	 * and pi_blocked_on.
+	 * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the
+	 * task does cpu_chill().
+	 */
+	raw_spin_lock(&tsk->pi_lock);
+	if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING ||
+	    (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) {
+		/* Clear all bits pending in that task */
+		*pending &= ~(tsk->softirqs_raised);
+		ret = true;
+	}
+	raw_spin_unlock(&tsk->pi_lock);
+
+	return ret;
+}
+
+/*
+ * On preempt-rt a softirq running context might be blocked on a
+ * lock. There might be no other runnable task on this CPU because the
+ * lock owner runs on some other CPU. So we have to go into idle with
+ * the pending bit set. Therefore we need to check this otherwise we
+ * warn about false positives which confuses users and defeats the
+ * whole purpose of this test.
+ *
+ * This code is called with interrupts disabled.
+ */
+void softirq_check_pending_idle(void)
+{
+	struct task_struct *tsk;
+	static int rate_limit;
+	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+	u32 warnpending;
+	int i;
+
+	if (rate_limit >= 10)
+		return;
+
+	warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
+	if (!warnpending)
+		return;
+	for (i = 0; i < NR_SOFTIRQS; i++) {
+		tsk = sr->runner[i];
+
+		if (softirq_check_runner_tsk(tsk, &warnpending))
+			warnpending &= ~(1 << i);
+	}
+
+	if (warnpending) {
+		tsk = __this_cpu_read(ksoftirqd);
+		softirq_check_runner_tsk(tsk, &warnpending);
+	}
+
+	if (warnpending) {
+		tsk = __this_cpu_read(ktimer_softirqd);
+		softirq_check_runner_tsk(tsk, &warnpending);
+	}
+
+	if (warnpending) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       warnpending);
+		rate_limit++;
+	}
+}
+# else
+/*
+ * On !PREEMPT_RT we just printk rate limited:
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+
+	if (rate_limit < 10 &&
+	    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       local_softirq_pending());
+		rate_limit++;
+	}
+}
+# endif
+
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline void softirq_set_runner(unsigned int sirq) { }
+static inline void softirq_clr_runner(unsigned int sirq) { }
+#endif
+
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
@@ -77,6 +204,38 @@ static void wakeup_softirqd(void)
		wake_up_process(tsk);
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+static void wakeup_timer_softirqd(void)
+{
+	/* Interrupts are disabled: no need to stop preemption */
+	struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
+
+	if (tsk && tsk->state != TASK_RUNNING)
+		wake_up_process(tsk);
+}
+#endif
+
+static void handle_softirq(unsigned int vec_nr)
+{
+	struct softirq_action *h = softirq_vec + vec_nr;
+	int prev_count;
+
+	prev_count = preempt_count();
+
+	kstat_incr_softirqs_this_cpu(vec_nr);
+
+	trace_softirq_entry(vec_nr);
+	h->action(h);
+	trace_softirq_exit(vec_nr);
+	if (unlikely(prev_count != preempt_count())) {
+		pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+		       vec_nr, softirq_to_name[vec_nr], h->action,
+		       prev_count, preempt_count());
+		preempt_count_set(prev_count);
+	}
+}
+
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
@@ -92,6 +251,47 @@ static bool ksoftirqd_running(unsigned long pending)
	return tsk && (tsk->state == TASK_RUNNING);
}

+static inline int ksoftirqd_softirq_pending(void)
+{
+	return local_softirq_pending();
+}
+
+static void handle_pending_softirqs(u32 pending)
+{
+	struct softirq_action *h = softirq_vec;
+	int softirq_bit;
+
+	local_irq_enable();
+
+	h = softirq_vec;
+
+	while ((softirq_bit = ffs(pending))) {
+		unsigned int vec_nr;
+
+		h += softirq_bit - 1;
+		vec_nr = h - softirq_vec;
+		handle_softirq(vec_nr);
+
+		h++;
+		pending >>= softirq_bit;
+	}
+
+	rcu_bh_qs();
+	local_irq_disable();
+}
+
+static void run_ksoftirqd(unsigned int cpu)
+{
+	local_irq_disable();
+	if (ksoftirqd_softirq_pending()) {
+		__do_softirq();
+		local_irq_enable();
+		cond_resched();
+		return;
+	}
+	local_irq_enable();
+}
+
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -251,10 +451,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
-	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
-	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -273,36 +471,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

-	local_irq_enable();
-
-	h = softirq_vec;
-
-	while ((softirq_bit = ffs(pending))) {
-		unsigned int vec_nr;
-		int prev_count;
-
-		h += softirq_bit - 1;
-
-		vec_nr = h - softirq_vec;
-		prev_count = preempt_count();
-
-		kstat_incr_softirqs_this_cpu(vec_nr);
-
-		trace_softirq_entry(vec_nr);
-		h->action(h);
-		trace_softirq_exit(vec_nr);
-		if (unlikely(prev_count != preempt_count())) {
-			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
-			       vec_nr, softirq_to_name[vec_nr], h->action,
-			       prev_count, preempt_count());
-			preempt_count_set(prev_count);
-		}
-		h++;
-		pending >>= softirq_bit;
-	}
-
-	rcu_bh_qs();
-	local_irq_disable();
+	handle_pending_softirqs(pending);

	pending = local_softirq_pending();
	if (pending) {
@@ -338,6 +507,309 @@ asmlinkage __visible void do_softirq(void)
	local_irq_restore(flags);
}

+/*
+ * This function must run with irqs disabled!
+ */
+void raise_softirq_irqoff(unsigned int nr)
+{
+	__raise_softirq_irqoff(nr);
+
+	/*
+	 * If we're in an interrupt or softirq, we're done
+	 * (this also catches softirq-disabled code). We will
+	 * actually run the softirq once we return from
+	 * the irq or softirq.
+	 *
+	 * Otherwise we wake up ksoftirqd to make sure we
+	 * schedule the softirq soon.
+	 */
+	if (!in_interrupt())
+		wakeup_softirqd();
+}
+
+void __raise_softirq_irqoff(unsigned int nr)
+{
+	trace_softirq_raise(nr);
+	or_softirq_pending(1UL << nr);
+}
+
+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+static void ksoftirqd_set_sched_params(unsigned int cpu) { }
+
+#else /* !PREEMPT_RT_FULL */
+
+/*
+ * On RT we serialize softirq execution with a cpu local lock per softirq
+ */
+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
+
+void __init softirq_early_init(void)
+{
+	int i;
+
+	for (i = 0; i < NR_SOFTIRQS; i++)
+		local_irq_lock_init(local_softirq_locks[i]);
+}
+
+static void lock_softirq(int which)
+{
+	local_lock(local_softirq_locks[which]);
+}
+
+static void unlock_softirq(int which)
+{
+	local_unlock(local_softirq_locks[which]);
+}
+
+static void do_single_softirq(int which)
+{
+	unsigned long old_flags = current->flags;
+
+	current->flags &= ~PF_MEMALLOC;
+	vtime_account_irq_enter(current);
+	current->flags |= PF_IN_SOFTIRQ;
+	lockdep_softirq_enter();
+	local_irq_enable();
+	handle_softirq(which);
+	local_irq_disable();
+	lockdep_softirq_exit();
+	current->flags &= ~PF_IN_SOFTIRQ;
+	vtime_account_irq_enter(current);
+	current_restore_flags(old_flags, PF_MEMALLOC);
+}
+
+/*
+ * Called with interrupts disabled. Process softirqs which were raised
+ * in current context (or on behalf of ksoftirqd).
+ */
+static void do_current_softirqs(void)
+{
+	while (current->softirqs_raised) {
+		int i = __ffs(current->softirqs_raised);
+		unsigned int pending, mask = (1U << i);
+
+		current->softirqs_raised &= ~mask;
+		local_irq_enable();
+
+		/*
+		 * If the lock is contended, we boost the owner to
+		 * process the softirq or leave the critical section
+		 * now.
+		 */
+		lock_softirq(i);
+		local_irq_disable();
+		softirq_set_runner(i);
+		/*
+		 * Check with the local_softirq_pending() bits,
+		 * whether we need to process this still or if someone
+		 * else took care of it.
+		 */
+		pending = local_softirq_pending();
+		if (pending & mask) {
+			set_softirq_pending(pending & ~mask);
+			do_single_softirq(i);
+		}
+		softirq_clr_runner(i);
+		WARN_ON(current->softirq_nestcnt != 1);
+		local_irq_enable();
+		unlock_softirq(i);
+		local_irq_disable();
+	}
+}
+
+void __local_bh_disable(void)
+{
+	if (++current->softirq_nestcnt == 1)
+		migrate_disable();
+}
+EXPORT_SYMBOL(__local_bh_disable);
+
+void __local_bh_enable(void)
+{
+	if (WARN_ON(current->softirq_nestcnt == 0))
+		return;
+
+	local_irq_disable();
+	if (current->softirq_nestcnt == 1 && current->softirqs_raised)
+		do_current_softirqs();
+	local_irq_enable();
+
+	if (--current->softirq_nestcnt == 0)
+		migrate_enable();
+}
+EXPORT_SYMBOL(__local_bh_enable);
+
+void _local_bh_enable(void)
+{
+	if (WARN_ON(current->softirq_nestcnt == 0))
+		return;
+	if (--current->softirq_nestcnt == 0)
+		migrate_enable();
+}
+EXPORT_SYMBOL(_local_bh_enable);
+
+int in_serving_softirq(void)
+{
+	return current->flags & PF_IN_SOFTIRQ;
+}
+EXPORT_SYMBOL(in_serving_softirq);
+
+/* Called with preemption disabled */
+static void run_ksoftirqd(unsigned int cpu)
+{
+	local_irq_disable();
+	current->softirq_nestcnt++;
+
+	do_current_softirqs();
+	current->softirq_nestcnt--;
+	local_irq_enable();
+	cond_resched();
+}
+
+/*
+ * Called from netif_rx_ni(). Preemption enabled, but migration
+ * disabled. So the cpu can't go away under us.
+ */
+void thread_do_softirq(void)
+{
+	if (!in_serving_softirq() && current->softirqs_raised) {
+		current->softirq_nestcnt++;
+		do_current_softirqs();
+		current->softirq_nestcnt--;
+	}
+}
+
+static void do_raise_softirq_irqoff(unsigned int nr)
+{
+	unsigned int mask;
+
+	mask = 1UL << nr;
+
+	trace_softirq_raise(nr);
+	or_softirq_pending(mask);
+
+	/*
+	 * If we are not in a hard interrupt and inside a bh disabled
+	 * region, we simply raise the flag on current. local_bh_enable()
+	 * will make sure that the softirq is executed. Otherwise we
+	 * delegate it to ksoftirqd.
+	 */
+	if (!in_irq() && current->softirq_nestcnt)
+		current->softirqs_raised |= mask;
+	else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
+		return;
+
+	if (mask & TIMER_SOFTIRQS)
+		__this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
+	else
+		__this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
+}
+
+static void wakeup_proper_softirq(unsigned int nr)
+{
+	if ((1UL << nr) & TIMER_SOFTIRQS)
+		wakeup_timer_softirqd();
+	else
+		wakeup_softirqd();
+}
+
+void __raise_softirq_irqoff(unsigned int nr)
+{
+	do_raise_softirq_irqoff(nr);
+	if (!in_irq() && !current->softirq_nestcnt)
+		wakeup_proper_softirq(nr);
+}
+
+/*
+ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
+ */
+void __raise_softirq_irqoff_ksoft(unsigned int nr)
+{
+	unsigned int mask;
+
+	if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
+			 !__this_cpu_read(ktimer_softirqd)))
+		return;
+	mask = 1UL << nr;
+
+	trace_softirq_raise(nr);
+	or_softirq_pending(mask);
+	if (mask & TIMER_SOFTIRQS)
+		__this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
+	else
+		__this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
+	wakeup_proper_softirq(nr);
+}
+
+/*
+ * This function must run with irqs disabled!
+ */
+void raise_softirq_irqoff(unsigned int nr)
+{
+	do_raise_softirq_irqoff(nr);
+
+	/*
+	 * If we're in a hard interrupt we let the irq return code deal
+	 * with the wakeup of ksoftirqd.
+	 */
+	if (in_irq())
+		return;
+	/*
+	 * If we are in thread context but outside of a bh disabled
+	 * region, we need to wake ksoftirqd as well.
+	 *
+	 * CHECKME: Some of the places which do that could be wrapped
+	 * into local_bh_disable/enable pairs. Though it's unclear
+	 * whether this is worth the effort. To find those places just
+	 * raise a WARN() if the condition is met.
+	 */
+	if (!current->softirq_nestcnt)
+		wakeup_proper_softirq(nr);
+}
+
+static inline int ksoftirqd_softirq_pending(void)
+{
+	return current->softirqs_raised;
+}
+
+static inline void local_bh_disable_nort(void) { }
+static inline void _local_bh_enable_nort(void) { }
+
+static inline void ksoftirqd_set_sched_params(unsigned int cpu)
+{
+	/* Take over all but timer pending softirqs when starting */
+	local_irq_disable();
+	current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
+	local_irq_enable();
+}
+
+static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
+{
+	struct sched_param param = { .sched_priority = 1 };
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+
+	/* Take over timer pending softirqs when starting */
+	local_irq_disable();
+	current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
+	local_irq_enable();
+}
+
+static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
+						    bool online)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	sched_setscheduler(current, SCHED_NORMAL, &param);
+}
+
+static int ktimer_softirqd_should_run(unsigned int cpu)
+{
+	return current->softirqs_raised;
+}
+
+#endif /* PREEMPT_RT_FULL */
/*
 * Enter an interrupt context.
 */
@@ -350,9 +822,9 @@ void irq_enter(void)
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
-		local_bh_disable();
+		local_bh_disable_nort();
		tick_irq_enter();
-		_local_bh_enable();
+		_local_bh_enable_nort();
	}

	__irq_enter();
@@ -360,6 +832,7 @@ void irq_enter(void)

static inline void invoke_softirq(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
	if (ksoftirqd_running(local_softirq_pending()))
		return;

@@ -382,6 +855,18 @@ static inline void invoke_softirq(void)
	} else {
		wakeup_softirqd();
	}
+#else /* PREEMPT_RT_FULL */
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (__this_cpu_read(ksoftirqd) &&
+	    __this_cpu_read(ksoftirqd)->softirqs_raised)
+		wakeup_softirqd();
+	if (__this_cpu_read(ktimer_softirqd) &&
+	    __this_cpu_read(ktimer_softirqd)->softirqs_raised)
+		wakeup_timer_softirqd();
+	local_irq_restore(flags);
+#endif
}

static inline void tick_irq_exit(void)
@@ -417,26 +902,6 @@ void irq_exit(void)
	trace_hardirq_exit(); /* must be last! */
}

-/*
- * This function must run with irqs disabled!
- */
-inline void raise_softirq_irqoff(unsigned int nr)
-{
-	__raise_softirq_irqoff(nr);
-
-	/*
-	 * If we're in an interrupt or softirq, we're done
-	 * (this also catches softirq-disabled code). We will
-	 * actually run the softirq once we return from
-	 * the irq or softirq.
-	 *
-	 * Otherwise we wake up ksoftirqd to make sure we
-	 * schedule the softirq soon.
-	 */
-	if (!in_interrupt())
-		wakeup_softirqd();
-}
-
void raise_softirq(unsigned int nr)
{
	unsigned long flags;
@@ -446,12 +911,6 @@ void raise_softirq(unsigned int nr)
	local_irq_restore(flags);
}

-void __raise_softirq_irqoff(unsigned int nr)
-{
-	trace_softirq_raise(nr);
-	or_softirq_pending(1UL << nr);
-}
-
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
@@ -476,11 +935,44 @@ static void __tasklet_schedule_common(struct tasklet_struct *t,
	unsigned long flags;

	local_irq_save(flags);
+	if (!tasklet_trylock(t)) {
+		local_irq_restore(flags);
+		return;
+	}
+
	head = this_cpu_ptr(headp);
-	t->next = NULL;
-	*head->tail = t;
-	head->tail = &(t->next);
-	raise_softirq_irqoff(softirq_nr);
+again:
+	/* We may have been preempted before tasklet_trylock
+	 * and __tasklet_action may have already run.
+	 * So double check the sched bit while the tasklet
+	 * is locked before adding it to the list.
+	 */
+	if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+		if (test_and_set_bit(TASKLET_STATE_CHAINED, &t->state)) {
+			tasklet_unlock(t);
+			return;
+		}
+#endif
+		t->next = NULL;
+		*head->tail = t;
+		head->tail = &(t->next);
+		raise_softirq_irqoff(softirq_nr);
+		tasklet_unlock(t);
+	} else {
+		/* This is subtle. If we hit the corner case above,
+		 * it is possible that we get preempted right here,
+		 * and another task has successfully called
+		 * tasklet_schedule(), then this function, and
+		 * failed on the trylock. Thus we must be sure
+		 * before releasing the tasklet lock, that the
+		 * SCHED_BIT is clear. Otherwise the tasklet
+		 * may get its SCHED_BIT set, but not added to the
+		 * list
+		 */
+		if (!tasklet_tryunlock(t))
+			goto again;
+	}
	local_irq_restore(flags);
}

@@ -498,11 +990,21 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

+void tasklet_enable(struct tasklet_struct *t)
+{
+	if (!atomic_dec_and_test(&t->count))
+		return;
+	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+		tasklet_schedule(t);
+}
+EXPORT_SYMBOL(tasklet_enable);
+
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;
+	int loops = 1000000;

	local_irq_disable();
	list = tl_head->head;
@@ -514,25 +1016,60 @@ static void tasklet_action_common(struct softirq_action *a,
		struct tasklet_struct *t = list;

		list = list->next;
+		/*
+		 * Should always succeed - after a tasklet got on the
+		 * list (after getting the SCHED bit set from 0 to 1),
+		 * nothing but the tasklet softirq it got queued to can
+		 * lock it:
+		 */
+		if (!tasklet_trylock(t)) {
+			WARN_ON(1);
+			continue;
+		}
+
+		t->next = NULL;

-		if (tasklet_trylock(t)) {
-			if (!atomic_read(&t->count)) {
-				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-							&t->state))
-					BUG();
-				t->func(t->data);
+		if (unlikely(atomic_read(&t->count))) {
+out_disabled:
+			/* implicit unlock: */
+			wmb();
+			t->state = TASKLET_STATEF_PENDING;
+			continue;
+		}
+		/*
+		 * After this point on the tasklet might be rescheduled
+		 * on another CPU, but it can only be added to another
+		 * CPU's tasklet list if we unlock the tasklet (which we
+		 * don't do yet).
+		 */
+		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+			WARN_ON(1);
+again:
+		t->func(t->data);
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+		while (cmpxchg(&t->state, TASKLET_STATEF_RC, 0) != TASKLET_STATEF_RC) {
+#else
+		while (!tasklet_tryunlock(t)) {
+#endif
+			/*
+			 * If it got disabled meanwhile, bail out:
+			 */
+			if (atomic_read(&t->count))
+				goto out_disabled;
+			/*
+			 * If it got scheduled meanwhile, re-execute
+			 * the tasklet function:
+			 */
+			if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				goto again;
+			if (!--loops) {
+				printk("hm, tasklet state: %08lx\n", t->state);
+				WARN_ON(1);
				tasklet_unlock(t);
-				continue;
+				break;
			}
-			tasklet_unlock(t);
		}
-
-		local_irq_disable();
-		t->next = NULL;
-		*tl_head->tail = t;
-		tl_head->tail = &t->next;
-		__raise_softirq_irqoff(softirq_nr);
-		local_irq_enable();
	}
}

@@ -564,7 +1101,7 @@ void tasklet_kill(struct tasklet_struct *t)

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
-			yield();
+			msleep(1);
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
@@ -638,25 +1175,26 @@ void __init softirq_init(void)
		open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

-static int ksoftirqd_should_run(unsigned int cpu)
-{
-	return local_softirq_pending();
-}
-
-static void run_ksoftirqd(unsigned int cpu)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+void tasklet_unlock_wait(struct tasklet_struct *t)
{
-	local_irq_disable();
-	if (local_softirq_pending()) {
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		/*
-		 * We can safely run softirq on inline stack, as we are not deep
-		 * in the task stack here.
+		 * Hack for now to avoid this busy-loop:
		 */
-		__do_softirq();
-		local_irq_enable();
-		cond_resched();
-		return;
+#ifdef CONFIG_PREEMPT_RT_FULL
+		msleep(1);
+#else
+		barrier();
+#endif
	}
-	local_irq_enable();
+}
+EXPORT_SYMBOL(tasklet_unlock_wait);
+#endif
+
+static int ksoftirqd_should_run(unsigned int cpu)
+{
+	return ksoftirqd_softirq_pending();
}

#ifdef CONFIG_HOTPLUG_CPU
@@ -723,17 +1261,31 @@ static int takeover_tasklets(unsigned int cpu)

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
+	.setup			= ksoftirqd_set_sched_params,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

+#ifdef CONFIG_PREEMPT_RT_FULL
+static struct smp_hotplug_thread softirq_timer_threads = {
+	.store			= &ktimer_softirqd,
+	.setup			= ktimer_softirqd_set_sched_params,
+	.cleanup		= ktimer_softirqd_clr_sched_params,
+	.thread_should_run	= ktimer_softirqd_should_run,
+	.thread_fn		= run_ksoftirqd,
+	.thread_comm		= "ktimersoftd/%u",
+};
+#endif
+
static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
-
+#ifdef CONFIG_PREEMPT_RT_FULL
+	BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
+#endif
	return 0;
}
early_initcall(spawn_ksoftirqd);
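The key RT behavior added above: raising a softirq inside a bh-disabled section only marks it on the current task (softirqs_raised), and the pending work runs inline when the outermost local_bh_enable() is reached; otherwise it is delegated to ksoftirqd/ktimersoftd. A self-contained userspace model of that deferral (all names are illustrative, single-threaded for clarity):

	#include <stdio.h>

	static int softirq_nestcnt;
	static unsigned int softirqs_raised;

	static void local_bh_disable_model(void) { softirq_nestcnt++; }

	static void raise_softirq_model(unsigned int nr)
	{
		if (softirq_nestcnt)
			softirqs_raised |= 1U << nr;	/* defer to bh enable */
		else
			printf("delegate softirq %u to ksoftirqd\n", nr);
	}

	static void local_bh_enable_model(void)
	{
		if (softirq_nestcnt == 1 && softirqs_raised) {
			printf("running pending softirqs %#x inline\n",
			       softirqs_raised);
			softirqs_raised = 0;
		}
		softirq_nestcnt--;
	}

	int main(void)
	{
		local_bh_disable_model();
		raise_softirq_model(3);		/* deferred */
		local_bh_enable_model();	/* runs it here */
		raise_softirq_model(3);		/* delegated */
		return 0;
	}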
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 7e103738f..a2b1319a1 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -92,8 +92,11 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
-	else if (work->done)
-		cpu_stop_signal_done(work->done);
+	else {
+		work->disabled = true;
+		if (work->done)
+			cpu_stop_signal_done(work->done);
+	}
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 56af8a97c..85c7f4a6b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -438,7 +438,7 @@ int alarm_cancel(struct alarm *alarm)
		int ret = alarm_try_to_cancel(alarm);
		if (ret >= 0)
			return ret;
-		cpu_relax();
+		hrtimer_grab_expiry_lock(&alarm->timer);
	}
}
EXPORT_SYMBOL_GPL(alarm_cancel);
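alarm_cancel() now waits for a running expiry callback through hrtimer_grab_expiry_lock(), which is added in the kernel/time/hrtimer.c hunk below: the timer softirq holds an expiry lock while it runs callbacks, so a canceller that cannot make progress simply locks and unlocks that lock, blocking (and on RT, priority-boosting the callback runner via the rtmutex-backed lock) until the in-flight callback finishes. A self-contained userspace model of the lock/unlock handshake (pthread mutex standing in for the kernel lock; PI boosting would additionally need PTHREAD_PRIO_INHERIT):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;

	static void run_callbacks(void)
	{
		pthread_mutex_lock(&expiry_lock);
		/* ... expire timers, run handlers ... */
		pthread_mutex_unlock(&expiry_lock);
	}

	static void wait_for_running_callback(void)
	{
		/* lock/unlock pair: returns only once the runner dropped it */
		pthread_mutex_lock(&expiry_lock);
		pthread_mutex_unlock(&expiry_lock);
	}

	int main(void)
	{
		run_callbacks();
		wait_for_running_callback();
		puts("callback finished");
		return 0;
	}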
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 8512f06f0..a522cebda 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -150,6 +150,11 @@ static struct hrtimer_cpu_base migration_cpu_base = {

#define migration_base	migration_cpu_base.clock_base[0]

+static inline bool is_migration_base(struct hrtimer_clock_base *base)
+{
+	return base == &migration_base;
+}
+
/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
@@ -274,6 +279,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,

#else /* CONFIG_SMP */

+static inline bool is_migration_base(struct hrtimer_clock_base *base)
+{
+	return false;
+}
+
static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
@@ -957,6 +967,16 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(hrtimer_forward);

+void hrtimer_grab_expiry_lock(const struct hrtimer *timer)
+{
+	struct hrtimer_clock_base *base = READ_ONCE(timer->base);
+
+	if (timer->is_soft && !is_migration_base(base)) {
+		spin_lock(&base->cpu_base->softirq_expiry_lock);
+		spin_unlock(&base->cpu_base->softirq_expiry_lock);
+	}
+}
+
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
@@ -1175,7 +1195,9 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
	 * match.
	 */
+#ifndef CONFIG_PREEMPT_RT_BASE
	WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
+#endif

	base = lock_hrtimer_base(timer, &flags);

@@ -1238,7 +1260,7 @@ int hrtimer_cancel(struct hrtimer *timer)

		if (ret >= 0)
			return ret;
-		cpu_relax();
+		hrtimer_grab_expiry_lock(timer);
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1335,10 +1357,17 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
-	bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
-	int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
+	bool softtimer;
+	int base;
	struct hrtimer_cpu_base *cpu_base;

+	softtimer = !!(mode & HRTIMER_MODE_SOFT);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (!softtimer && !(mode & HRTIMER_MODE_HARD))
+		softtimer = true;
+#endif
+	base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
+
	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);
@@ -1535,6 +1564,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
	unsigned long flags;
	ktime_t now;

+	spin_lock(&cpu_base->softirq_expiry_lock);
	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	now = hrtimer_update_base(cpu_base);
@@ -1544,6 +1574,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
	hrtimer_update_softirq_timer(cpu_base, true);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+	spin_unlock(&cpu_base->softirq_expiry_lock);
}

#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1715,13 +1746,52 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
	return HRTIMER_NORESTART;
}

-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
+static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
+				   clockid_t clock_id,
+				   enum hrtimer_mode mode,
+				   struct task_struct *task)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (!(mode & (HRTIMER_MODE_SOFT | HRTIMER_MODE_HARD))) {
+		if (task_is_realtime(current) || system_state != SYSTEM_RUNNING)
+			mode |= HRTIMER_MODE_HARD;
+		else
+			mode |= HRTIMER_MODE_SOFT;
+	}
+#endif
+	__hrtimer_init(&sl->timer, clock_id, mode);
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
+
+/**
+ * hrtimer_init_sleeper - initialize sleeper to the given clock
+ * @sl:		sleeper to be initialized
+ * @clock_id:	the clock to be used
+ * @mode:	timer mode abs/rel
+ * @task:	the task to wake up
+ */
+void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
+			  enum hrtimer_mode mode, struct task_struct *task)
+{
+	debug_init(&sl->timer, clock_id, mode);
+	__hrtimer_init_sleeper(sl, clock_id, mode, task);
+
+}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
+				   clockid_t clock_id,
+				   enum hrtimer_mode mode,
+				   struct task_struct *task)
+{
+	debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
+	__hrtimer_init_sleeper(sl, clock_id, mode, task);
+}
+EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
+#endif
+
int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
	switch(restart->nanosleep.type) {
@@ -1745,8 +1815,6 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
{
	struct restart_block *restart;

-	hrtimer_init_sleeper(t, current);
-
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
@@ -1754,12 +1822,12 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
		if (likely(t->task))
			freezable_schedule();

+		__set_current_state(TASK_RUNNING);
		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

-	__set_current_state(TASK_RUNNING);

	if (!t->task)
		return 0;
@@ -1783,10 +1851,9 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
	struct hrtimer_sleeper t;
	int ret;

-	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
-				HRTIMER_MODE_ABS);
+	hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
+				      HRTIMER_MODE_ABS, current);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
-
	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
@@ -1804,7 +1871,7 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp,
	if (dl_task(current) || rt_task(current))
		slack = 0;

-	hrtimer_init_on_stack(&t.timer, clockid, mode);
+	hrtimer_init_sleeper_on_stack(&t, clockid, mode, current);
	hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
	ret = do_nanosleep(&t, mode);
	if (ret != -ERESTART_RESTARTBLOCK)
@@ -1866,6 +1933,38 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
}
#endif

+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Sleep for 1 ms in hope whoever holds what we want will let it go.
+ */
+void cpu_chill(void)
+{
+	unsigned int freeze_flag = current->flags & PF_NOFREEZE;
+	struct task_struct *self = current;
+	ktime_t chill_time;
+
+	raw_spin_lock_irq(&self->pi_lock);
+	self->saved_state = self->state;
+	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+	raw_spin_unlock_irq(&self->pi_lock);
+
+	chill_time = ktime_set(0, NSEC_PER_MSEC);
+
+	current->flags |= PF_NOFREEZE;
+	sleeping_lock_inc();
+	schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD);
+	sleeping_lock_dec();
+	if (!freeze_flag)
+		current->flags &= ~PF_NOFREEZE;
+
+	raw_spin_lock_irq(&self->pi_lock);
+	__set_current_state_no_track(self->saved_state);
+	self->saved_state = TASK_RUNNING;
+	raw_spin_unlock_irq(&self->pi_lock);
+}
+EXPORT_SYMBOL(cpu_chill);
+#endif
+
/*
 * Functions related to boot-time initialization:
 */
@@ -1887,6 +1986,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
	cpu_base->softirq_next_timer = NULL;
	cpu_base->expires_next = KTIME_MAX;
	cpu_base->softirq_expires_next = KTIME_MAX;
+	spin_lock_init(&cpu_base->softirq_expiry_lock);
	return 0;
}

@@ -2005,11 +2105,9 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
		return -EINTR;
	}

-	hrtimer_init_on_stack(&t.timer, clock_id, mode);
+	hrtimer_init_sleeper_on_stack(&t, clock_id, mode, current);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

-	hrtimer_init_sleeper(&t, current);
-
	hrtimer_start_expires(&t.timer, mode);

	if (likely(t.task))
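On the cpu_chill() helper above: RT uses it to replace busy-wait retry loops of the cpu_relax() kind, which can livelock when the resource holder is a preempted task on the same CPU, with a 1 ms hard-timer sleep that lets the holder run. A sketch of the intended call-site pattern (illustrative only; try_to_grab_resource() is a hypothetical stand-in, not a function from this patch):

	retry:
		if (!try_to_grab_resource(res)) {
			cpu_chill();	/* sleep 1 ms instead of spinning */
			goto retry;
		}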
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 2e2b335ef..48d977f94 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -211,6 +211,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
	/* We are sharing ->siglock with it_real_fn() */
	if (hrtimer_try_to_cancel(timer) < 0) {
		spin_unlock_irq(&tsk->sighand->siglock);
+		hrtimer_grab_expiry_lock(timer);
		goto again;
	}
	expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 497719127..62acb8914 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
	.max_cycles	= 10,
};

-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
+__cacheline_aligned_in_smp seqcount_t jiffies_seq;

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
@@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
	u64 ret;

	do {
-		seq = read_seqbegin(&jiffies_lock);
+		seq = read_seqcount_begin(&jiffies_seq);
		ret = jiffies_64;
-	} while (read_seqretry(&jiffies_lock, seq));
+	} while (read_seqcount_retry(&jiffies_seq, seq));
	return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
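This hunk splits the old seqlock into its two components, a raw spinlock for writer exclusion and a bare seqcount for the lockless readers shown above, so the writer-side lock stays a true spinlock on RT. The writer side, updated in the tick code elsewhere in this patch series, then pairs the two roughly like this (a sketch, not a hunk shown in this section):

	raw_spin_lock(&jiffies_lock);		/* exclude other writers */
	write_seqcount_begin(&jiffies_seq);	/* make readers retry */
	jiffies_64 += ticks;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);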
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
|
|
index 5ea7ccfb6..8af8cedaa 100644
|
|
--- a/kernel/time/posix-cpu-timers.c
|
|
+++ b/kernel/time/posix-cpu-timers.c
|
|
@@ -3,8 +3,10 @@
|
|
* Implement CPU time clocks for the POSIX clock interface.
|
|
*/
|
|
|
|
+#include <uapi/linux/sched/types.h>
|
|
#include <linux/sched/signal.h>
|
|
#include <linux/sched/cputime.h>
|
|
+#include <linux/sched/rt.h>
|
|
#include <linux/posix-timers.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/math64.h>
|
|
@@ -15,6 +17,7 @@
|
|
#include <linux/workqueue.h>
|
|
#include <linux/compat.h>
|
|
#include <linux/sched/deadline.h>
|
|
+#include <linux/smpboot.h>
|
|
|
|
#include "posix-timers.h"
|
|
|
|
@@ -785,6 +788,7 @@ check_timers_list(struct list_head *timers,
|
|
return t->expires;
|
|
|
|
t->firing = 1;
|
|
+ t->firing_cpu = smp_processor_id();
|
|
list_move_tail(&t->entry, firing);
|
|
}
|
|
|
|
@@ -1131,18 +1135,31 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
|
|
return 0;
|
|
}
|
|
|
|
+static DEFINE_PER_CPU(spinlock_t, cpu_timer_expiry_lock) = __SPIN_LOCK_UNLOCKED(cpu_timer_expiry_lock);
|
|
+
|
|
+void cpu_timers_grab_expiry_lock(struct k_itimer *timer)
|
|
+{
|
|
+ int cpu = timer->it.cpu.firing_cpu;
|
|
+
|
|
+ if (cpu >= 0) {
|
|
+ spinlock_t *expiry_lock = per_cpu_ptr(&cpu_timer_expiry_lock, cpu);
|
|
+
|
|
+ spin_lock_irq(expiry_lock);
|
|
+ spin_unlock_irq(expiry_lock);
|
|
+ }
|
|
+}
|
|
+
|
|
/*
|
|
* This is called from the timer interrupt handler. The irq handler has
|
|
* already updated our counts. We need to check if any timers fire now.
|
|
* Interrupts are disabled.
|
|
*/
|
|
-void run_posix_cpu_timers(struct task_struct *tsk)
|
|
+static void __run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
-
- lockdep_assert_irqs_disabled();
+ spinlock_t *expiry_lock;

/*
* The fast path checks that there are no expired thread or thread
@@ -1151,8 +1168,13 @@ void run_posix_cpu_timers(struct task_struct *tsk)
if (!fastpath_timer_check(tsk))
return;

- if (!lock_task_sighand(tsk, &flags))
+ expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock);
+ spin_lock(expiry_lock);
+
+ if (!lock_task_sighand(tsk, &flags)) {
+ spin_unlock(expiry_lock);
return;
+ }
/*
* Here we take off tsk->signal->cpu_timers[N] and
* tsk->cpu_timers[N] all the timers that are firing, and
@@ -1185,6 +1207,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
list_del_init(&timer->it.cpu.entry);
cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
+ timer->it.cpu.firing_cpu = -1;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
@@ -1194,8 +1217,156 @@ void run_posix_cpu_timers(struct task_struct *tsk)
cpu_timer_fire(timer);
spin_unlock(&timer->it_lock);
}
+ spin_unlock(expiry_lock);
+}
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
+DEFINE_PER_CPU(bool, posix_timer_th_active);
+
+static void posix_cpu_kthread_fn(unsigned int cpu)
+{
+ struct task_struct *tsk = NULL;
+ struct task_struct *next = NULL;
+
+ BUG_ON(per_cpu(posix_timer_task, cpu) != current);
+
+ /* grab task list */
+ raw_local_irq_disable();
+ tsk = per_cpu(posix_timer_tasklist, cpu);
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+ raw_local_irq_enable();
+
+ /* its possible the list is empty, just return */
+ if (!tsk)
+ return;
+
+ /* Process task list */
+ while (1) {
+ /* save next */
+ next = tsk->posix_timer_list;
+
+ /* run the task timers, clear its ptr and
+ * unreference it
+ */
+ __run_posix_cpu_timers(tsk);
+ tsk->posix_timer_list = NULL;
+ put_task_struct(tsk);
+
+ /* check if this is the last on the list */
+ if (next == tsk)
+ break;
+ tsk = next;
+ }
+}
+
+static inline int __fastpath_timer_check(struct task_struct *tsk)
+{
+ /* tsk == current, ensure it is safe to use ->signal/sighand */
+ if (unlikely(tsk->exit_state))
+ return 0;
+
+ if (!task_cputime_zero(&tsk->cputime_expires))
+ return 1;
+
+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
+ return 1;
+
+ return 0;
}

+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+ struct task_struct *tasklist;
+
+ BUG_ON(!irqs_disabled());
+
+ if (per_cpu(posix_timer_th_active, cpu) != true)
+ return;
+
+ /* get per-cpu references */
+ tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+ /* check to see if we're already queued */
+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+ get_task_struct(tsk);
+ if (tasklist) {
+ tsk->posix_timer_list = tasklist;
+ } else {
+ /*
+ * The list is terminated by a self-pointing
+ * task_struct
+ */
+ tsk->posix_timer_list = tsk;
+ }
+ per_cpu(posix_timer_tasklist, cpu) = tsk;
+
+ wake_up_process(per_cpu(posix_timer_task, cpu));
+ }
+}
+
+static int posix_cpu_kthread_should_run(unsigned int cpu)
+{
+ return __this_cpu_read(posix_timer_tasklist) != NULL;
+}
+
+static void posix_cpu_kthread_park(unsigned int cpu)
+{
+ this_cpu_write(posix_timer_th_active, false);
+}
+
+static void posix_cpu_kthread_unpark(unsigned int cpu)
+{
+ this_cpu_write(posix_timer_th_active, true);
+}
+
+static void posix_cpu_kthread_setup(unsigned int cpu)
+{
+ struct sched_param sp;
+
+ sp.sched_priority = MAX_RT_PRIO - 1;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+ posix_cpu_kthread_unpark(cpu);
+}
+
+static struct smp_hotplug_thread posix_cpu_thread = {
+ .store = &posix_timer_task,
+ .thread_should_run = posix_cpu_kthread_should_run,
+ .thread_fn = posix_cpu_kthread_fn,
+ .thread_comm = "posixcputmr/%u",
+ .setup = posix_cpu_kthread_setup,
+ .park = posix_cpu_kthread_park,
+ .unpark = posix_cpu_kthread_unpark,
+};
+
+static int __init posix_cpu_thread_init(void)
+{
+ /* Start one for boot CPU. */
+ unsigned long cpu;
+ int ret;
+
+ /* init the per-cpu posix_timer_tasklets */
+ for_each_possible_cpu(cpu)
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+
+ ret = smpboot_register_percpu_thread(&posix_cpu_thread);
+ WARN_ON(ret);
+
+ return 0;
+}
+early_initcall(posix_cpu_thread_init);
+#else /* CONFIG_PREEMPT_RT_BASE */
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+ lockdep_assert_irqs_disabled();
+ __run_posix_cpu_timers(tsk);
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
@@ -1314,6 +1485,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
spin_unlock_irq(&timer.it_lock);

while (error == TIMER_RETRY) {
+
+ cpu_timers_grab_expiry_lock(&timer);
/*
* We need to handle case when timer was or is in the
* middle of firing. In other cases we already freed
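/*
 * Annotation: the PREEMPT_RT_BASE block above queues woken tasks on a
 * per-CPU singly linked list whose tail points at itself, so that
 * "next == node" marks the end. A minimal userspace sketch of that list
 * discipline follows; every identifier here is illustrative, not kernel
 * API, and locking is omitted for brevity.
 */
#include <stdio.h>
#include <stddef.h>

struct task {
	const char *name;
	struct task *posix_timer_list;	/* NULL means not queued */
};

static struct task *tasklist;		/* per-CPU head in the real code */

static void enqueue(struct task *tsk)
{
	if (tsk->posix_timer_list)
		return;			/* already queued */
	/* the first entry terminates the list by pointing at itself */
	tsk->posix_timer_list = tasklist ? tasklist : tsk;
	tasklist = tsk;
}

static void drain(void)
{
	struct task *tsk = tasklist, *next;

	tasklist = NULL;
	if (!tsk)
		return;
	for (;;) {
		next = tsk->posix_timer_list;
		tsk->posix_timer_list = NULL;
		printf("running timers for %s\n", tsk->name);
		if (next == tsk)	/* self-pointer: last entry */
			break;
		tsk = next;
	}
}

int main(void)
{
	struct task a = { "a", NULL }, b = { "b", NULL };

	enqueue(&a);
	enqueue(&b);
	drain();	/* LIFO order: prints "b" then "a" */
	return 0;
}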
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index d86fdabc5..2fe47ec39 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -463,7 +463,7 @@ static struct k_itimer * alloc_posix_timer(void)

static void k_itimer_rcu_free(struct rcu_head *head)
{
- struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
+ struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);

kmem_cache_free(posix_timers_cache, tmr);
}
@@ -480,7 +480,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
}
put_pid(tmr->it_pid);
sigqueue_free(tmr->sigq);
- call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
+ call_rcu(&tmr->rcu, k_itimer_rcu_free);
}

static int common_timer_create(struct k_itimer *new_timer)
@@ -826,6 +826,17 @@ static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
return hrtimer_try_to_cancel(&timr->it.real.timer);
}

+static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timer)
+{
+ if (kc->timer_arm == common_hrtimer_arm)
+ hrtimer_grab_expiry_lock(&timer->it.real.timer);
+ else if (kc == &alarm_clock)
+ hrtimer_grab_expiry_lock(&timer->it.alarm.alarmtimer.timer);
+ else
+ /* posix-cpu-timers */
+ cpu_timers_grab_expiry_lock(timer);
+}
+
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec64 *new_setting,
@@ -891,11 +902,15 @@ static int do_timer_settime(timer_t timer_id, int flags,
else
error = kc->timer_set(timr, flags, new_spec64, old_spec64);

- unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
+ rcu_read_lock();
+ unlock_timer(timr, flag);
+ timer_wait_for_callback(kc, timr);
+ rcu_read_unlock();
old_spec64 = NULL; // We already got the old time...
goto retry;
}
+ unlock_timer(timr, flag);

return error;
}
@@ -957,13 +972,21 @@ int common_timer_del(struct k_itimer *timer)
return 0;
}

-static inline int timer_delete_hook(struct k_itimer *timer)
+static int timer_delete_hook(struct k_itimer *timer)
{
const struct k_clock *kc = timer->kclock;
+ int ret;

if (WARN_ON_ONCE(!kc || !kc->timer_del))
return -EINVAL;
- return kc->timer_del(timer);
+ ret = kc->timer_del(timer);
+ if (ret == TIMER_RETRY) {
+ rcu_read_lock();
+ spin_unlock_irq(&timer->it_lock);
+ timer_wait_for_callback(kc, timer);
+ rcu_read_unlock();
+ }
+ return ret;
}

/* Delete a POSIX.1b interval timer. */
@@ -977,10 +1000,8 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
if (!timer)
return -EINVAL;

- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
+ if (timer_delete_hook(timer) == TIMER_RETRY)
goto retry_delete;
- }

spin_lock(&current->sighand->siglock);
list_del(&timer->list);
@@ -1006,10 +1027,9 @@ static void itimer_delete(struct k_itimer *timer)
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);

- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
+ if (timer_delete_hook(timer) == TIMER_RETRY)
goto retry_delete;
- }
+
list_del(&timer->list);
/*
* This keeps any tasks waiting on the spin lock from thinking
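/*
 * Annotation: in the do_timer_settime()/timer_delete_hook() hunks above,
 * rcu_read_lock() keeps the k_itimer alive while it_lock is dropped to
 * wait for a running callback. A hedged userspace stand-in follows that
 * uses a plain reference count instead of RCU (the kernel uses RCU here;
 * the refcount only sketches the same keep-alive idea). All identifiers
 * are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ktimer {
	atomic_int refcnt;
};

static void timer_get(struct ktimer *t)
{
	atomic_fetch_add(&t->refcnt, 1);
}

static void timer_put(struct ktimer *t)
{
	if (atomic_fetch_sub(&t->refcnt, 1) == 1)
		free(t);	/* last reference frees the object */
}

static void settime_retry(struct ktimer *t)
{
	timer_get(t);	/* plays the role of rcu_read_lock() */
	/* ...drop the timer lock, wait for the callback, retry... */
	timer_put(t);	/* plays the role of rcu_read_unlock() */
}

int main(void)
{
	struct ktimer *t = calloc(1, sizeof(*t));

	atomic_init(&t->refcnt, 1);
	settime_retry(t);	/* object stays valid across the wait */
	timer_put(t);
	return 0;
}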
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index ddb211452..725bd230a 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -32,6 +32,8 @@ extern const struct k_clock clock_process;
extern const struct k_clock clock_thread;
extern const struct k_clock alarm_clock;

+extern void cpu_timers_grab_expiry_lock(struct k_itimer *timer);
+
int posix_timer_event(struct k_itimer *timr, int si_private);

void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting);
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index a836efd34..c50e8f326 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -107,7 +107,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)

void tick_setup_hrtimer_broadcast(void)
{
- hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
bctimer.function = bc_handler;
clockevents_register_device(&ce_broadcast_hrtimer);
}
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 0a3cc37e4..7bd136b64 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -80,13 +80,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);

/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);

do_timer(1);
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}

@@ -158,9 +160,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;

do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
next = tick_next_period;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));

clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

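/*
 * Annotation: the tick-common.c hunks above split the jiffies seqlock
 * into a raw spinlock for writers plus an explicit sequence count for
 * lockless readers. A compact userspace model of that split follows
 * (a mutex standing in for the raw spinlock); memory ordering is
 * simplified and all names are illustrative, not the kernel code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t jiffies_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint jiffies_seq;
static uint64_t jiffies64;

static void do_timer_tick(void)
{
	pthread_mutex_lock(&jiffies_lock);		/* raw_spin_lock() */
	atomic_fetch_add_explicit(&jiffies_seq, 1,	/* write_seqcount_begin() */
				  memory_order_release);
	jiffies64++;
	atomic_fetch_add_explicit(&jiffies_seq, 1,	/* write_seqcount_end() */
				  memory_order_release);
	pthread_mutex_unlock(&jiffies_lock);		/* raw_spin_unlock() */
}

static uint64_t get_jiffies(void)
{
	unsigned int seq;
	uint64_t val;

	do {	/* read_seqcount_begin() / read_seqcount_retry() */
		seq = atomic_load_explicit(&jiffies_seq, memory_order_acquire);
		val = jiffies64;
	} while ((seq & 1) ||	/* odd count: a writer is in progress */
		 atomic_load_explicit(&jiffies_seq,
				      memory_order_acquire) != seq);
	return val;
}

int main(void)
{
	do_timer_tick();
	printf("jiffies=%llu\n", (unsigned long long)get_jiffies());
	return 0;
}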
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 59c28d912..ee40a2c26 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -67,7 +67,8 @@ static void tick_do_update_jiffies64(ktime_t now)
return;

/* Reevaluate with jiffies_lock held */
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);

delta = ktime_sub(now, last_jiffies_update);
if (delta >= tick_period) {
@@ -90,10 +91,12 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
} else {
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
return;
}
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}

@@ -104,12 +107,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;

- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
return period;
}

@@ -232,6 +237,7 @@ static void nohz_full_kick_func(struct irq_work *work)

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_func,
+ .flags = IRQ_WORK_HARD_IRQ,
};

/*
@@ -659,10 +665,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)

/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
basemono = last_jiffies_update;
basejiff = jiffies;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
ts->last_jiffies = basejiff;
ts->timer_expires_base = basemono;

@@ -893,14 +899,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;

if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
- static int ratelimit;
-
- if (ratelimit < 10 &&
- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
- pr_warn("NOHZ: local_softirq_pending %02x\n",
- (unsigned int) local_softirq_pending());
- ratelimit++;
- }
+ softirq_check_pending_idle();
return false;
}

@@ -1319,7 +1318,7 @@ void tick_setup_sched_timer(void)
/*
* Emulate tick processing via per-CPU hrtimers:
*/
- hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
ts->sched_timer.function = tick_sched_timer;

/* Get the next period (per-CPU) */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 0ebfe476b..f25a19919 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2399,8 +2399,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
do_timer(ticks);
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 141ab3ab0..099737f6f 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -25,7 +25,8 @@ static inline void sched_clock_resume(void) { }
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);

-extern seqlock_t jiffies_lock;
+extern raw_spinlock_t jiffies_lock;
+extern seqcount_t jiffies_seq;

#define CS_NAME_LEN 32

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 8b6b33b81..4b8d3c37f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -198,6 +198,7 @@ EXPORT_SYMBOL(jiffies_64);
struct timer_base {
raw_spinlock_t lock;
struct timer_list *running_timer;
+ spinlock_t expiry_lock;
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
@@ -214,8 +215,7 @@ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);

-static void timer_update_keys(struct work_struct *work);
-static DECLARE_WORK(timer_update_work, timer_update_keys);
+static struct swork_event timer_update_swork;

#ifdef CONFIG_SMP
unsigned int sysctl_timer_migration = 1;
@@ -233,7 +233,7 @@ static void timers_update_migration(void)
static inline void timers_update_migration(void) { }
#endif /* !CONFIG_SMP */

-static void timer_update_keys(struct work_struct *work)
+static void timer_update_keys(struct swork_event *event)
{
mutex_lock(&timer_keys_mutex);
timers_update_migration();
@@ -243,9 +243,17 @@ static void timer_update_keys(struct work_struct *work)

void timers_update_nohz(void)
{
- schedule_work(&timer_update_work);
+ swork_queue(&timer_update_swork);
}

+static __init int hrtimer_init_thread(void)
+{
+ WARN_ON(swork_get());
+ INIT_SWORK(&timer_update_swork, timer_update_keys);
+ return 0;
+}
+early_initcall(hrtimer_init_thread);
+
int timer_migration_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -1220,14 +1228,8 @@ int del_timer(struct timer_list *timer)
}
EXPORT_SYMBOL(del_timer);

-/**
- * try_to_del_timer_sync - Try to deactivate a timer
- * @timer: timer to delete
- *
- * This function tries to deactivate a timer. Upon successful (ret >= 0)
- * exit the timer is not queued and the handler is not running on any CPU.
- */
-int try_to_del_timer_sync(struct timer_list *timer)
+static int __try_to_del_timer_sync(struct timer_list *timer,
+ struct timer_base **basep)
{
struct timer_base *base;
unsigned long flags;
@@ -1235,7 +1237,7 @@ int try_to_del_timer_sync(struct timer_list *timer)

debug_assert_init(timer);

- base = lock_timer_base(timer, &flags);
+ *basep = base = lock_timer_base(timer, &flags);

if (base->running_timer != timer)
ret = detach_if_pending(timer, base, true);
@@ -1244,9 +1246,42 @@ int try_to_del_timer_sync(struct timer_list *timer)

return ret;
}
+
+/**
+ * try_to_del_timer_sync - Try to deactivate a timer
+ * @timer: timer to delete
+ *
+ * This function tries to deactivate a timer. Upon successful (ret >= 0)
+ * exit the timer is not queued and the handler is not running on any CPU.
+ */
+int try_to_del_timer_sync(struct timer_list *timer)
+{
+ struct timer_base *base;
+
+ return __try_to_del_timer_sync(timer, &base);
+}
EXPORT_SYMBOL(try_to_del_timer_sync);

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+static int __del_timer_sync(struct timer_list *timer)
+{
+ struct timer_base *base;
+ int ret;
+
+ for (;;) {
+ ret = __try_to_del_timer_sync(timer, &base);
+ if (ret >= 0)
+ return ret;
+
+ /*
+ * When accessing the lock, timers of base are no longer expired
+ * and so timer is no longer running.
+ */
+ spin_lock(&base->expiry_lock);
+ spin_unlock(&base->expiry_lock);
+ }
+}
+
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
@@ -1302,12 +1337,8 @@ int del_timer_sync(struct timer_list *timer)
* could lead to deadlock.
*/
WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
- for (;;) {
- int ret = try_to_del_timer_sync(timer);
- if (ret >= 0)
- return ret;
- cpu_relax();
- }
+
+ return __del_timer_sync(timer);
}
EXPORT_SYMBOL(del_timer_sync);
#endif
@@ -1367,13 +1398,20 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)

fn = timer->function;

- if (timer->flags & TIMER_IRQSAFE) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
+ timer->flags & TIMER_IRQSAFE) {
raw_spin_unlock(&base->lock);
call_timer_fn(timer, fn);
+ base->running_timer = NULL;
+ spin_unlock(&base->expiry_lock);
+ spin_lock(&base->expiry_lock);
raw_spin_lock(&base->lock);
} else {
raw_spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn);
+ base->running_timer = NULL;
+ spin_unlock(&base->expiry_lock);
+ spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
}
}
@@ -1670,6 +1708,7 @@ static inline void __run_timers(struct timer_base *base)
if (!time_after_eq(jiffies, base->clk))
return;

+ spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);

/*
@@ -1696,8 +1735,8 @@ static inline void __run_timers(struct timer_base *base)
while (levels--)
expire_timers(base, heads + levels);
}
- base->running_timer = NULL;
raw_spin_unlock_irq(&base->lock);
+ spin_unlock(&base->expiry_lock);
}

/*
@@ -1707,6 +1746,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

+ irq_work_tick_soft();
+
__run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
@@ -1942,6 +1983,7 @@ static void __init init_timer_cpu(int cpu)
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
+ spin_lock_init(&base->expiry_lock);
}
}

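/*
 * Annotation: a hedged pthread model of the expiry_lock handshake the
 * timer.c hunks above introduce. The expiry side holds expiry_lock
 * around callbacks and cycles it between them; a deleter that lost the
 * race blocks on that lock (sleeping on RT) instead of spinning with
 * cpu_relax(). All names are illustrative, not kernel API.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int timer_running;	/* base->running_timer, simplified */

static void *run_timers(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&expiry_lock);
	timer_running = 1;
	usleep(100 * 1000);		/* the timer callback */
	timer_running = 0;
	/* cycle the lock so blocked deleters get a chance */
	pthread_mutex_unlock(&expiry_lock);
	pthread_mutex_lock(&expiry_lock);
	pthread_mutex_unlock(&expiry_lock);
	return NULL;
}

static void del_timer_sync_model(void)
{
	for (;;) {
		if (!timer_running)	/* __try_to_del_timer_sync() */
			return;
		/*
		 * A callback is in flight: once expiry_lock is ours,
		 * the callback has finished, so retry the delete.
		 */
		pthread_mutex_lock(&expiry_lock);
		pthread_mutex_unlock(&expiry_lock);
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, run_timers, NULL);
	usleep(10 * 1000);		/* let the expiry side start */
	del_timer_sync_model();
	printf("timer deleted, callback complete\n");
	pthread_join(t, NULL);
	return 0;
}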
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 64fd3c3f8..13d9e885d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2125,6 +2125,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
struct task_struct *tsk = current;

entry->preempt_count = pc & 0xff;
+ entry->preempt_lazy_count = preempt_lazy_count();
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
@@ -2135,8 +2136,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+
+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

@@ -3338,14 +3342,17 @@ get_total_entries(struct trace_buffer *buf,

static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _------=> CPU# \n"
- "# / _-----=> irqs-off \n"
- "# | / _----=> need-resched \n"
- "# || / _---=> hardirq/softirq \n"
- "# ||| / _--=> preempt-depth \n"
- "# |||| / delay \n"
- "# cmd pid ||||| time | caller \n"
- "# \\ / ||||| \\ | / \n");
+ seq_puts(m, "# _--------=> CPU# \n"
+ "# / _-------=> irqs-off \n"
+ "# | / _------=> need-resched \n"
+ "# || / _-----=> need-resched_lazy \n"
+ "# ||| / _----=> hardirq/softirq \n"
+ "# |||| / _---=> preempt-depth \n"
+ "# ||||| / _--=> preempt-lazy-depth\n"
+ "# |||||| / _-=> migrate-disable \n"
+ "# ||||||| / delay \n"
+ "# cmd pid |||||||| time | caller \n"
+ "# \\ / |||||||| \\ | / \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
@@ -3383,15 +3390,17 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
tgid ? tgid_space : space);
- seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
+ seq_printf(m, "# %s| / _---=> need-resched_lazy\n",
+ tgid ? tgid_space : space);
+ seq_printf(m, "# %s|| / _--=> hardirq/softirq\n",
tgid ? tgid_space : space);
- seq_printf(m, "# %s|| / _--=> preempt-depth\n",
+ seq_printf(m, "# %s||| / preempt-depth\n",
tgid ? tgid_space : space);
- seq_printf(m, "# %s||| / delay\n",
+ seq_printf(m, "# %s|||| / delay\n",
tgid ? tgid_space : space);
- seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
+ seq_printf(m, "# TASK-PID %sCPU# ||||| TIMESTAMP FUNCTION\n",
tgid ? " TGID " : space);
- seq_printf(m, "# | | %s | |||| | |\n",
+ seq_printf(m, "# | | %s | ||||| | |\n",
tgid ? " | " : space);
}

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d05230d21..08841925d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -127,6 +127,7 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
+ * NEED_RESCHED_LAZY - lazy reschedule is requested
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
@@ -136,6 +137,7 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
TRACE_FLAG_NMI = 0x40,
+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
};

#define TRACE_BUF_SIZE 1024
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index a000adbbe..1c3dc788a 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -188,6 +188,8 @@ static int trace_define_common_fields(void)
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
+ __common_field(unsigned char, migrate_disable);
+ __common_field(unsigned char, preempt_lazy_count);

return ret;
}
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index c6cd54cf7..ae84fa5be 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -279,7 +279,7 @@ static void move_to_next_cpu(void)
* of this thread, than stop migrating for the duration
* of the current test.
*/
- if (!cpumask_equal(current_mask, &current->cpus_allowed))
+ if (!cpumask_equal(current_mask, current->cpus_ptr))
goto disable;

get_online_cpus();
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 6e6cc64fa..3f78b0afb 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -448,6 +448,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
char hardsoft_irq;
char need_resched;
+ char need_resched_lazy;
char irqs_off;
int hardirq;
int softirq;
@@ -478,6 +479,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
break;
}

+ need_resched_lazy =
+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
+
hardsoft_irq =
(nmi && hardirq) ? 'Z' :
nmi ? 'z' :
@@ -486,14 +490,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
softirq ? 's' :
'.' ;

- trace_seq_printf(s, "%c%c%c",
- irqs_off, need_resched, hardsoft_irq);
+ trace_seq_printf(s, "%c%c%c%c",
+ irqs_off, need_resched, need_resched_lazy,
+ hardsoft_irq);

if (entry->preempt_count)
trace_seq_printf(s, "%x", entry->preempt_count);
else
trace_seq_putc(s, '.');

+ if (entry->preempt_lazy_count)
+ trace_seq_printf(s, "%x", entry->preempt_lazy_count);
+ else
+ trace_seq_putc(s, '.');
+
+ if (entry->migrate_disable)
+ trace_seq_printf(s, "%x", entry->migrate_disable);
+ else
+ trace_seq_putc(s, '.');
+
return !trace_seq_has_overflowed(s);
}

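/*
 * Annotation: a self-contained decoder for the widened latency-format
 * field that trace_print_lat_fmt() emits above (irqs-off, need-resched,
 * the new need-resched-lazy 'L', hardirq/softirq). The flag values
 * match the trace.h hunk; the decoding is simplified (the kernel also
 * distinguishes 'n'/'p' resched variants and NMI context).
 */
#include <stdio.h>

#define TRACE_FLAG_IRQS_OFF		0x01
#define TRACE_FLAG_NEED_RESCHED		0x04
#define TRACE_FLAG_HARDIRQ		0x08
#define TRACE_FLAG_SOFTIRQ		0x10
#define TRACE_FLAG_NEED_RESCHED_LAZY	0x80

static void print_lat_fmt(unsigned char flags)
{
	char irqs_off = (flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
	char need_resched = (flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
	char need_resched_lazy =
		(flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
	char hardsoft_irq = (flags & TRACE_FLAG_HARDIRQ) ? 'h' :
			    (flags & TRACE_FLAG_SOFTIRQ) ? 's' : '.';

	printf("%c%c%c%c\n", irqs_off, need_resched, need_resched_lazy,
	       hardsoft_irq);
}

int main(void)
{
	/* prints "d.L.": irqs off, lazy resched requested */
	print_lat_fmt(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_NEED_RESCHED_LAZY);
	return 0;
}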
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 463c4c11c..26b8a86cf 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -500,7 +500,7 @@ static void watchdog_enable(unsigned int cpu)
* Start the timer first to prevent the NMI watchdog triggering
* before the timer has a chance to fire.
*/
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
hrtimer->function = watchdog_timer_fn;
hrtimer_start(hrtimer, ns_to_ktime(sample_period),
HRTIMER_MODE_REL_PINNED);
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 43832b102..e07b84dab 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -23,6 +23,7 @@

static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);

static unsigned long hardlockup_allcpu_dumped;

@@ -331,6 +332,13 @@ void watchdog_hardlockup_check(struct pt_regs *regs)
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
+ /*
+ * If early-printk is enabled then make sure we do not
+ * lock up in printk() and kill console logging:
+ */
+ printk_kill();
+
+ raw_spin_lock(&watchdog_output_lock);

pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
print_modules();
@@ -348,6 +356,7 @@ void watchdog_hardlockup_check(struct pt_regs *regs)
!test_and_set_bit(0, &hardlockup_allcpu_dumped))
trigger_allbutself_cpu_backtrace();

+ raw_spin_unlock(&watchdog_output_lock);
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3ce68188a..bdbd8a35f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -129,7 +129,7 @@ enum {
*
* PL: wq_pool_mutex protected.
*
- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
@@ -138,7 +138,7 @@ enum {
*
* WQ: wq->mutex protected.
*
- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
+ * WR: wq->mutex protected for writes. RCU protected for reads.
*
* MD: wq_mayday_lock protected.
*/
@@ -146,7 +146,7 @@ enum {
/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
- spinlock_t lock; /* the pool lock */
+ raw_spinlock_t lock; /* the pool lock */
int cpu; /* I: the associated cpu */
int node; /* I: the associated node ID */
int id; /* I: pool ID */
@@ -185,7 +185,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp;

/*
- * Destruction of pool is sched-RCU protected to allow dereferences
+ * Destruction of pool is RCU protected to allow dereferences
* from get_work_pool().
*/
struct rcu_head rcu;
@@ -231,7 +231,7 @@ struct pool_workqueue {
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
- * itself is also sched-RCU protected so that the first pwq can be
+ * itself is also RCU protected so that the first pwq can be
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
@@ -316,8 +316,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */

static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -376,20 +376,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex() \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq_pool_mutex), \
- "sched RCU or wq_pool_mutex should be held")
+ "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex(wq) \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq->mutex), \
- "sched RCU or wq->mutex should be held")
+ "RCU or wq->mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq->mutex) && \
!lockdep_is_held(&wq_pool_mutex), \
- "sched RCU, wq->mutex or wq_pool_mutex should be held")
+ "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
@@ -401,7 +401,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pool: iteration cursor
* @pi: integer used for iteration
*
- * This must be called either with wq_pool_mutex held or sched RCU read
+ * This must be called either with wq_pool_mutex held or RCU read
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
@@ -433,7 +433,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pwq: iteration cursor
* @wq: the target workqueue
*
- * This must be called either with wq->mutex held or sched RCU read locked.
+ * This must be called either with wq->mutex held or RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
@@ -569,7 +569,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* @wq: the target workqueue
* @node: the node ID
*
- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
@@ -713,8 +713,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
- * access under sched-RCU read lock. As such, this function should be
- * called under wq_pool_mutex or with preemption disabled.
+ * access under RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
@@ -847,7 +847,7 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
* Wake up the first idle worker of @pool.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -900,7 +900,7 @@ void wq_worker_sleeping(struct task_struct *task)
return;

worker->sleeping = 1;
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);

/*
* The counterpart of the following dec_and_test, implied mb,
@@ -919,7 +919,7 @@ void wq_worker_sleeping(struct task_struct *task)
if (next)
wake_up_process(next->task);
}
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}

/**
@@ -930,7 +930,7 @@ void wq_worker_sleeping(struct task_struct *task)
* Set @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
- * spin_lock_irq(pool->lock)
+ * raw_spin_lock_irq(pool->lock)
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
@@ -955,7 +955,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
* Clear @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
- * spin_lock_irq(pool->lock)
+ * raw_spin_lock_irq(pool->lock)
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
@@ -1003,7 +1003,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
* actually occurs, it should be easy to locate the culprit work function.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*
* Return:
* Pointer to worker which is executing @work if found, %NULL
@@ -1038,7 +1038,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
* nested inside outer list_for_each_entry_safe().
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void move_linked_works(struct work_struct *work, struct list_head *head,
struct work_struct **nextp)
@@ -1113,12 +1113,12 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
- * As both pwqs and pools are sched-RCU protected, the
+ * As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
- spin_lock_irq(&pwq->pool->lock);
+ raw_spin_lock_irq(&pwq->pool->lock);
put_pwq(pwq);
- spin_unlock_irq(&pwq->pool->lock);
+ raw_spin_unlock_irq(&pwq->pool->lock);
}
}

@@ -1151,7 +1151,7 @@ static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
* decrement nr_in_flight of its pwq and handle workqueue flushing.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
{
@@ -1241,6 +1241,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;

+ rcu_read_lock();
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
@@ -1249,7 +1250,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!pool)
goto fail;

- spin_lock(&pool->lock);
+ raw_spin_lock(&pool->lock);
/*
* work->data is guaranteed to point to pwq only while the work
* item is queued on pwq->wq, and both updating work->data to point
@@ -1282,11 +1283,13 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
/* work->data points to pwq iff queued, point to pool */
set_work_pool_and_keep_pending(work, pool->id);

- spin_unlock(&pool->lock);
+ raw_spin_unlock(&pool->lock);
+ rcu_read_unlock();
return 1;
}
- spin_unlock(&pool->lock);
+ raw_spin_unlock(&pool->lock);
fail:
+ rcu_read_unlock();
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
@@ -1305,7 +1308,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
* work_struct flags.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
struct list_head *head, unsigned int extra_flags)
@@ -1399,6 +1402,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
+ rcu_read_lock();
retry:
/* pwq which will be used unless @work is executing elsewhere */
if (wq->flags & WQ_UNBOUND) {
@@ -1420,7 +1424,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (last_pool && last_pool != pwq->pool) {
struct worker *worker;

- spin_lock(&last_pool->lock);
+ raw_spin_lock(&last_pool->lock);

worker = find_worker_executing_work(last_pool, work);

@@ -1428,11 +1432,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
pwq = worker->current_pwq;
} else {
/* meh... not running there, queue here */
- spin_unlock(&last_pool->lock);
- spin_lock(&pwq->pool->lock);
+ raw_spin_unlock(&last_pool->lock);
+ raw_spin_lock(&pwq->pool->lock);
}
} else {
- spin_lock(&pwq->pool->lock);
+ raw_spin_lock(&pwq->pool->lock);
}

/*
@@ -1445,7 +1449,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
*/
if (unlikely(!pwq->refcnt)) {
if (wq->flags & WQ_UNBOUND) {
- spin_unlock(&pwq->pool->lock);
+ raw_spin_unlock(&pwq->pool->lock);
cpu_relax();
goto retry;
}
@@ -1457,10 +1461,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);

- if (WARN_ON(!list_empty(&work->entry))) {
- spin_unlock(&pwq->pool->lock);
- return;
- }
+ if (WARN_ON(!list_empty(&work->entry)))
+ goto out;

pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
@@ -1479,7 +1481,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
insert_work(pwq, work, worklist, work_flags);

- spin_unlock(&pwq->pool->lock);
+out:
+ raw_spin_unlock(&pwq->pool->lock);
+ rcu_read_unlock();
}

/**
@@ -1598,9 +1602,11 @@ EXPORT_SYMBOL_GPL(queue_work_node);
void delayed_work_timer_fn(struct timer_list *t)
{
struct delayed_work *dwork = from_timer(dwork, t, timer);
+ unsigned long flags;

- /* should have been called from irqsafe timer with irq already off */
+ local_irq_save(flags);
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
+ local_irq_restore(flags);
}
EXPORT_SYMBOL(delayed_work_timer_fn);

@@ -1747,7 +1753,7 @@ EXPORT_SYMBOL(queue_rcu_work);
* necessary.
*
* LOCKING:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void worker_enter_idle(struct worker *worker)
{
@@ -1787,7 +1793,7 @@ static void worker_enter_idle(struct worker *worker)
* @worker is leaving idle state. Update stats.
*
* LOCKING:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void worker_leave_idle(struct worker *worker)
{
@@ -1922,11 +1928,11 @@ static struct worker *create_worker(struct worker_pool *pool)
worker_attach_to_pool(worker, pool);

/* start the newly created worker */
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
worker->pool->nr_workers++;
worker_enter_idle(worker);
wake_up_process(worker->task);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);

return worker;

@@ -1945,7 +1951,7 @@ static struct worker *create_worker(struct worker_pool *pool)
* be idle.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void destroy_worker(struct worker *worker)
{
@@ -1971,7 +1977,7 @@ static void idle_worker_timeout(struct timer_list *t)
{
struct worker_pool *pool = from_timer(pool, t, idle_timer);

- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);

while (too_many_workers(pool)) {
struct worker *worker;
@@ -1989,7 +1995,7 @@ static void idle_worker_timeout(struct timer_list *t)
destroy_worker(worker);
}

- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}

static void send_mayday(struct work_struct *work)
@@ -2020,8 +2026,8 @@ static void pool_mayday_timeout(struct timer_list *t)
struct worker_pool *pool = from_timer(pool, t, mayday_timer);
struct work_struct *work;

- spin_lock_irq(&pool->lock);
- spin_lock(&wq_mayday_lock); /* for wq->maydays */
+ raw_spin_lock_irq(&pool->lock);
+ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */

if (need_to_create_worker(pool)) {
/*
@@ -2034,8 +2040,8 @@ static void pool_mayday_timeout(struct timer_list *t)
send_mayday(work);
}

- spin_unlock(&wq_mayday_lock);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock(&wq_mayday_lock);
+ raw_spin_unlock_irq(&pool->lock);

mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
@@ -2054,7 +2060,7 @@ static void pool_mayday_timeout(struct timer_list *t)
* may_start_working() %true.
*
* LOCKING:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*/
@@ -2063,7 +2069,7 @@ __releases(&pool->lock)
__acquires(&pool->lock)
{
restart:
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);

/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2079,7 +2085,7 @@ __acquires(&pool->lock)
}

del_timer_sync(&pool->mayday_timer);
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/*
* This is necessary even after a new worker was just successfully
* created as @pool->lock was dropped and the new worker might have
@@ -2102,7 +2108,7 @@ __acquires(&pool->lock)
* and may_start_working() is true.
*
* CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
@@ -2125,7 +2131,7 @@ static bool manage_workers(struct worker *worker)

pool->manager = NULL;
pool->flags &= ~POOL_MANAGER_ACTIVE;
- wake_up(&wq_manager_wait);
+ swake_up_one(&wq_manager_wait);
return true;
}

@@ -2141,7 +2147,7 @@ static bool manage_workers(struct worker *worker)
* call this function to process a work.
*
* CONTEXT:
- * spin_lock_irq(pool->lock) which is released and regrabbed.
+ * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
@@ -2224,7 +2230,7 @@ __acquires(&pool->lock)
*/
set_work_pool_and_clear_pending(work, pool->id);

- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);

lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
@@ -2279,7 +2285,7 @@ __acquires(&pool->lock)
*/
cond_resched();

- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);

/* clear cpu intensive status */
if (unlikely(cpu_intensive))
@@ -2315,7 +2321,7 @@ __acquires(&pool->lock)
* fetches a work from the top and executes it.
*
* CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times.
*/
static void process_scheduled_works(struct worker *worker)
@@ -2357,11 +2363,11 @@ static int worker_thread(void *__worker)
/* tell the scheduler that this is a workqueue worker */
set_pf_worker(true);
woke_up:
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);

/* am I supposed to die? */
if (unlikely(worker->flags & WORKER_DIE)) {
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
WARN_ON_ONCE(!list_empty(&worker->entry));
set_pf_worker(false);

@@ -2427,7 +2433,7 @@ static int worker_thread(void *__worker)
*/
worker_enter_idle(worker);
__set_current_state(TASK_IDLE);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
schedule();
goto woke_up;
}
@@ -2481,7 +2487,7 @@ static int rescuer_thread(void *__rescuer)
should_stop = kthread_should_stop();

/* see whether any pwq is asking for help */
- spin_lock_irq(&wq_mayday_lock);
+ raw_spin_lock_irq(&wq_mayday_lock);

while (!list_empty(&wq->maydays)) {
struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
@@ -2493,11 +2499,11 @@ static int rescuer_thread(void *__rescuer)
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);

- spin_unlock_irq(&wq_mayday_lock);
+ raw_spin_unlock_irq(&wq_mayday_lock);

worker_attach_to_pool(rescuer, pool);

- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);

/*
* Slurp in all works issued via this workqueue and
@@ -2526,7 +2532,7 @@ static int rescuer_thread(void *__rescuer)
* incur MAYDAY_INTERVAL delay inbetween.
*/
if (need_to_create_worker(pool)) {
- spin_lock(&wq_mayday_lock);
+ raw_spin_lock(&wq_mayday_lock);
/*
* Queue iff we aren't racing destruction
* and somebody else hasn't queued it already.
@@ -2535,7 +2541,7 @@ static int rescuer_thread(void *__rescuer)
get_pwq(pwq);
list_add_tail(&pwq->mayday_node, &wq->maydays);
}
- spin_unlock(&wq_mayday_lock);
+ raw_spin_unlock(&wq_mayday_lock);
}
}

@@ -2553,14 +2559,14 @@ static int rescuer_thread(void *__rescuer)
if (need_more_worker(pool))
wake_up_worker(pool);

- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);

worker_detach_from_pool(rescuer);

- spin_lock_irq(&wq_mayday_lock);
+ raw_spin_lock_irq(&wq_mayday_lock);
}

- spin_unlock_irq(&wq_mayday_lock);
+ raw_spin_unlock_irq(&wq_mayday_lock);

if (should_stop) {
__set_current_state(TASK_RUNNING);
@@ -2640,7 +2646,7 @@ static void wq_barrier_func(struct work_struct *work)
* underneath us, so we can't reliably determine pwq from @target.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr,
@@ -2735,7 +2741,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;

- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);

if (flush_color >= 0) {
WARN_ON_ONCE(pwq->flush_color != -1);
@@ -2752,7 +2758,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
pwq->work_color = work_color;
}

- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}

if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
@@ -2952,9 +2958,9 @@ void drain_workqueue(struct workqueue_struct *wq)
for_each_pwq(pwq, wq) {
bool drained;

- spin_lock_irq(&pwq->pool->lock);
+ raw_spin_lock_irq(&pwq->pool->lock);
drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
- spin_unlock_irq(&pwq->pool->lock);
+ raw_spin_unlock_irq(&pwq->pool->lock);

if (drained)
continue;
@@ -3028,14 +3034,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,

might_sleep();

- local_irq_disable();
+ rcu_read_lock();
pool = get_work_pool(work);
if (!pool) {
- local_irq_enable();
+ rcu_read_unlock();
return false;
}

- spin_lock(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
@@ -3059,7 +3065,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
check_flush_dependency(pwq->wq, work);

insert_wq_barrier(pwq, barr, work, worker);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);

/*
* Force a lock recursion deadlock when using flush_work() inside a
@@ -3075,10 +3081,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
}
-
+ rcu_read_unlock();
return true;
already_gone:
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
+ rcu_read_unlock();
return false;
}

@@ -3416,7 +3423,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
*
* Undo alloc_workqueue_attrs().
*/
-void free_workqueue_attrs(struct workqueue_attrs *attrs)
+static void free_workqueue_attrs(struct workqueue_attrs *attrs)
{
if (attrs) {
free_cpumask_var(attrs->cpumask);
@@ -3426,21 +3433,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)

/**
* alloc_workqueue_attrs - allocate a workqueue_attrs
- * @gfp_mask: allocation mask to use
*
* Allocate a new workqueue_attrs, initialize with default settings and
* return it.
*
* Return: The allocated new workqueue_attr on success. %NULL on failure.
*/
-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
+static struct workqueue_attrs *alloc_workqueue_attrs(void)
{
struct workqueue_attrs *attrs;

- attrs = kzalloc(sizeof(*attrs), gfp_mask);
+ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
if (!attrs)
goto fail;
- if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
+ if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
goto fail;

cpumask_copy(attrs->cpumask, cpu_possible_mask);
@@ -3497,7 +3503,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
*/
static int init_worker_pool(struct worker_pool *pool)
{
- spin_lock_init(&pool->lock);
+ raw_spin_lock_init(&pool->lock);
pool->id = -1;
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
@@ -3518,7 +3524,7 @@ static int init_worker_pool(struct worker_pool *pool)
pool->refcnt = 1;

/* shouldn't fail above this point */
- pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ pool->attrs = alloc_workqueue_attrs();
if (!pool->attrs)
return -ENOMEM;
return 0;
@@ -3551,7 +3557,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
@@ -3583,15 +3589,15 @@ static void put_unbound_pool(struct worker_pool *pool)
* @pool's workers from blocking on attach_mutex. We're the last
* manager and @pool gets freed with the flag set.
*/
- spin_lock_irq(&pool->lock);
- wait_event_lock_irq(wq_manager_wait,
+ raw_spin_lock_irq(&pool->lock);
+ swait_event_lock_irq(wq_manager_wait,
!(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
pool->flags |= POOL_MANAGER_ACTIVE;

while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);

mutex_lock(&wq_pool_attach_mutex);
if (!list_empty(&pool->workers))
@@ -3605,8 +3611,8 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);

- /* sched-RCU protected to allow dereferences from get_work_pool() */
- call_rcu_sched(&pool->rcu, rcu_free_pool);
+ /* RCU protected to allow dereferences from get_work_pool() */
+ call_rcu(&pool->rcu, rcu_free_pool);
}

/**
@@ -3719,14 +3725,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);

- call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+ call_rcu(&pwq->rcu, rcu_free_pwq);

/*
* If we're the last pwq going away, @wq is already dead and no one
* is gonna access it anymore. Schedule RCU free.
*/
if (is_last)
- call_rcu_sched(&wq->rcu, rcu_free_wq);
+ call_rcu(&wq->rcu, rcu_free_wq);
}

/**
@@ -3751,7 +3757,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
return;

/* this function can be called during early boot w/ irq disabled */
- spin_lock_irqsave(&pwq->pool->lock, flags);
+ raw_spin_lock_irqsave(&pwq->pool->lock, flags);

/*
* During [un]freezing, the caller is responsible for ensuring that
@@ -3781,7 +3787,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
pwq->max_active = 0;
}

- spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
}

/* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -3954,8 +3960,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,

ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);

- new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
- tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ new_attrs = alloc_workqueue_attrs();
+ tmp_attrs = alloc_workqueue_attrs();
if (!ctx || !new_attrs || !tmp_attrs)
goto out_free;

@@ -4094,7 +4100,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
*
* Return: 0 on success and -errno on failure.
*/
-int apply_workqueue_attrs(struct workqueue_struct *wq,
+static int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
int ret;
@@ -4105,7 +4111,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,

return ret;
}
-EXPORT_SYMBOL_GPL(apply_workqueue_attrs);

/**
* wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
@@ -4183,9 +4188,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,

use_dfl_pwq:
mutex_lock(&wq->mutex);
- spin_lock_irq(&wq->dfl_pwq->pool->lock);
+ raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
get_pwq(wq->dfl_pwq);
- spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+ raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
out_unlock:
mutex_unlock(&wq->mutex);
@@ -4306,7 +4311,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
return NULL;

if (flags & WQ_UNBOUND) {
- wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ wq->unbound_attrs = alloc_workqueue_attrs();
if (!wq->unbound_attrs)
goto err_free_wq;
}
@@ -4393,9 +4398,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
struct worker *rescuer = wq->rescuer;

/* this prevents new queueing */
- spin_lock_irq(&wq_mayday_lock);
+ raw_spin_lock_irq(&wq_mayday_lock);
wq->rescuer = NULL;
- spin_unlock_irq(&wq_mayday_lock);
+ raw_spin_unlock_irq(&wq_mayday_lock);

/* rescuer will empty maydays list before exiting */
kthread_stop(rescuer->task);
@@ -4438,7 +4443,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
- call_rcu_sched(&wq->rcu, rcu_free_wq);
+ call_rcu(&wq->rcu, rcu_free_wq);
} else {
/*
* We're the sole accessor of @wq at this point. Directly
@@ -4548,7 +4553,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq;
bool ret;

- rcu_read_lock_sched();
+ rcu_read_lock();
+ preempt_disable();

if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
@@ -4559,7 +4565,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));

ret = !list_empty(&pwq->inactive_works);
- rcu_read_unlock_sched();
+ preempt_enable();
+ rcu_read_unlock();

return ret;
}
@@ -4585,15 +4592,15 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work))
ret |= WORK_BUSY_PENDING;

- local_irq_save(flags);
+ rcu_read_lock();
pool = get_work_pool(work);
if (pool) {
- spin_lock(&pool->lock);
+ raw_spin_lock_irqsave(&pool->lock, flags);
if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING;
- spin_unlock(&pool->lock);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
}
- local_irq_restore(flags);
+ rcu_read_unlock();

return ret;
}
@@ -4778,7 +4785,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;

- rcu_read_lock_sched();
+ rcu_read_lock();

pr_info("Showing busy workqueues and worker pools:\n");

@@ -4798,10 +4805,10 @@ void show_workqueue_state(void)
pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);

for_each_pwq(pwq, wq) {
- spin_lock_irqsave(&pwq->pool->lock, flags);
+ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
if (pwq->nr_active || !list_empty(&pwq->inactive_works))
show_pwq(pwq);
- spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4815,7 +4822,7 @@ void show_workqueue_state(void)
struct worker *worker;
bool first = true;

- spin_lock_irqsave(&pool->lock, flags);
+ raw_spin_lock_irqsave(&pool->lock, flags);
if (pool->nr_workers == pool->nr_idle)
goto next_pool;

@@ -4834,7 +4841,7 @@ void show_workqueue_state(void)
}
pr_cont("\n");
next_pool:
- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4843,7 +4850,7 @@ void show_workqueue_state(void)
touch_nmi_watchdog();
}

- rcu_read_unlock_sched();
+ rcu_read_unlock();
}

/* used to show worker information through /proc/PID/{comm,stat,status} */
@@ -4864,7 +4871,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
struct worker_pool *pool = worker->pool;

if (pool) {
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/*
* ->desc tracks information (wq name or
* set_worker_desc()) for the latest execution. If
@@ -4878,7 +4885,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
scnprintf(buf + off, size - off, "-%s",
worker->desc);
}
- spin_unlock_irq(&pool->lock);
|
|
+ raw_spin_unlock_irq(&pool->lock);
|
|
}
|
|
}
|
|
|
|
@@ -4909,7 +4916,7 @@ static void unbind_workers(int cpu)
|
|
|
|
for_each_cpu_worker_pool(pool, cpu) {
|
|
mutex_lock(&wq_pool_attach_mutex);
|
|
- spin_lock_irq(&pool->lock);
|
|
+ raw_spin_lock_irq(&pool->lock);
|
|
|
|
/*
|
|
* We've blocked all attach/detach operations. Make all workers
|
|
@@ -4923,7 +4930,7 @@ static void unbind_workers(int cpu)
|
|
|
|
pool->flags |= POOL_DISASSOCIATED;
|
|
|
|
- spin_unlock_irq(&pool->lock);
|
|
+ raw_spin_unlock_irq(&pool->lock);
|
|
mutex_unlock(&wq_pool_attach_mutex);
|
|
|
|
/*
|
|
@@ -4949,9 +4956,9 @@ static void unbind_workers(int cpu)
|
|
* worker blocking could lead to lengthy stalls. Kick off
|
|
* unbound chain execution of currently pending work items.
|
|
*/
|
|
- spin_lock_irq(&pool->lock);
|
|
+ raw_spin_lock_irq(&pool->lock);
|
|
wake_up_worker(pool);
|
|
- spin_unlock_irq(&pool->lock);
|
|
+ raw_spin_unlock_irq(&pool->lock);
|
|
}
|
|
}
|
|
|
|
@@ -4978,7 +4985,7 @@ static void rebind_workers(struct worker_pool *pool)
|
|
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
|
|
pool->attrs->cpumask) < 0);
|
|
|
|
- spin_lock_irq(&pool->lock);
|
|
+ raw_spin_lock_irq(&pool->lock);
|
|
|
|
pool->flags &= ~POOL_DISASSOCIATED;
|
|
|
|
@@ -5017,7 +5024,7 @@ static void rebind_workers(struct worker_pool *pool)
|
|
WRITE_ONCE(worker->flags, worker_flags);
|
|
}
|
|
|
|
- spin_unlock_irq(&pool->lock);
|
|
+ raw_spin_unlock_irq(&pool->lock);
|
|
}
|
|
|
|
/**
|
|
@@ -5230,16 +5237,16 @@ bool freeze_workqueues_busy(void)
|
|
* nr_active is monotonically decreasing. It's safe
|
|
* to peek without lock.
|
|
*/
|
|
- rcu_read_lock_sched();
|
|
+ rcu_read_lock();
|
|
for_each_pwq(pwq, wq) {
|
|
WARN_ON_ONCE(pwq->nr_active < 0);
|
|
if (pwq->nr_active) {
|
|
busy = true;
|
|
- rcu_read_unlock_sched();
|
|
+ rcu_read_unlock();
|
|
goto out_unlock;
|
|
}
|
|
}
|
|
- rcu_read_unlock_sched();
|
|
+ rcu_read_unlock();
|
|
}
|
|
out_unlock:
|
|
mutex_unlock(&wq_pool_mutex);
|
|
@@ -5434,7 +5441,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
|
|
const char *delim = "";
|
|
int node, written = 0;
|
|
|
|
- rcu_read_lock_sched();
|
|
+ get_online_cpus();
|
|
+ rcu_read_lock();
|
|
for_each_node(node) {
|
|
written += scnprintf(buf + written, PAGE_SIZE - written,
|
|
"%s%d:%d", delim, node,
|
|
@@ -5442,7 +5450,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
|
|
delim = " ";
|
|
}
|
|
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
|
|
- rcu_read_unlock_sched();
|
|
+ rcu_read_unlock();
|
|
+ put_online_cpus();
|
|
|
|
return written;
|
|
}
|
|
@@ -5467,7 +5476,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
|
|
|
|
lockdep_assert_held(&wq_pool_mutex);
|
|
|
|
- attrs = alloc_workqueue_attrs(GFP_KERNEL);
|
|
+ attrs = alloc_workqueue_attrs();
|
|
if (!attrs)
|
|
return NULL;
|
|
|
|
@@ -5896,7 +5905,7 @@ static void __init wq_numa_init(void)
|
|
return;
|
|
}
|
|
|
|
- wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
|
|
+ wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
|
|
BUG_ON(!wq_update_unbound_numa_attrs_buf);
|
|
|
|
/*
|
|
@@ -5971,7 +5980,7 @@ int __init workqueue_init_early(void)
|
|
for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
|
|
struct workqueue_attrs *attrs;
|
|
|
|
- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
|
|
+ BUG_ON(!(attrs = alloc_workqueue_attrs()));
|
|
attrs->nice = std_nice[i];
|
|
unbound_std_wq_attrs[i] = attrs;
|
|
|
|
@@ -5980,7 +5989,7 @@ int __init workqueue_init_early(void)
|
|
* guaranteed by max_active which is enforced by pwqs.
|
|
* Turn off NUMA so that dfl_pwq is used for all nodes.
|
|
*/
|
|
- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
|
|
+ BUG_ON(!(attrs = alloc_workqueue_attrs()));
|
|
attrs->nice = std_nice[i];
|
|
attrs->no_numa = true;
|
|
ordered_wq_attrs[i] = attrs;
|
|
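The workqueue hunks above apply two RT conversions together: the pool, pwq and mayday locks move from spinlock_t to raw_spinlock_t, because on PREEMPT_RT a plain spinlock_t becomes a sleeping rtmutex and these locks are taken from scheduler and hard-atomic paths; and the sched-flavored RCU calls (call_rcu_sched(), rcu_read_lock_sched()) become plain call_rcu()/rcu_read_lock(), since sched-RCU read sections are defined by preemption being disabled, which no longer holds once the locks can sleep. A minimal userspace analogy of the sleeping-versus-busy-waiting distinction, using pthreads (illustrative only, not part of the patch):

#include <pthread.h>
#include <stdio.h>

/* pthread_mutex_t is a sleeping lock, roughly what spinlock_t becomes
 * on PREEMPT_RT; pthread_spinlock_t busy-waits, which is the behavior
 * raw_spinlock_t preserves. */
static pthread_mutex_t sleeping_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t busy_lock;
static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&sleeping_lock);  /* may block and schedule */
		counter++;
		pthread_mutex_unlock(&sleeping_lock);

		pthread_spin_lock(&busy_lock);       /* never sleeps */
		counter++;
		pthread_spin_unlock(&busy_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	pthread_spin_init(&busy_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("counter = %ld\n", counter);
	return 0;
}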
diff --git a/lib/Kconfig b/lib/Kconfig
index edb7d40d1..5bf23108d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -441,6 +441,7 @@ config CHECK_SIGNATURE

config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+ depends on !PREEMPT_RT_FULL
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5bf4ad9f9..7b9187caa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1245,7 +1245,7 @@ config DEBUG_ATOMIC_SLEEP

config DEBUG_LOCKING_API_SELFTESTS
bool "Locking API boot-time self-tests"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !PREEMPT_RT_FULL
help
Say Y here if you want the kernel to run a short self-test during
bootup. The self-test checks whether common types of locking bugs
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 14afeeb7d..e28481c40 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -376,7 +376,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;

- fill_pool();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (preempt_count() == 0 && !irqs_disabled())
+#endif
+ fill_pool();

db = get_bucket((unsigned long) addr);
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 86a709954..9c069ef83 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop)
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(irq_poll_sched);

@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop)
local_irq_save(flags);
__irq_poll_complete(iop);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(irq_poll_complete);

@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
}

local_irq_enable();
+ preempt_check_resched_rt();

/* Even though interrupts have been re-enabled, this
* access is safe because interrupts can only add new
@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

local_irq_enable();
+ preempt_check_resched_rt();
}

/**
@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();

return 0;
}
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 1e1bbf171..32db9532d 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -742,6 +742,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)

+#ifndef CONFIG_PREEMPT_RT_FULL
+
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)

@@ -757,9 +759,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)

+#endif
+
#undef E1
#undef E2

+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Enabling hardirqs with a softirq-safe lock held:
*/
@@ -792,6 +797,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#undef E1
#undef E2

+#endif
+
/*
* Enabling irqs with an irq-safe lock held:
*/
@@ -815,6 +822,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)

+#ifndef CONFIG_PREEMPT_RT_FULL
+
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)

@@ -830,6 +839,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)

+#endif
+
#undef E1
#undef E2

@@ -861,6 +872,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)

+#ifndef CONFIG_PREEMPT_RT_FULL
+
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)

@@ -876,6 +889,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)

+#endif
+
#undef E1
#undef E2
#undef E3
@@ -909,6 +924,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)

+#ifndef CONFIG_PREEMPT_RT_FULL
+
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)

@@ -924,10 +941,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)

+#endif
+
#undef E1
#undef E2
#undef E3

+#ifndef CONFIG_PREEMPT_RT_FULL
+
/*
* read-lock / write-lock irq inversion.
*
@@ -990,6 +1011,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
#undef E2
#undef E3

+#endif
+
+#ifndef CONFIG_PREEMPT_RT_FULL
+
/*
* read-lock / write-lock recursion that is actually safe.
*/
@@ -1028,6 +1053,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
#undef E2
#undef E3

+#endif
+
/*
* read-lock / write-lock recursion that is unsafe.
*/
@@ -2057,6 +2084,7 @@ void locking_selftest(void)

printk(" --------------------------------------------------------------------------\n");

+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* irq-context testcases:
*/
@@ -2069,6 +2097,28 @@ void locking_selftest(void)

DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
+#else
+ /* On -rt, we only do hardirq context test for raw spinlock */
+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
+
+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
+
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
+
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
+#endif

ww_tests();
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e5cab5c4e..9309e813b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -38,7 +38,7 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
-
+#include <linux/locallock.h>

/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
@@ -87,6 +87,7 @@ struct radix_tree_preload {
struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
@@ -405,12 +406,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = this_cpu_ptr(&radix_tree_preloads);
+ rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes;
rtp->nodes = ret->parent;
rtp->nr--;
}
+ put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
/*
* Update the allocation stack trace as this is more useful
* for debugging.
@@ -476,14 +478,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
*/
gfp_mask &= ~__GFP_ACCOUNT;

- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < nr) {
- preempt_enable();
+ local_unlock(radix_tree_preloads_lock);
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->parent = rtp->nodes;
@@ -525,7 +527,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -563,7 +565,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)

/* Preloading doesn't help anything with this gfp mask, skip it */
if (!gfpflags_allow_blocking(gfp_mask)) {
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
return 0;
}

@@ -597,6 +599,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
return __radix_tree_preload(gfp_mask, nr_nodes);
}

+void radix_tree_preload_end(void)
+{
+ local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(radix_tree_preload_end);
+
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
@@ -2102,10 +2110,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
}
EXPORT_SYMBOL(idr_preload);

+void idr_preload_end(void)
+{
+ local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(idr_preload_end);
+
int ida_pre_get(struct ida *ida, gfp_t gfp)
{
/*
@@ -2114,7 +2128,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
* to return to the ida_pre_get() step.
*/
if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
- preempt_enable();
+ local_unlock(radix_tree_preloads_lock);

if (!this_cpu_read(ida_bitmap)) {
struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
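The radix-tree conversion above swaps bare preempt_disable()/preempt_enable() around the per-CPU preload cache for a named local lock (DEFINE_LOCAL_IRQ_LOCK), so a non-RT build still compiles down to disabling preemption while an RT build takes a per-CPU sleeping lock and keeps the section preemptible. A rough userspace model of the preload loop's lock discipline, dropping the lock across the allocation exactly as __radix_tree_preload() does (all names invented for the demo):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPU 4

struct preload_cache {
	pthread_mutex_t lock;	/* stands in for the per-CPU local lock */
	int nr;			/* number of preloaded nodes in this slot */
};

static struct preload_cache cache[NCPU];

static void preload(int cpu, int want)
{
	struct preload_cache *c = &cache[cpu];

	pthread_mutex_lock(&c->lock);		/* local_lock() */
	while (c->nr < want) {
		/* drop the lock across the possibly-sleeping allocation,
		 * then retake it and recheck, like __radix_tree_preload() */
		pthread_mutex_unlock(&c->lock);
		void *node = malloc(64);
		pthread_mutex_lock(&c->lock);
		if (!node)
			break;
		c->nr++;	/* a real cache would chain the node here */
	}
	pthread_mutex_unlock(&c->lock);		/* local_unlock() */
}

int main(void)
{
	for (int i = 0; i < NCPU; i++)
		pthread_mutex_init(&cache[i].lock, NULL);
	preload(0, 8);
	printf("cpu0 preloaded %d nodes\n", cache[0].nr);
	return 0;
}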
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index cf18373f4..388b25a20 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -777,7 +777,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);

if (miter->__flags & SG_MITER_ATOMIC) {
- WARN_ON_ONCE(preemptible());
+ WARN_ON_ONCE(!pagefault_disabled());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 85925aaa4..2e7398534 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,12 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+#if defined(CONFIG_PREEMPT_RT_BASE) && (defined(CONFIG_SMP) || defined(CONFIG_SCHED_DEBUG))
+ if (current->migrate_disable)
+ goto out;
+#endif
+
+ if (current->nr_cpus_allowed == 1)
goto out;

/*
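The hunk above lets check_preemption_disabled() pass when the task is pinned to a single CPU (current->nr_cpus_allowed == 1) or, on RT, running under migrate_disable(): in either case the answer of smp_processor_id() cannot change underneath the caller even with preemption enabled. A userspace analogue of the single-CPU reasoning via the affinity mask (Linux-specific API, illustrative only):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	if (sched_getaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_getaffinity");
		return 1;
	}
	if (CPU_COUNT(&set) == 1)
		printf("pinned to one CPU: sched_getcpu() is stable\n");
	else
		printf("runnable on %d CPUs: sched_getcpu() may change "
		       "between calls\n", CPU_COUNT(&set));
	return 0;
}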
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 1e9e2ab25..199c75e03 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -143,25 +143,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
}
}

-static DEFINE_SPINLOCK(report_lock);
-
-static void ubsan_prologue(struct source_location *location,
- unsigned long *flags)
+static void ubsan_prologue(struct source_location *location)
{
current->in_ubsan++;
- spin_lock_irqsave(&report_lock, *flags);

pr_err("========================================"
"========================================\n");
print_source_location("UBSAN: Undefined behaviour in", location);
}

-static void ubsan_epilogue(unsigned long *flags)
+static void ubsan_epilogue(void)
{
dump_stack();
pr_err("========================================"
"========================================\n");
- spin_unlock_irqrestore(&report_lock, *flags);
+
current->in_ubsan--;
}

@@ -170,14 +166,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
{

struct type_descriptor *type = data->type;
- unsigned long flags;
char lhs_val_str[VALUE_LENGTH];
char rhs_val_str[VALUE_LENGTH];

if (suppress_report(&data->location))
return;

- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);

val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
@@ -189,7 +184,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
rhs_val_str,
type->type_name);

- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}

void __ubsan_handle_add_overflow(struct overflow_data *data,
@@ -217,20 +212,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
void __ubsan_handle_negate_overflow(struct overflow_data *data,
void *old_val)
{
- unsigned long flags;
char old_val_str[VALUE_LENGTH];

if (suppress_report(&data->location))
return;

- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);

val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);

pr_err("negation of %s cannot be represented in type %s:\n",
old_val_str, data->type->type_name);

- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);

@@ -238,13 +232,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
void *lhs, void *rhs)
{
- unsigned long flags;
char rhs_val_str[VALUE_LENGTH];

if (suppress_report(&data->location))
return;

- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);

val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);

@@ -254,58 +247,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
else
pr_err("division by zero\n");

- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);

static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;

- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);

pr_err("%s null pointer of type %s\n",
type_check_kinds[data->type_check_kind],
data->type->type_name);

- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}

static void handle_misaligned_access(struct type_mismatch_data_common *data,
unsigned long ptr)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;

- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);

pr_err("%s misaligned address %p for type %s\n",
type_check_kinds[data->type_check_kind],
(void *)ptr, data->type->type_name);
pr_err("which requires %ld byte alignment\n", data->alignment);

- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}

static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
unsigned long ptr)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;

- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);
pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
pr_err("for an object of type %s\n", data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}

static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
@@ -352,42 +339,39 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
void *bound)
{
- unsigned long flags;
char bound_str[VALUE_LENGTH];

if (suppress_report(&data->location))
return;

- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);

val_to_string(bound_str, sizeof(bound_str), data->type, bound);
pr_err("variable length array bound value %s <= 0\n", bound_str);

- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);

void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
{
- unsigned long flags;
char index_str[VALUE_LENGTH];

if (suppress_report(&data->location))
return;

- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);

val_to_string(index_str, sizeof(index_str), data->index_type, index);
pr_err("index %s is out of range for type %s\n", index_str,
data->array_type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);

void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
void *lhs, void *rhs)
{
- unsigned long flags;
struct type_descriptor *rhs_type = data->rhs_type;
struct type_descriptor *lhs_type = data->lhs_type;
char rhs_str[VALUE_LENGTH];
@@ -396,7 +380,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
if (suppress_report(&data->location))
return;

- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);

val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
@@ -419,18 +403,16 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
lhs_str, rhs_str,
lhs_type->type_name);

- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);


void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
{
- unsigned long flags;
-
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
pr_err("calling __builtin_unreachable()\n");
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
panic("can't return from __builtin_unreachable()");
}
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
@@ -438,19 +420,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
void *val)
{
- unsigned long flags;
char val_str[VALUE_LENGTH];

if (suppress_report(&data->location))
return;

- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);

val_to_string(val_str, sizeof(val_str), data->type, val);

pr_err("load of value %s is not a valid value for type %s\n",
val_str, data->type->type_name);

- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
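The ubsan rework above removes report_lock altogether: ubsan_prologue()/ubsan_epilogue() no longer disable interrupts, leaving only the per-task current->in_ubsan counter to stop a report from recursing into itself. Output from concurrent reports can now interleave, but on RT that is preferable to taking a lock from the arbitrary contexts in which instrumented code may trap. A tiny userspace sketch of such a lock-free recursion guard (illustrative only):

#include <stdio.h>

static __thread int in_report;	/* plays the role of current->in_ubsan */

static void report(const char *what)
{
	if (in_report)		/* a report fired inside a report: bail out */
		return;
	in_report++;
	printf("================================\n");
	printf("report: %s\n", what);
	printf("================================\n");
	in_report--;
}

int main(void)
{
	report("demo: undefined behaviour detected");
	return 0;
}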
diff --git a/localversion-rt b/localversion-rt
new file mode 100644
index 000000000..e0a0b11b0
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
+-rt103
diff --git a/mm/Kconfig b/mm/Kconfig
index e607d1576..cef749b67 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -392,7 +392,7 @@ config NOMMU_INITIAL_TRIM_EXCESS

config TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
select COMPACTION
select RADIX_TREE_MULTIORDER
help
diff --git a/mm/compaction.c b/mm/compaction.c
index 1d991e443..6d726b900 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1672,10 +1672,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
block_start_pfn(cc->migrate_pfn, cc->order);

if (cc->last_migrated_pfn < current_block_start) {
- cpu = get_cpu();
+ cpu = get_cpu_light();
+ local_lock_irq(swapvec_lock);
lru_add_drain_cpu(cpu);
+ local_unlock_irq(swapvec_lock);
drain_local_pages(zone);
- put_cpu();
+ put_cpu_light();
/* No more flushing until we migrate again */
cc->last_migrated_pfn = 0;
}
diff --git a/mm/highmem.c b/mm/highmem.c
index 59db3223a..22aa3ddbd 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,10 +30,11 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>

-
+#ifndef CONFIG_PREEMPT_RT_FULL
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif
+#endif

/*
* Virtual_count is not a pure "count".
@@ -108,8 +109,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);

-
+#ifndef CONFIG_PREEMPT_RT_FULL
EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+#endif

unsigned int nr_free_highpages (void)
{
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 3a8ddf8ba..b209dbaef 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -103,7 +103,7 @@ static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
-static DEFINE_SPINLOCK(quarantine_lock);
+static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

/* Maximum size of the global queue. */
@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp);

- spin_lock(&quarantine_lock);
+ raw_spin_lock(&quarantine_lock);
WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
if (global_quarantine[quarantine_tail].bytes >=
@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (new_tail != quarantine_head)
quarantine_tail = new_tail;
}
- spin_unlock(&quarantine_lock);
+ raw_spin_unlock(&quarantine_lock);
}

local_irq_restore(flags);
@@ -230,7 +230,7 @@ void quarantine_reduce(void)
* expected case).
*/
srcu_idx = srcu_read_lock(&remove_cache_srcu);
- spin_lock_irqsave(&quarantine_lock, flags);
+ raw_spin_lock_irqsave(&quarantine_lock, flags);

/*
* Update quarantine size in case of hotplug. Allocate a fraction of
@@ -254,7 +254,7 @@ void quarantine_reduce(void)
quarantine_head = 0;
}

- spin_unlock_irqrestore(&quarantine_lock, flags);
+ raw_spin_unlock_irqrestore(&quarantine_lock, flags);

qlist_free_all(&to_free, NULL);
srcu_read_unlock(&remove_cache_srcu, srcu_idx);
@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache)
*/
on_each_cpu(per_cpu_remove_cache, cache, 1);

- spin_lock_irqsave(&quarantine_lock, flags);
+ raw_spin_lock_irqsave(&quarantine_lock, flags);
for (i = 0; i < QUARANTINE_BATCHES; i++) {
if (qlist_empty(&global_quarantine[i]))
continue;
qlist_move_cache(&global_quarantine[i], &to_free, cache);
/* Scanning whole quarantine can take a while. */
- spin_unlock_irqrestore(&quarantine_lock, flags);
+ raw_spin_unlock_irqrestore(&quarantine_lock, flags);
cond_resched();
- spin_lock_irqsave(&quarantine_lock, flags);
+ raw_spin_lock_irqsave(&quarantine_lock, flags);
}
- spin_unlock_irqrestore(&quarantine_lock, flags);
+ raw_spin_unlock_irqrestore(&quarantine_lock, flags);

qlist_free_all(&to_free, cache);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f54734abf..e8a7eebc2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -26,7 +26,7 @@
*
* The following locks and mutexes are used by kmemleak:
*
- * - kmemleak_lock (rwlock): protects the object_list modifications and
+ * - kmemleak_lock (raw spinlock): protects the object_list modifications and
* accesses to the object_tree_root. The object_list is the main list
* holding the metadata (struct kmemleak_object) for the allocated memory
* blocks. The object_tree_root is a red black tree used to look-up
@@ -147,7 +147,7 @@ struct kmemleak_scan_area {
* (use_count) and freed using the RCU mechanism.
*/
struct kmemleak_object {
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned int flags; /* object status flags */
struct list_head object_list;
struct list_head gray_list;
@@ -197,7 +197,7 @@ static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
-static DEFINE_RWLOCK(kmemleak_lock);
+static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
@@ -491,9 +491,9 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
struct kmemleak_object *object;

rcu_read_lock();
- read_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, alias);
- read_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

/* check whether the object is still available */
if (object && !get_object(object))
@@ -513,13 +513,13 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
unsigned long flags;
struct kmemleak_object *object;

- write_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, alias);
if (object) {
rb_erase(&object->rb_node, &object_tree_root);
list_del_rcu(&object->object_list);
}
- write_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

return object;
}
@@ -561,7 +561,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
INIT_LIST_HEAD(&object->object_list);
INIT_LIST_HEAD(&object->gray_list);
INIT_HLIST_HEAD(&object->area_list);
- spin_lock_init(&object->lock);
+ raw_spin_lock_init(&object->lock);
atomic_set(&object->use_count, 1);
object->flags = OBJECT_ALLOCATED;
object->pointer = ptr;
@@ -593,7 +593,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
/* kernel backtrace */
object->trace_len = __save_stack_trace(object->trace);

- write_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);

min_addr = min(min_addr, ptr);
max_addr = max(max_addr, ptr + size);
@@ -624,7 +624,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,

list_add_tail_rcu(&object->object_list, &object_list);
out:
- write_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
return object;
}

@@ -642,9 +642,9 @@ static void __delete_object(struct kmemleak_object *object)
* Locking here also ensures that the corresponding memory block
* cannot be freed when it is being scanned.
*/
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
object->flags &= ~OBJECT_ALLOCATED;
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
}

@@ -716,9 +716,9 @@ static void paint_it(struct kmemleak_object *object, int color)
{
unsigned long flags;

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
__paint_it(object, color);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
@@ -778,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
goto out;
}

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
if (size == SIZE_MAX) {
size = object->pointer + object->size - ptr;
} else if (ptr + size > object->pointer + object->size) {
@@ -794,7 +794,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)

hlist_add_head(&area->node, &object->area_list);
out_unlock:
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
out:
put_object(object);
}
@@ -817,9 +817,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
return;
}

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
object->excess_ref = excess_ref;
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
}

@@ -839,9 +839,9 @@ static void object_no_scan(unsigned long ptr)
return;
}

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
object->flags |= OBJECT_NO_SCAN;
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
}

@@ -902,11 +902,11 @@ static void early_alloc(struct early_log *log)
log->min_count, GFP_ATOMIC);
if (!object)
goto out;
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
for (i = 0; i < log->trace_len; i++)
object->trace[i] = log->trace[i];
object->trace_len = log->trace_len;
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
out:
rcu_read_unlock();
}
@@ -1096,9 +1096,9 @@ void __ref kmemleak_update_trace(const void *ptr)
return;
}

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
object->trace_len = __save_stack_trace(object->trace);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);

put_object(object);
}
@@ -1310,7 +1310,7 @@ static void scan_block(void *_start, void *_end,
unsigned long *end = _end - (BYTES_PER_POINTER - 1);
unsigned long flags;

- read_lock_irqsave(&kmemleak_lock, flags);
+ raw_spin_lock_irqsave(&kmemleak_lock, flags);
for (ptr = start; ptr < end; ptr++) {
struct kmemleak_object *object;
unsigned long pointer;
@@ -1344,7 +1344,7 @@ static void scan_block(void *_start, void *_end,
* previously acquired in scan_object(). These locks are
* enclosed by scan_mutex.
*/
- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
/* only pass surplus references (object already gray) */
if (color_gray(object)) {
excess_ref = object->excess_ref;
@@ -1353,7 +1353,7 @@ static void scan_block(void *_start, void *_end,
excess_ref = 0;
update_refs(object);
}
- spin_unlock(&object->lock);
+ raw_spin_unlock(&object->lock);

if (excess_ref) {
object = lookup_object(excess_ref, 0);
@@ -1362,12 +1362,12 @@ static void scan_block(void *_start, void *_end,
if (object == scanned)
/* circular reference, ignore */
continue;
- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
update_refs(object);
- spin_unlock(&object->lock);
+ raw_spin_unlock(&object->lock);
}
}
- read_unlock_irqrestore(&kmemleak_lock, flags);
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
@@ -1400,7 +1400,7 @@ static void scan_object(struct kmemleak_object *object)
* Once the object->lock is acquired, the corresponding memory block
* cannot be freed (the same lock is acquired in delete_object).
*/
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
if (object->flags & OBJECT_NO_SCAN)
goto out;
if (!(object->flags & OBJECT_ALLOCATED))
@@ -1419,9 +1419,9 @@ static void scan_object(struct kmemleak_object *object)
if (start >= end)
break;

- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
cond_resched();
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
} while (object->flags & OBJECT_ALLOCATED);
} else
hlist_for_each_entry(area, &object->area_list, node)
@@ -1429,7 +1429,7 @@ static void scan_object(struct kmemleak_object *object)
(void *)(area->start + area->size),
object);
out:
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
@@ -1482,7 +1482,7 @@ static void kmemleak_scan(void)
/* prepare the kmemleak_object's */
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
/*
* With a few exceptions there should be a maximum of
@@ -1499,7 +1499,7 @@ static void kmemleak_scan(void)
if (color_gray(object) && get_object(object))
list_add_tail(&object->gray_list, &gray_list);

- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();

@@ -1564,14 +1564,14 @@ static void kmemleak_scan(void)
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
&& update_checksum(object) && get_object(object)) {
/* color it gray temporarily */
object->count = object->min_count;
list_add_tail(&object->gray_list, &gray_list);
}
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();

@@ -1591,13 +1591,13 @@ static void kmemleak_scan(void)
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
if (unreferenced_object(object) &&
!(object->flags & OBJECT_REPORTED)) {
object->flags |= OBJECT_REPORTED;
new_leaks++;
}
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();

@@ -1749,10 +1749,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
struct kmemleak_object *object = v;
unsigned long flags;

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
print_unreferenced(seq, object);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
return 0;
}

@@ -1782,9 +1782,9 @@ static int dump_str_object_info(const char *str)
return -EINVAL;
}

- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
dump_object_info(object);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);

put_object(object);
return 0;
@@ -1803,11 +1803,11 @@ static void kmemleak_clear(void)

rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irqsave(&object->lock, flags);
if ((object->flags & OBJECT_REPORTED) &&
unreferenced_object(object))
__paint_it(object, KMEMLEAK_GREY);
- spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29d430885..8dd6dabf8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -71,6 +71,7 @@
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
+#include <linux/locallock.h>

#include <linux/uaccess.h>

@@ -96,6 +97,8 @@ int do_swap_account __read_mostly;
#define do_swap_account 0
#endif

+static DEFINE_LOCAL_IRQ_LOCK(event_lock);
+
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
@@ -2224,7 +2227,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
- curcpu = get_cpu();
+ curcpu = get_cpu_light();
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
@@ -2244,7 +2247,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
schedule_work_on(cpu, &stock->work);
}
}
- put_cpu();
+ put_cpu_light();
mutex_unlock(&percpu_charge_mutex);
}

@@ -5773,12 +5776,12 @@ static int mem_cgroup_move_account(struct page *page,

ret = 0;

- local_irq_disable();
+ local_lock_irq(event_lock);
mem_cgroup_charge_statistics(to, page, compound, nr_pages);
memcg_check_events(to, page);
mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
memcg_check_events(from, page);
- local_irq_enable();
+ local_unlock_irq(event_lock);
out_unlock:
unlock_page(page);
out:
@@ -6925,10 +6928,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,

commit_charge(page, memcg, lrucare);

- local_irq_disable();
+ local_lock_irq(event_lock);
mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
memcg_check_events(memcg, page);
- local_irq_enable();
+ local_unlock_irq(event_lock);

if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
@@ -6997,7 +7000,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
memcg_oom_recover(ug->memcg);
}

- local_irq_save(flags);
+ local_lock_irqsave(event_lock, flags);
__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
@@ -7005,7 +7008,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
- local_irq_restore(flags);
+ local_unlock_irqrestore(event_lock, flags);

if (!mem_cgroup_is_root(ug->memcg))
css_put_many(&ug->memcg->css, nr_pages);
@@ -7168,10 +7171,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)

commit_charge(newpage, memcg, false);

- local_irq_save(flags);
+ local_lock_irqsave(event_lock, flags);
mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
memcg_check_events(memcg, newpage);
- local_irq_restore(flags);
+ local_unlock_irqrestore(event_lock, flags);
}

DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -7357,6 +7360,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
+ unsigned long flags;

VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
@@ -7402,10 +7406,14 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
+ local_lock_irqsave(event_lock, flags);
+#ifndef CONFIG_PREEMPT_RT_BASE
VM_BUG_ON(!irqs_disabled());
+#endif
mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
-nr_entries);
memcg_check_events(memcg, page);
+ local_unlock_irqrestore(event_lock, flags);

if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_entries);
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 3e612ae74..d0ccc0709 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -25,6 +25,7 @@ void use_mm(struct mm_struct *mm)
struct task_struct *tsk = current;

task_lock(tsk);
+ preempt_disable_rt();
active_mm = tsk->active_mm;
if (active_mm != mm) {
mmgrab(mm);
@@ -32,6 +33,7 @@ void use_mm(struct mm_struct *mm)
}
tsk->mm = mm;
switch_mm(active_mm, mm, tsk);
+ preempt_enable_rt();
task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
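The mm/page_alloc.c diff that follows carries the largest structural change of this section: free_pcppages_bulk() is split so that pages are first detached from the per-CPU list under the new pa_lock local lock (isolate_pcp_pages()), and only afterwards handed back to the buddy allocator under zone->lock, so the per-CPU lock and the zone lock are never held at the same time. A compact userspace model of that two-phase drain (names and sizes invented for the demo):

#include <pthread.h>
#include <stdio.h>

#define BATCH 8

static pthread_mutex_t pcp_lock = PTHREAD_MUTEX_INITIALIZER;  /* pa_lock */
static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER; /* zone->lock */

static int pcp_list[64], pcp_count = 32;	/* per-CPU cached "pages" */
static int zone_free;				/* buddy free counter */

static void drain_pcp(void)
{
	int dst[BATCH], n = 0;

	/* phase 1: detach a batch under the per-CPU lock only
	 * (isolate_pcp_pages) */
	pthread_mutex_lock(&pcp_lock);
	while (pcp_count > 0 && n < BATCH)
		dst[n++] = pcp_list[--pcp_count];
	pthread_mutex_unlock(&pcp_lock);

	/* phase 2: give the batch back under the zone lock only
	 * (free_pcppages_bulk) */
	pthread_mutex_lock(&zone_lock);
	zone_free += n;
	pthread_mutex_unlock(&zone_lock);
}

int main(void)
{
	drain_pcp();
	printf("freed batch, zone_free=%d pcp_count=%d\n",
	       zone_free, pcp_count);
	return 0;
}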
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc8be4b00..84f01e7fb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -60,6 +60,7 @@
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
+#include <linux/locallock.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
@@ -333,6 +334,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags) \
+ local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags) \
+ local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
+#endif
+
int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1153,7 +1166,7 @@ static inline void prefetch_buddy(struct page *page)
}

/*
- * Frees a number of pages from the PCP lists
+ * Frees a number of pages which have been collected from the pcp lists.
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
@@ -1163,15 +1176,57 @@ static inline void prefetch_buddy(struct page *page)
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
-static void free_pcppages_bulk(struct zone *zone, int count,
- struct per_cpu_pages *pcp)
+static void free_pcppages_bulk(struct zone *zone, struct list_head *head,
+ bool zone_retry)
+{
+ bool isolated_pageblocks;
+ struct page *page, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ isolated_pageblocks = has_isolate_pageblock(zone);
+
+ /*
+ * Use safe version since after __free_one_page(),
+ * page->lru.next will not point to original list.
+ */
+ list_for_each_entry_safe(page, tmp, head, lru) {
+ int mt = get_pcppage_migratetype(page);
+
+ if (page_zone(page) != zone) {
+ /*
+ * free_unref_page_list() sorts pages by zone. If we end
+ * up with pages from a different NUMA nodes belonging
+ * to the same ZONE index then we need to redo with the
+ * correct ZONE pointer. Skip the page for now, redo it
+ * on the next iteration.
+ */
+ WARN_ON_ONCE(zone_retry == false);
+ if (zone_retry)
+ continue;
+ }
+
+ /* MIGRATE_ISOLATE page should not go to pcplists */
+ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+ /* Pageblock could have been isolated meanwhile */
+ if (unlikely(isolated_pageblocks))
+ mt = get_pageblock_migratetype(page);
+
+ list_del(&page->lru);
+ __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
+ trace_mm_page_pcpu_drain(page, 0, mt);
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp,
+ struct list_head *dst)
+
{
int migratetype = 0;
int batch_free = 0;
int prefetch_nr = 0;
- bool isolated_pageblocks;
- struct page *page, *tmp;
- LIST_HEAD(head);
+ struct page *page;

/*
* Ensure proper count is passed which otherwise would stuck in the
@@ -1208,7 +1263,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
if (bulkfree_pcp_prepare(page))
continue;

- list_add_tail(&page->lru, &head);
+ list_add_tail(&page->lru, dst);

/*
* We are going to put the page back to the global
@@ -1223,26 +1278,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
prefetch_buddy(page);
} while (--count && --batch_free && !list_empty(list));
}
-
- spin_lock(&zone->lock);
- isolated_pageblocks = has_isolate_pageblock(zone);
-
- /*
- * Use safe version since after __free_one_page(),
- * page->lru.next will not point to original list.
- */
- list_for_each_entry_safe(page, tmp, &head, lru) {
- int mt = get_pcppage_migratetype(page);
- /* MIGRATE_ISOLATE page should not go to pcplists */
- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
- /* Pageblock could have been isolated meanwhile */
- if (unlikely(isolated_pageblocks))
- mt = get_pageblock_migratetype(page);
-
- __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
- trace_mm_page_pcpu_drain(page, 0, mt);
- }
- spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
@@ -1338,11 +1373,11 @@ static void __free_pages_ok(struct page *page, unsigned int order,
return;

migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, pfn, order, migratetype,
fpi_flags);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}

void __free_pages_core(struct page *page, unsigned int order)
@@ -2689,13 +2724,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
int to_drain, batch;
+ LIST_HEAD(dst);

- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0)
- free_pcppages_bulk(zone, to_drain, pcp);
- local_irq_restore(flags);
+ isolate_pcp_pages(to_drain, pcp, &dst);
+
+ local_unlock_irqrestore(pa_lock, flags);
+
+ if (to_drain > 0)
+ free_pcppages_bulk(zone, &dst, false);
}
#endif

@@ -2711,14 +2751,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
+ LIST_HEAD(dst);
+ int count;

- local_irq_save(flags);
+ cpu_lock_irqsave(cpu, flags);
pset = per_cpu_ptr(zone->pageset, cpu);

pcp = &pset->pcp;
- if (pcp->count)
- free_pcppages_bulk(zone, pcp->count, pcp);
- local_irq_restore(flags);
+ count = pcp->count;
+ if (count)
+ isolate_pcp_pages(count, pcp, &dst);
+
+ cpu_unlock_irqrestore(cpu, flags);
+
+ if (count)
+ free_pcppages_bulk(zone, &dst, false);
}

/*
@@ -2753,6 +2800,7 @@ void drain_local_pages(struct zone *zone)
drain_pages(cpu);
}

+#ifndef CONFIG_PREEMPT_RT_BASE
static void drain_local_pages_wq(struct work_struct *work)
{
/*
@@ -2766,6 +2814,7 @@ static void drain_local_pages_wq(struct work_struct *work)
drain_local_pages(NULL);
preempt_enable();
}
+#endif

/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
@@ -2832,7 +2881,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
-
+#ifdef CONFIG_PREEMPT_RT_BASE
+ for_each_cpu(cpu, &cpus_with_pcps) {
+ if (zone)
+ drain_pages_zone(cpu, zone);
+ else
+ drain_pages(cpu);
+ }
+#else
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
@@ -2840,6 +2896,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+#endif

mutex_unlock(&pcpu_drain_mutex);
}
@@ -2911,7 +2968,8 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
return true;
}

-static void free_unref_page_commit(struct page *page, unsigned long pfn)
+static void free_unref_page_commit(struct page *page, unsigned long pfn,
+ struct list_head *dst)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
@@ -2941,7 +2999,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
-	free_pcppages_bulk(zone, batch, pcp);
+
+ isolate_pcp_pages(batch, pcp, dst);
}
}

@@ -2952,7 +3011,8 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
-
+ struct zone *zone = page_zone(page);
+ LIST_HEAD(dst);
/* Free dynamic hugetlb page */
if (dhugetlb_enabled && PagePool(page)) {
free_page_to_dhugetlb_pool(page);
@@ -2962,9 +3022,11 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;

- local_irq_save(flags);
- free_unref_page_commit(page, pfn);
- local_irq_restore(flags);
+ local_lock_irqsave(pa_lock, flags);
+ free_unref_page_commit(page, pfn, &dst);
+ local_unlock_irqrestore(pa_lock, flags);
+ if (!list_empty(&dst))
+ free_pcppages_bulk(zone, &dst, false);
}

/*
@@ -2975,7 +3037,11 @@ void free_unref_page_list(struct list_head *list)
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
|
|
+ struct list_head dsts[__MAX_NR_ZONES];
|
|
+ int i;
|
|
|
|
+ for (i = 0; i < __MAX_NR_ZONES; i++)
|
|
+ INIT_LIST_HEAD(&dsts[i]);
|
|
/* Free dynamic hugetlb pages */
|
|
if (dhugetlb_enabled) {
|
|
list_for_each_entry_safe(page, next, list, lru) {
|
|
@@ -2994,25 +3060,42 @@ void free_unref_page_list(struct list_head *list)
|
|
set_page_private(page, pfn);
|
|
}
|
|
|
|
- local_irq_save(flags);
|
|
+ local_lock_irqsave(pa_lock, flags);
|
|
list_for_each_entry_safe(page, next, list, lru) {
|
|
unsigned long pfn = page_private(page);
|
|
+ enum zone_type type;
|
|
|
|
set_page_private(page, 0);
|
|
trace_mm_page_free_batched(page);
|
|
- free_unref_page_commit(page, pfn);
|
|
+ type = page_zonenum(page);
|
|
+ free_unref_page_commit(page, pfn, &dsts[type]);
|
|
|
|
/*
|
|
* Guard against excessive IRQ disabled times when we get
|
|
* a large list of pages to free.
|
|
*/
|
|
if (++batch_count == SWAP_CLUSTER_MAX) {
|
|
- local_irq_restore(flags);
|
|
+ local_unlock_irqrestore(pa_lock, flags);
|
|
batch_count = 0;
|
|
- local_irq_save(flags);
|
|
+ local_lock_irqsave(pa_lock, flags);
|
|
}
|
|
}
|
|
- local_irq_restore(flags);
|
|
+ local_unlock_irqrestore(pa_lock, flags);
|
|
+
|
|
+ for (i = 0; i < __MAX_NR_ZONES; ) {
|
|
+ struct page *page;
|
|
+ struct zone *zone;
|
|
+
|
|
+ if (list_empty(&dsts[i])) {
|
|
+ i++;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ page = list_first_entry(&dsts[i], struct page, lru);
|
|
+ zone = page_zone(page);
|
|
+
|
|
+ free_pcppages_bulk(zone, &dsts[i], true);
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
@@ -3166,7 +3249,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
|
|
struct page *page;
|
|
unsigned long flags;
|
|
|
|
- local_irq_save(flags);
|
|
+ local_lock_irqsave(pa_lock, flags);
|
|
pcp = &this_cpu_ptr(zone->pageset)->pcp;
|
|
list = &pcp->lists[migratetype];
|
|
page = __rmqueue_pcplist(zone, migratetype, pcp, list);
|
|
@@ -3174,7 +3257,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
|
|
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
|
|
zone_statistics(preferred_zone, zone);
|
|
}
|
|
- local_irq_restore(flags);
|
|
+ local_unlock_irqrestore(pa_lock, flags);
|
|
return page;
|
|
}
|
|
|
|
@@ -3201,7 +3284,7 @@ struct page *rmqueue(struct zone *preferred_zone,
|
|
* allocate greater than order-1 page units with __GFP_NOFAIL.
|
|
*/
|
|
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
|
|
- spin_lock_irqsave(&zone->lock, flags);
|
|
+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
|
|
|
|
do {
|
|
page = NULL;
|
|
@@ -3221,14 +3304,14 @@ struct page *rmqueue(struct zone *preferred_zone,
|
|
|
|
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
|
|
zone_statistics(preferred_zone, zone);
|
|
- local_irq_restore(flags);
|
|
+ local_unlock_irqrestore(pa_lock, flags);
|
|
|
|
out:
|
|
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
|
|
return page;
|
|
|
|
failed:
|
|
- local_irq_restore(flags);
|
|
+ local_unlock_irqrestore(pa_lock, flags);
|
|
return NULL;
|
|
}
|
|
|
|
@@ -7666,8 +7749,9 @@ void __init free_area_init(unsigned long *zones_size)
|
|
|
|
static int page_alloc_cpu_dead(unsigned int cpu)
|
|
{
|
|
-
|
|
+ local_lock_irq_on(swapvec_lock, cpu);
|
|
lru_add_drain_cpu(cpu);
|
|
+ local_unlock_irq_on(swapvec_lock, cpu);
|
|
drain_pages(cpu);
|
|
|
|
/*
|
|
@@ -8585,7 +8669,7 @@ void zone_pcp_reset(struct zone *zone)
|
|
struct per_cpu_pageset *pset;
|
|
|
|
/* avoid races with drain_pages() */
|
|
- local_irq_save(flags);
|
|
+ local_lock_irqsave(pa_lock, flags);
|
|
if (zone->pageset != &boot_pageset) {
|
|
for_each_online_cpu(cpu) {
|
|
pset = per_cpu_ptr(zone->pageset, cpu);
|
|
@@ -8594,7 +8678,7 @@ void zone_pcp_reset(struct zone *zone)
|
|
free_percpu(zone->pageset);
|
|
zone->pageset = &boot_pageset;
|
|
}
|
|
- local_irq_restore(flags);
|
|
+ local_unlock_irqrestore(pa_lock, flags);
|
|
}
|
|
|
|
#ifdef CONFIG_MEMORY_HOTREMOVE
|
|
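The page_alloc.c hunks above all follow one pattern: free_pcppages_bulk() loses its
pcp argument and now only moves already-detached pages into the buddy lists under
zone->lock, while the new isolate_pcp_pages() detaches them under the per-CPU
pa_lock. A minimal sketch of how the two halves combine (illustration only, not part
of the patch; drain_then_free() is a hypothetical name):

	static void drain_then_free(struct zone *zone, struct per_cpu_pages *pcp)
	{
		unsigned long flags;
		LIST_HEAD(dst);

		/* short, per-CPU critical section: just unlink the pages */
		local_lock_irqsave(pa_lock, flags);
		isolate_pcp_pages(pcp->count, pcp, &dst);
		local_unlock_irqrestore(pa_lock, flags);

		/* buddy insertion takes zone->lock itself, outside pa_lock */
		if (!list_empty(&dst))
			free_pcppages_bulk(zone, &dst, false);
	}

Keeping the two locks in disjoint sections is what lets pa_lock become a sleeping
local lock on RT without nesting it inside zone->lock.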
diff --git a/mm/slab.c b/mm/slab.c
index 0dcce6822..f5398a95b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 	parent->shared = NULL;
 	parent->alien = NULL;
 	parent->colour_next = 0;
-	spin_lock_init(&parent->list_lock);
+	raw_spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
 }
@@ -600,9 +600,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
 	page_node = page_to_nid(page);
 	n = get_node(cachep, page_node);

-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	free_block(cachep, &objp, 1, page_node, &list);
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);

 	slabs_destroy(cachep, &list);
 }
@@ -741,7 +741,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 	struct kmem_cache_node *n = get_node(cachep, node);

 	if (ac->avail) {
-		spin_lock(&n->list_lock);
+		raw_spin_lock(&n->list_lock);
 		/*
 		 * Stuff objects into the remote nodes shared array first.
 		 * That way we could avoid the overhead of putting the objects
@@ -752,7 +752,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,

 		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 	}
 }

@@ -825,9 +825,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
 		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, page_node);
-		spin_lock(&n->list_lock);
+		raw_spin_lock(&n->list_lock);
 		free_block(cachep, &objp, 1, page_node, &list);
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
@@ -868,10 +868,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 	 */
 	n = get_node(cachep, node);
 	if (n) {
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 				cachep->num;
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);

 		return 0;
 	}
@@ -950,7 +950,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
 		goto fail;

 	n = get_node(cachep, node);
-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	if (n->shared && force_change) {
 		free_block(cachep, n->shared->entry,
 				n->shared->avail, node, &list);
@@ -968,7 +968,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
 		new_alien = NULL;
 	}

-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);
 	slabs_destroy(cachep, &list);

 	/*
@@ -1007,7 +1007,7 @@ static void cpuup_canceled(long cpu)
 		if (!n)
 			continue;

-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);

 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
@@ -1020,7 +1020,7 @@ static void cpuup_canceled(long cpu)
 		}

 		if (!cpumask_empty(mask)) {
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 			goto free_slab;
 		}

@@ -1034,7 +1034,7 @@ static void cpuup_canceled(long cpu)
 		alien = n->alien;
 		n->alien = NULL;

-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);

 		kfree(shared);
 		if (alien) {
@@ -1218,7 +1218,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
-	spin_lock_init(&ptr->list_lock);
+	raw_spin_lock_init(&ptr->list_lock);

 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->node[nodeid] = ptr;
@@ -1389,11 +1389,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 	for_each_kmem_cache_node(cachep, node, n) {
 		unsigned long total_slabs, free_slabs, free_objs;

-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		total_slabs = n->total_slabs;
 		free_slabs = n->free_slabs;
 		free_objs = n->free_objects;
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);

 		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
 			node, total_slabs - free_slabs, total_slabs,
@@ -2188,7 +2188,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+	assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
 #endif
 }

@@ -2196,7 +2196,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&get_node(cachep, node)->list_lock);
+	assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
 #endif
 }

@@ -2236,9 +2236,9 @@ static void do_drain(void *arg)
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
 	n = get_node(cachep, node);
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node, &list);
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	slabs_destroy(cachep, &list);
 	ac->avail = 0;
 }
@@ -2256,9 +2256,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 			drain_alien_cache(cachep, n->alien);

 	for_each_kmem_cache_node(cachep, node, n) {
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		drain_array_locked(cachep, n->shared, node, true, &list);
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);

 		slabs_destroy(cachep, &list);
 	}
@@ -2280,10 +2280,10 @@ static int drain_freelist(struct kmem_cache *cache,
 	nr_freed = 0;
 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {

-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		p = n->slabs_free.prev;
 		if (p == &n->slabs_free) {
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 			goto out;
 		}

@@ -2296,7 +2296,7 @@ static int drain_freelist(struct kmem_cache *cache,
 		 * to the cache.
 		 */
 		n->free_objects -= cache->num;
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 		slab_destroy(cache, page);
 		nr_freed++;
 	}
@@ -2744,7 +2744,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 	INIT_LIST_HEAD(&page->lru);
 	n = get_node(cachep, page_to_nid(page));

-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
 		list_add_tail(&page->lru, &(n->slabs_free));
@@ -2754,7 +2754,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)

 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);

 	fixup_objfreelist_debug(cachep, &list);
 }
@@ -2922,7 +2922,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 {
 	struct page *page;

-	assert_spin_locked(&n->list_lock);
+	assert_raw_spin_locked(&n->list_lock);
 	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
 	if (!page) {
 		n->free_touched = 1;
@@ -2948,10 +2948,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
 	if (!gfp_pfmemalloc_allowed(flags))
 		return NULL;

-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	page = get_first_slab(n, true);
 	if (!page) {
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 		return NULL;
 	}

@@ -2960,7 +2960,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,

 	fixup_slab_list(cachep, n, page, &list);

-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);

 	return obj;
@@ -3019,7 +3019,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	if (!n->free_objects && (!shared || !shared->avail))
 		goto direct_grow;

-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	shared = READ_ONCE(n->shared);

 	/* See if we can refill from the shared array */
@@ -3043,7 +3043,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 must_grow:
 	n->free_objects -= ac->avail;
 alloc_done:
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);

 direct_grow:
@@ -3268,7 +3268,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	BUG_ON(!n);

 	check_irq_off();
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	page = get_first_slab(n, false);
 	if (!page)
 		goto must_grow;
@@ -3286,12 +3286,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,

 	fixup_slab_list(cachep, n, page, &list);

-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 	return obj;

 must_grow:
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
 	if (page) {
 		/* This slab isn't counted yet so don't update free_objects */
@@ -3467,7 +3467,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)

 	check_irq_off();
 	n = get_node(cachep, node);
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	if (n->shared) {
 		struct array_cache *shared_array = n->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3496,7 +3496,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 		STATS_SET_FREEABLE(cachep, i);
 	}
 #endif
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	slabs_destroy(cachep, &list);
 	ac->avail -= batchcount;
 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
@@ -3904,9 +3904,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,

 		node = cpu_to_mem(cpu);
 		n = get_node(cachep, node);
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		free_block(cachep, ac->entry, ac->avail, node, &list);
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	free_percpu(prev);
@@ -4031,9 +4031,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
 		return;
 	}

-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	drain_array_locked(cachep, ac, node, false, &list);
-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);

 	slabs_destroy(cachep, &list);
 }
@@ -4117,7 +4117,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)

 	for_each_kmem_cache_node(cachep, node, n) {
 		check_irq_on();
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);

 		total_slabs += n->total_slabs;
 		free_slabs += n->free_slabs;
@@ -4126,7 +4126,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 		if (n->shared)
 			shared_avail += n->shared->avail;

-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 	}
 	num_objs = total_slabs * cachep->num;
 	active_slabs = total_slabs - free_slabs;
@@ -4346,13 +4346,13 @@ static int leaks_show(struct seq_file *m, void *p)
 		for_each_kmem_cache_node(cachep, node, n) {

 			check_irq_on();
-			spin_lock_irq(&n->list_lock);
+			raw_spin_lock_irq(&n->list_lock);

 			list_for_each_entry(page, &n->slabs_full, lru)
 				handle_slab(x, cachep, page);
 			list_for_each_entry(page, &n->slabs_partial, lru)
 				handle_slab(x, cachep, page);
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 		}
 	} while (!is_store_user_clean(cachep));
diff --git a/mm/slab.h b/mm/slab.h
index c683b07ff..e45fc3a81 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -450,7 +450,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
  * The slab lists for all objects.
  */
 struct kmem_cache_node {
-	spinlock_t list_lock;
+	raw_spinlock_t list_lock;

 #ifdef CONFIG_SLAB
 	struct list_head slabs_partial;	/* partial list first, better asm code */
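On PREEMPT_RT a spinlock_t becomes a sleeping rtmutex, so a lock taken from
non-preemptible context (IRQs off, inside other raw locks) must stay a
raw_spinlock_t; that is the whole content of the slab.c/slab.h conversion above. A
sketch of the resulting usage (hypothetical names, illustration only):

	struct example_node {
		raw_spinlock_t list_lock;	/* stays a real spinlock on RT */
		struct list_head slabs_partial;
	};

	static void example_touch(struct example_node *n)
	{
		unsigned long flags;

		/* never sleeps, so it is legal with interrupts hard-disabled */
		raw_spin_lock_irqsave(&n->list_lock, flags);
		/* short, bounded list manipulation only */
		raw_spin_unlock_irqrestore(&n->list_lock, flags);
	}

The trade-off is that raw-lock sections add directly to worst-case latency, which is
why the patch keeps them as small as the surrounding hunks show.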
diff --git a/mm/slub.c b/mm/slub.c
index 150362a66..bed971223 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1188,7 +1188,7 @@ static noinline int free_debug_processing(
 	unsigned long uninitialized_var(flags);
 	int ret = 0;

-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	slab_lock(page);

 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
@@ -1223,7 +1223,7 @@ static noinline int free_debug_processing(
 			 bulk_cnt, cnt);

 	slab_unlock(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
 	return ret;
@@ -1356,6 +1356,12 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
 }
 #endif /* CONFIG_SLUB_DEBUG */

+struct slub_free_list {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
@@ -1597,10 +1603,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *start, *p;
 	int idx, order;
 	bool shuffle;
+	bool enableirqs = false;

 	flags &= gfp_allowed_mask;

 	if (gfpflags_allow_blocking(flags))
+		enableirqs = true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state > SYSTEM_BOOTING)
+		enableirqs = true;
+#endif
+	if (enableirqs)
 		local_irq_enable();

 	flags |= s->allocflags;
@@ -1659,7 +1672,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->frozen = 1;

 out:
-	if (gfpflags_allow_blocking(flags))
+	if (enableirqs)
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -1717,6 +1730,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }

+static void free_delayed(struct list_head *h)
+{
+	while (!list_empty(h)) {
+		struct page *page = list_first_entry(h, struct page, lru);
+
+		list_del(&page->lru);
+		__free_slab(page->slab_cache, page);
+	}
+}
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page = container_of(h, struct page, rcu_head);
@@ -1728,6 +1751,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
 		call_rcu(&page->rcu_head, rcu_free_slab);
+	} else if (irqs_disabled()) {
+		struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
+
+		raw_spin_lock(&f->lock);
+		list_add(&page->lru, &f->list);
+		raw_spin_unlock(&f->lock);
 	} else
 		__free_slab(s, page);
 }
@@ -1835,7 +1864,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	if (!n || !n->nr_partial)
 		return NULL;

-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;

@@ -1860,7 +1889,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			break;

 	}
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	return object;
 }

@@ -2112,7 +2141,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	} else {
 		m = M_FULL;
@@ -2123,7 +2152,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	}

@@ -2158,7 +2187,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 		goto redo;

 	if (lock)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);

 	if (m == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
@@ -2194,10 +2223,10 @@ static void unfreeze_partials(struct kmem_cache *s,
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
-				spin_unlock(&n->list_lock);
+				raw_spin_unlock(&n->list_lock);

 			n = n2;
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}

 		do {
@@ -2226,7 +2255,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 	}

 	if (n)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);

 	while (discard_page) {
 		page = discard_page;
@@ -2263,14 +2292,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > s->cpu_partial) {
+				struct slub_free_list *f;
 				unsigned long flags;
+				LIST_HEAD(tofree);
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
 				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+				f = this_cpu_ptr(&slub_free_list);
+				raw_spin_lock(&f->lock);
+				list_splice_init(&f->list, &tofree);
+				raw_spin_unlock(&f->lock);
 				local_irq_restore(flags);
+				free_delayed(&tofree);
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
@@ -2338,7 +2374,19 @@ static bool has_cpu_slab(int cpu, void *info)

 static void flush_all(struct kmem_cache *s)
 {
+	LIST_HEAD(tofree);
+	int cpu;
+
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	for_each_online_cpu(cpu) {
+		struct slub_free_list *f;
+
+		f = &per_cpu(slub_free_list, cpu);
+		raw_spin_lock_irq(&f->lock);
+		list_splice_init(&f->list, &tofree);
+		raw_spin_unlock_irq(&f->lock);
+		free_delayed(&tofree);
+	}
 }

 /*
@@ -2393,10 +2441,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	unsigned long x = 0;
 	struct page *page;

-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
 		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
@@ -2536,8 +2584,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
+			  unsigned long addr, struct kmem_cache_cpu *c,
+			  struct list_head *to_free)
 {
+	struct slub_free_list *f;
 	void *freelist;
 	struct page *page;

@@ -2604,6 +2654,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
+
+out:
+	f = this_cpu_ptr(&slub_free_list);
+	raw_spin_lock(&f->lock);
+	list_splice_init(&f->list, to_free);
+	raw_spin_unlock(&f->lock);
+
 	return freelist;

 new_slab:
@@ -2619,7 +2676,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,

 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		return NULL;
+		goto out;
 	}

 	page = c->page;
@@ -2632,7 +2689,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto new_slab;	/* Slab failed checks. Next slab needed */

 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
-	return freelist;
+	goto out;
 }

 /*
@@ -2644,6 +2701,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void *p;
 	unsigned long flags;
+	LIST_HEAD(tofree);

 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2655,8 +2713,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c = this_cpu_ptr(s->cpu_slab);
 #endif

-	p = ___slab_alloc(s, gfpflags, node, addr, c);
+	p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
 	local_irq_restore(flags);
+	free_delayed(&tofree);
 	return p;
 }

@@ -2842,7 +2901,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,

 	do {
 		if (unlikely(n)) {
-			spin_unlock_irqrestore(&n->list_lock, flags);
+			raw_spin_unlock_irqrestore(&n->list_lock, flags);
 			n = NULL;
 		}
 		prior = page->freelist;
@@ -2874,7 +2933,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 * Otherwise the list_lock will synchronize with
 				 * other processors updating the list of slabs.
 				 */
-				spin_lock_irqsave(&n->list_lock, flags);
+				raw_spin_lock_irqsave(&n->list_lock, flags);

 			}
 		}
@@ -2916,7 +2975,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return;

 slab_empty:
@@ -2931,7 +2990,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		remove_full(s, n, page);
 	}

-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -3136,6 +3195,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
 	struct kmem_cache_cpu *c;
+	LIST_HEAD(to_free);
 	int i;

 	/* memcg and kmem_cache debug support */
@@ -3168,7 +3228,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			 * of re-populating per CPU c->freelist
 			 */
 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
-					    _RET_IP_, c);
+					    _RET_IP_, c, &to_free);
 			if (unlikely(!p[i]))
 				goto error;

@@ -3180,6 +3240,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
+	free_delayed(&to_free);

 	/* Clear memory outside IRQ disabled fastpath loop */
 	if (unlikely(flags & __GFP_ZERO)) {
@@ -3194,6 +3255,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	return i;
 error:
 	local_irq_enable();
+	free_delayed(&to_free);
 	slab_post_alloc_hook(s, flags, i, p);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
@@ -3329,7 +3391,7 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	spin_lock_init(&n->list_lock);
+	raw_spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
@@ -3678,6 +3740,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 			      const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_PREEMPT_RT_BASE
+	/* XXX move out of irq-off section */
+	slab_err(s, page, text, s->name);
+#else
+
 	void *addr = page_address(page);
 	void *p;
 	unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects),
@@ -3699,6 +3766,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 	slab_unlock(page);
 	kfree(map);
 #endif
+#endif
 }

 /*
@@ -3712,7 +3780,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	struct page *page, *h;

 	BUG_ON(irqs_disabled());
-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			remove_partial(n, page);
@@ -3722,7 +3790,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);

 	list_for_each_entry_safe(page, h, &discard, lru)
 		discard_slab(s, page);
@@ -3995,7 +4063,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
 			INIT_LIST_HEAD(promote + i);

-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);

 		/*
 		 * Build lists of slabs to discard or promote.
@@ -4026,7 +4094,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
 			list_splice(promote + i, &n->partial);

-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);

 		/* Release empty slabs */
 		list_for_each_entry_safe(page, t, &discard, lru)
@@ -4240,6 +4308,12 @@ void __init kmem_cache_init(void)
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
 	int node;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+	}

 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -4448,7 +4522,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	struct page *page;
 	unsigned long flags;

-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);

 	list_for_each_entry(page, &n->partial, lru) {
 		validate_slab_slab(s, page, map);
@@ -4470,7 +4544,7 @@ static int validate_slab_node(struct kmem_cache *s,
 		s->name, count, atomic_long_read(&n->nr_slabs));

 out:
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }

@@ -4527,6 +4601,9 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
 	struct location *l;
 	int order;

+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC)
+		return 0;
+
 	order = get_order(sizeof(struct location) * max);

 	l = (void *)__get_free_pages(flags, order);
@@ -4660,12 +4737,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;

-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
 			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
 			process_slab(&t, s, page, alloc, map);
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	}

 	for (i = 0; i < t.count; i++) {
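The slub hunks implement a deferred-free scheme: when free_slab() runs with
interrupts disabled (where __free_slab() is not safe on RT), the page is parked on
the per-CPU slub_free_list, and every path that re-enables interrupts drains the
list with free_delayed(). A minimal sketch of the drain step, mirroring the
put_cpu_partial() hunk above (illustration only; example_drain() is a hypothetical
name):

	static void example_drain(void)
	{
		struct slub_free_list *f;
		unsigned long flags;
		LIST_HEAD(tofree);

		/* splice the parked pages out under the raw per-CPU lock */
		local_irq_save(flags);
		f = this_cpu_ptr(&slub_free_list);
		raw_spin_lock(&f->lock);
		list_splice_init(&f->list, &tofree);
		raw_spin_unlock(&f->lock);
		local_irq_restore(flags);

		/* the actual __free_slab() calls run with IRQs enabled */
		free_delayed(&tofree);
	}

The `out:` label added to ___slab_alloc() exists for the same reason: every exit
from the allocation slow path, including the error paths, must hand the parked
pages back to the caller's to_free list.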
diff --git a/mm/swap.c b/mm/swap.c
index 002c98a81..7d99561c4 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -33,6 +33,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>

@@ -51,6 +52,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 #endif
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

 /*
  * This path almost never happens for VM activity - pages are normally
@@ -253,11 +256,11 @@ void rotate_reclaimable_page(struct page *page)
 		unsigned long flags;

 		get_page(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }

@@ -310,12 +313,13 @@ void activate_page(struct page *page)
 {
 	page = compound_head(page);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       activate_page_pvecs);

 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swapvec_lock, activate_page_pvecs);
 	}
 }

@@ -337,7 +341,7 @@ void activate_page(struct page *page)

 static void __lru_cache_activate_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 	int i;

 	/*
@@ -359,7 +363,7 @@ static void __lru_cache_activate_page(struct page *page)
 		}
 	}

-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }

 /*
@@ -403,12 +407,12 @@ EXPORT_SYMBOL(mark_page_accessed);

 static void __lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);

 	get_page(page);
 	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }

 /**
@@ -591,9 +595,15 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;

 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+#ifdef CONFIG_PREEMPT_RT_BASE
+		local_lock_irqsave_on(rotate_lock, flags, cpu);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
+		local_lock_irqsave(rotate_lock, flags);
+		pagevec_move_tail(pvec);
+		local_unlock_irqrestore(rotate_lock, flags);
+#endif
 	}

 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -625,11 +635,12 @@ void deactivate_file_page(struct page *page)
 		return;

 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_deactivate_file_pvecs);

 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-		put_cpu_var(lru_deactivate_file_pvecs);
+		put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
 	}
 }

@@ -644,23 +655,34 @@ void mark_page_lazyfree(struct page *page)
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_lazyfree_pvecs);

 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
-		put_cpu_var(lru_lazyfree_pvecs);
+		put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
 	}
 }

 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+	local_unlock_cpu(swapvec_lock);
 }

 #ifdef CONFIG_SMP

+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_on(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
+
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

 static void lru_add_drain_per_cpu(struct work_struct *dummy)
@@ -668,6 +690,16 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 	lru_add_drain();
 }

+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	queue_work_on(cpu, mm_percpu_wq, work);
+	cpumask_set_cpu(cpu, has_work);
+}
+#endif
+
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -692,21 +724,19 @@ void lru_add_drain_all(void)
 	cpumask_clear(&has_work);

 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, mm_percpu_wq, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}

+#ifndef CONFIG_PREEMPT_RT_BASE
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif

 	mutex_unlock(&lock);
 }
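Throughout mm/swap.c the substitution is mechanical: get_cpu_var() serialized the
pagevecs by disabling preemption, while get_locked_var() takes the per-CPU
swapvec_lock, which stays preemptible on RT yet still serializes access and pins the
data to one CPU. Sketch of the resulting call pattern (hypothetical
example_lru_add(), illustration only):

	static void example_lru_add(struct page *page)
	{
		/* on !RT this degrades to get_cpu_var(); on RT it takes
		 * the per-CPU local lock instead of disabling preemption */
		struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			__pagevec_lru_add(pvec);
		put_locked_var(swapvec_lock, lru_add_pvec);
	}

Because swapvec_lock is a real lock on RT, lru_add_drain_all() can also take it
cross-CPU (remote_lru_add_drain() above) instead of scheduling per-CPU workers.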
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c3df82a8e..70012c329 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1717,7 +1717,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	struct vmap_block *vb;
 	struct vmap_area *va;
 	unsigned long vb_idx;
-	int node, err;
+	int node, err, cpu;
 	void *vaddr;

 	node = numa_node_id();
@@ -1760,11 +1760,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	BUG_ON(err);
 	radix_tree_preload_end();

-	vbq = &get_cpu_var(vmap_block_queue);
+	cpu = get_cpu_light();
+	vbq = this_cpu_ptr(&vmap_block_queue);
 	spin_lock(&vbq->lock);
 	list_add_tail_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
-	put_cpu_var(vmap_block_queue);
+	put_cpu_light();

 	return vaddr;
 }
@@ -1833,6 +1834,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	struct vmap_block *vb;
 	void *vaddr = NULL;
 	unsigned int order;
+	int cpu;

 	BUG_ON(offset_in_page(size));
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
@@ -1847,7 +1849,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	order = get_order(size);

 	rcu_read_lock();
-	vbq = &get_cpu_var(vmap_block_queue);
+	cpu = get_cpu_light();
+	vbq = this_cpu_ptr(&vmap_block_queue);
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 		unsigned long pages_off;

@@ -1870,7 +1873,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 		break;
 	}

-	put_cpu_var(vmap_block_queue);
+	put_cpu_light();
 	rcu_read_unlock();

 	/* Allocate new block if nothing was found */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 96028cc96..f2f7d9345 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -320,6 +320,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 	long x;
 	long t;

+	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);

 	t = __this_cpu_read(pcp->stat_threshold);
@@ -329,6 +330,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_zone_page_state);

@@ -340,6 +342,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 	long x;
 	long t;

+	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);

 	t = __this_cpu_read(pcp->stat_threshold);
@@ -349,6 +352,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_node_page_state);

@@ -381,6 +385,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;

+	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -389,6 +394,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 		zone_page_state_add(v + overstep, zone, item);
 		__this_cpu_write(*p, -overstep);
 	}
+	preempt_enable_rt();
 }

 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -397,6 +403,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;

+	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -405,6 +412,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 		node_page_state_add(v + overstep, pgdat, item);
 		__this_cpu_write(*p, -overstep);
 	}
+	preempt_enable_rt();
 }

 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -425,6 +433,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;

+	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -433,6 +442,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 		zone_page_state_add(v - overstep, zone, item);
 		__this_cpu_write(*p, overstep);
 	}
+	preempt_enable_rt();
 }

 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -441,6 +451,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;

+	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -449,6 +460,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 		node_page_state_add(v - overstep, pgdat, item);
 		__this_cpu_write(*p, overstep);
 	}
+	preempt_enable_rt();
 }

 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
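Every vmstat hunk brackets a per-CPU read-modify-write with preempt_disable_rt() /
preempt_enable_rt(): on !RT these compile to nothing (callers already run with IRQs
or preemption off), while on RT they disable preemption so the task cannot migrate
between the __this_cpu_read() and the __this_cpu_write(). A sketch of the pattern
being protected (hypothetical example_mod_state(), illustration only):

	static void example_mod_state(struct zone *zone,
				      enum zone_stat_item item, long delta)
	{
		struct per_cpu_pageset __percpu *pcp = zone->pageset;
		s8 __percpu *p = pcp->vm_stat_diff + item;

		preempt_disable_rt();
		/* safe: the task cannot migrate mid-update on RT */
		__this_cpu_add(*p, delta);
		preempt_enable_rt();
	}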
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e0ac8b80a..8bd784dc1 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -56,6 +56,7 @@
 #include <linux/wait.h>
 #include <linux/pagemap.h>
 #include <linux/fs.h>
+#include <linux/locallock.h>

 #define ZSPAGE_MAGIC	0x58

@@ -73,9 +74,22 @@
  */
 #define ZS_MAX_ZSPAGE_ORDER 2
 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
-
 #define ZS_HANDLE_SIZE (sizeof(unsigned long))

+#ifdef CONFIG_PREEMPT_RT_FULL
+
+struct zsmalloc_handle {
+	unsigned long addr;
+	struct mutex lock;
+};
+
+#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
+
+#else
+
+#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
+#endif
+
 /*
  * Object location (<PFN>, <obj_idx>) is encoded as
  * as single (unsigned long) handle value.
@@ -325,7 +339,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}

 static int create_cache(struct zs_pool *pool)
 {
-	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
+	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
 					0, 0, NULL);
 	if (!pool->handle_cachep)
 		return 1;
@@ -349,10 +363,27 @@ static void destroy_cache(struct zs_pool *pool)

 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 {
-	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
-			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+	void *p;
+
+	p = kmem_cache_alloc(pool->handle_cachep,
+			     gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (p) {
+		struct zsmalloc_handle *zh = p;
+
+		mutex_init(&zh->lock);
+	}
+#endif
+	return (unsigned long)p;
 }

+#ifdef CONFIG_PREEMPT_RT_FULL
+static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
+{
+	return (void *)(handle & ~((1 << OBJ_TAG_BITS) - 1));
+}
+#endif
+
 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
 {
 	kmem_cache_free(pool->handle_cachep, (void *)handle);
@@ -371,12 +402,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)

 static void record_obj(unsigned long handle, unsigned long obj)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	WRITE_ONCE(zh->addr, obj);
+#else
 	/*
 	 * lsb of @obj represents handle lock while other bits
 	 * represent object value the handle is pointing so
 	 * updating shouldn't do store tearing.
 	 */
 	WRITE_ONCE(*(unsigned long *)handle, obj);
+#endif
 }

 /* zpool driver */
@@ -458,6 +495,7 @@ MODULE_ALIAS("zpool-zsmalloc");

 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);

 static bool is_zspage_isolated(struct zspage *zspage)
 {
@@ -887,7 +925,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)

 static unsigned long handle_to_obj(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	return zh->addr;
+#else
 	return *(unsigned long *)handle;
+#endif
 }

 static unsigned long obj_to_head(struct page *page, void *obj)
@@ -901,22 +945,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)

 static inline int testpin_tag(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	return mutex_is_locked(&zh->lock);
+#else
 	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
+#endif
 }

 static inline int trypin_tag(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	return mutex_trylock(&zh->lock);
+#else
 	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
+#endif
 }

 static void pin_tag(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	return mutex_lock(&zh->lock);
+#else
 	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
+#endif
 }

 static void unpin_tag(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	return mutex_unlock(&zh->lock);
+#else
 	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
+#endif
 }

 static void reset_page(struct page *page)
@@ -1344,7 +1412,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	class = pool->size_class[class_idx];
 	off = (class->size * obj_idx) & ~PAGE_MASK;

-	area = &get_cpu_var(zs_map_area);
+	area = &get_locked_var(zs_map_area_lock, zs_map_area);
 	area->vm_mm = mm;
 	if (off + class->size <= PAGE_SIZE) {
 		/* this object is contained entirely within a page */
@@ -1398,7 +1466,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)

 		__zs_unmap_object(area, pages, off, class->size);
 	}
-	put_cpu_var(zs_map_area);
+	put_locked_var(zs_map_area_lock, zs_map_area);

 	migrate_read_unlock(zspage);
 	unpin_tag(handle);
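The zsmalloc conversion changes the handle representation itself: on !RT the handle
is a bare word whose low bit doubles as a bit spinlock, but a bit spinlock spins
with preemption disabled and so is unusable on RT. There the handle becomes a small
struct and pin/unpin map onto a sleeping mutex. Restated as a sketch (hypothetical
example_pin(), illustration only, folding together the #ifdef pairs above):

	static void example_pin(unsigned long handle)
	{
	#ifdef CONFIG_PREEMPT_RT_FULL
		/* RT: the handle points at a struct; pinning is a mutex */
		mutex_lock(&zs_get_pure_handle(handle)->lock);
	#else
		/* !RT: pinning is the low bit of the stored word */
		bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
	#endif
	}

The larger ZS_HANDLE_ALLOC_SIZE and the mutex_init() in cache_alloc_handle() are
the allocation-side halves of the same change.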
diff --git a/mm/zswap.c b/mm/zswap.c
index 6c686888d..ed79b94fa 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -27,6 +27,7 @@
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/locallock.h>
 #include <linux/types.h>
 #include <linux/atomic.h>
 #include <linux/frontswap.h>
@@ -990,6 +991,8 @@ static void zswap_fill_page(void *ptr, unsigned long value)
 	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
 }

+/* protect zswap_dstmem from concurrency */
+static DEFINE_LOCAL_IRQ_LOCK(zswap_dstmem_lock);
 /*********************************
 * frontswap hooks
 **********************************/
@@ -1066,12 +1069,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	}

 	/* compress */
-	dst = get_cpu_var(zswap_dstmem);
-	tfm = *get_cpu_ptr(entry->pool->tfm);
+	dst = get_locked_var(zswap_dstmem_lock, zswap_dstmem);
+	tfm = *this_cpu_ptr(entry->pool->tfm);
 	src = kmap_atomic(page);
 	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
 	kunmap_atomic(src);
-	put_cpu_ptr(entry->pool->tfm);
 	if (ret) {
 		ret = -EINVAL;
 		goto put_dstmem;
@@ -1094,7 +1096,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	memcpy(buf, &zhdr, hlen);
 	memcpy(buf + hlen, dst, dlen);
 	zpool_unmap_handle(entry->pool->zpool, handle);
-	put_cpu_var(zswap_dstmem);
+	put_locked_var(zswap_dstmem_lock, zswap_dstmem);

 	/* populate entry */
 	entry->offset = offset;
@@ -1122,7 +1124,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	return 0;

 put_dstmem:
-	put_cpu_var(zswap_dstmem);
+	put_locked_var(zswap_dstmem_lock, zswap_dstmem);
 	zswap_pool_put(entry->pool);
 freepage:
 	zswap_entry_cache_free(entry);
diff --git a/net/Kconfig b/net/Kconfig
index 4bef62b4c..490db7aef 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -273,7 +273,7 @@ config CGROUP_NET_CLASSID

 config NET_RX_BUSY_POLL
 	bool
-	default y
+	default y if !PREEMPT_RT_FULL

 config BQL
 	bool
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index c044ff2f7..75bc8102c 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -64,15 +64,13 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
 static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 {
 	struct sock *sk = d->owner, *parent;
-	unsigned long flags;

 	if (!sk)
 		return;

 	BT_DBG("dlc %p state %ld err %d", d, d->state, err);

-	local_irq_save(flags);
-	bh_lock_sock(sk);
+	spin_lock_bh(&sk->sk_lock.slock);

 	if (err)
 		sk->sk_err = err;
@@ -93,8 +91,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 		sk->sk_state_change(sk);
 	}

-	bh_unlock_sock(sk);
-	local_irq_restore(flags);
+	spin_unlock_bh(&sk->sk_lock.slock);

 	if (parent && sock_flag(sk, SOCK_ZAPPED)) {
 		/* We have to drop DLC lock here, otherwise
diff --git a/net/core/dev.c b/net/core/dev.c
index 9e258466d..4a6e03e5b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -219,14 +219,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- spin_lock(&sd->input_pkt_queue.lock);
+ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- spin_unlock(&sd->input_pkt_queue.lock);
+ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
#endif
}

@@ -2725,6 +2725,7 @@ static void __netif_reschedule(struct Qdisc *q)
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}

void __netif_schedule(struct Qdisc *q)
@@ -2787,6 +2788,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

@@ -3470,7 +3472,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+ contended = true;
+#else
contended = qdisc_is_running(q);
+#endif
if (unlikely(contended))
spin_lock(&q->busylock);

@@ -3832,10 +3838,14 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */

+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (READ_ONCE(txq->xmit_lock_owner) != current) {
+#else
/* Other cpus might concurrently change txq->xmit_lock_owner
* to -1 or to their cpu id, but not to our id.
*/
if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
+#endif
if (dev_xmit_recursion())
goto recursion_alert;

@@ -4272,6 +4282,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
rps_unlock(sd);

local_irq_restore(flags);
+ preempt_check_resched_rt();

atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
@@ -4486,7 +4497,7 @@ static int netif_rx_internal(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;

- preempt_disable();
+ migrate_disable();
rcu_read_lock();

cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -4496,14 +4507,14 @@ static int netif_rx_internal(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

rcu_read_unlock();
- preempt_enable();
+ migrate_enable();
} else
#endif
{
unsigned int qtail;

- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
- put_cpu();
+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
+ put_cpu_light();
}
return ret;
}
@@ -4537,11 +4548,9 @@ int netif_rx_ni(struct sk_buff *skb)

trace_netif_rx_ni_entry(skb);

- preempt_disable();
+ local_bh_disable();
err = netif_rx_internal(skb);
- if (local_softirq_pending())
- do_softirq();
- preempt_enable();
+ local_bh_enable();

return err;
}
@@ -5820,12 +5829,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
sd->rps_ipi_list = NULL;

local_irq_enable();
+ preempt_check_resched_rt();

/* Send pending IPI's to kick RPS processing on remote cpus. */
net_rps_send_ipi(remsd);
} else
#endif
local_irq_enable();
+ preempt_check_resched_rt();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -5855,7 +5866,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
while (again) {
struct sk_buff *skb;

+ local_irq_disable();
while ((skb = __skb_dequeue(&sd->process_queue))) {
+ local_irq_enable();
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
@@ -5863,9 +5876,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
if (++work >= quota)
return work;

+ local_irq_disable();
}

- local_irq_disable();
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
@@ -5903,6 +5916,7 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(__napi_schedule);

@@ -5939,6 +5953,7 @@ bool napi_schedule_prep(struct napi_struct *n)
}
EXPORT_SYMBOL(napi_schedule_prep);

+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
@@ -5950,6 +5965,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
+#endif

bool napi_complete_done(struct napi_struct *n, int work_done)
{
@@ -6330,13 +6346,17 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
unsigned long time_limit = jiffies +
usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
int budget = READ_ONCE(netdev_budget);
+ struct sk_buff_head tofree_q;
+ struct sk_buff *skb;
LIST_HEAD(list);
LIST_HEAD(repoll);
-
+ __skb_queue_head_init(&tofree_q);
local_irq_disable();
+ skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
list_splice_init(&sd->poll_list, &list);
local_irq_enable();
-
+ while ((skb = __skb_dequeue(&tofree_q)))
+ kfree_skb(skb);
for (;;) {
struct napi_struct *n;

@@ -6366,7 +6386,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);

net_rps_action_and_irq_enable(sd);
out:
@@ -8573,7 +8593,7 @@ static void netdev_init_one_queue(struct net_device *dev,
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
- queue->xmit_lock_owner = -1;
+ netdev_queue_clear_owner(queue);
netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
queue->dev = dev;
#ifdef CONFIG_BQL
@@ -9518,6 +9538,7 @@ static int dev_cpu_dead(unsigned int oldcpu)

raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();

#ifdef CONFIG_RPS
remsd = oldsd->rps_ipi_list;
@@ -9531,10 +9552,13 @@ static int dev_cpu_dead(unsigned int oldcpu)
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
+ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
+ kfree_skb(skb);
+ }

return 0;
}
@@ -9845,8 +9869,9 @@ static int __init net_dev_init(void)

INIT_WORK(flush, flush_backlog);

- skb_queue_head_init(&sd->input_pkt_queue);
- skb_queue_head_init(&sd->process_queue);
+ skb_queue_head_init_raw(&sd->input_pkt_queue);
+ skb_queue_head_init_raw(&sd->process_queue);
+ skb_queue_head_init_raw(&sd->tofree_queue);
#ifdef CONFIG_XFRM_OFFLOAD
skb_queue_head_init(&sd->xfrm_backlog);
#endif
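
Two substitutions recur throughout the dev.c hunks. preempt_check_resched_rt() is a no-op on !RT and, on RT, picks up a reschedule that raise_softirq_irqoff() can no longer trigger from these paths; migrate_disable()/get_cpu_light() replace preempt_disable()/get_cpu() where only CPU stability, not atomicity, is needed. A hedged sketch of the latter, with enqueue_on_cpu() standing in for the real backlog helper:

#include <linux/preempt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>

/* Stand-in for enqueue_to_backlog(); illustrative only. */
static int enqueue_on_cpu(struct sk_buff *skb, int cpu);

static int deliver_local(struct sk_buff *skb)
{
	int cpu, ret;

	/*
	 * migrate_disable() pins the task to this CPU but keeps it
	 * preemptible, so sleeping locks taken further down the RX
	 * path remain legal on PREEMPT_RT; preempt_disable() would
	 * forbid them.
	 */
	migrate_disable();
	cpu = smp_processor_id();
	ret = enqueue_on_cpu(skb, cpu);
	migrate_enable();

	return ret;
}
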
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 752744db1..7112e28b4 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -46,7 +46,7 @@
struct net_rate_estimator {
struct gnet_stats_basic_packed *bstats;
spinlock_t *stats_lock;
- seqcount_t *running;
+ net_seqlock_t *running;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
u8 ewma_log;
u8 intvl_log; /* period : (250ms << intvl_log) */
@@ -129,7 +129,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running,
+ net_seqlock_t *running,
struct nlattr *opt)
{
struct gnet_estimator *parm = nla_data(opt);
@@ -230,7 +230,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt)
+ net_seqlock_t *running, struct nlattr *opt)
{
return gen_new_estimator(bstats, cpu_bstats, rate_est,
lock, running, opt);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index e2fd8baec..8bab88738 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -142,7 +142,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
}

void
-__gnet_stats_copy_basic(const seqcount_t *running,
+__gnet_stats_copy_basic(net_seqlock_t *running,
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
@@ -155,10 +155,10 @@ __gnet_stats_copy_basic(const seqcount_t *running,
}
do {
if (running)
- seq = read_seqcount_begin(running);
+ seq = net_seq_begin(running);
bstats->bytes = b->bytes;
bstats->packets = b->packets;
- } while (running && read_seqcount_retry(running, seq));
+ } while (running && net_seq_retry(running, seq));
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);

@@ -176,7 +176,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
* if the room in the socket buffer was not sufficient.
*/
int
-gnet_stats_copy_basic(const seqcount_t *running,
+gnet_stats_copy_basic(net_seqlock_t *running,
struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
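
net_seqlock_t and its net_seq_begin()/net_seq_retry() accessors hide the RT/!RT split: the qdisc "running" marker stays a bare seqcount_t on mainline, but becomes a full seqlock on RT so the writer holds a real, priority-inheriting lock and a preempted writer cannot stall readers indefinitely. A sketch of what the wrappers amount to, assuming definitions along the lines of the net_seq_lock.h header this series adds (presented here from memory, not quoted verbatim):

#include <linux/seqlock.h>

#ifdef CONFIG_PREEMPT_RT_BASE
/* Full seqlock: the write side is a sleeping, PI-aware lock on RT. */
# define net_seqlock_t			seqlock_t
# define net_seq_begin(__r)		read_seqbegin(__r)
# define net_seq_retry(__r, __s)	read_seqretry(__r, __s)
#else
/* Plain seqcount: writers serialize by other means, as before. */
# define net_seqlock_t			seqcount_t
# define net_seq_begin(__r)		read_seqcount_begin(__r)
# define net_seq_retry(__r, __s)	read_seqcount_retry(__r, __s)
#endif
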
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 092fa3d75..9d472d626 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2160,7 +2160,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
s64 remaining;
struct hrtimer_sleeper t;

- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS,
+ current);
hrtimer_set_expires(&t.timer, spin_until);

remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
@@ -2175,7 +2176,6 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
} while (ktime_compare(end_time, spin_until) < 0);
} else {
/* see do_nanosleep */
- hrtimer_init_sleeper(&t, current);
do {
set_current_state(TASK_INTERRUPTIBLE);
hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
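
The pktgen hunk reflects an hrtimer API change carried by this series: hrtimer_init_sleeper_on_stack() initializes the timer and the sleeper's wakeup task in one step, so there is no window in which the timer is usable but t.task is still unset. A minimal usage sketch of the combined call, modeled on the do_nanosleep() pattern the hunk itself cites:

#include <linux/hrtimer.h>
#include <linux/sched.h>

static void sleep_until(ktime_t expires)
{
	struct hrtimer_sleeper t;

	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS, current);
	hrtimer_set_expires(&t.timer, expires);

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
	if (likely(t.task))
		schedule();	/* woken by the sleeper callback */

	hrtimer_cancel(&t.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&t.timer);
}
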
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b7314a6cf..d3706fa45 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -63,6 +63,7 @@
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
+#include <linux/locallock.h>

#include <net/protocol.h>
#include <net/dst.h>
@@ -330,6 +331,8 @@ struct napi_alloc_cache {

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
@@ -337,10 +340,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
unsigned long flags;
void *data;

- local_irq_save(flags);
+ local_lock_irqsave(netdev_alloc_lock, flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc(nc, fragsz, gfp_mask);
- local_irq_restore(flags);
+ local_unlock_irqrestore(netdev_alloc_lock, flags);
return data;
}

@@ -361,9 +364,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ struct napi_alloc_cache *nc;
+ void *data;

- return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ data = page_frag_alloc(&nc->page, fragsz, gfp_mask);
+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ return data;
}

void *napi_alloc_frag(unsigned int fragsz)
@@ -416,13 +423,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;

- local_irq_save(flags);
+ local_lock_irqsave(netdev_alloc_lock, flags);

nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc;

- local_irq_restore(flags);
+ local_unlock_irqrestore(netdev_alloc_lock, flags);

if (unlikely(!data))
return NULL;
@@ -466,6 +473,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
struct napi_alloc_cache *nc;
struct sk_buff *skb;
void *data;
+ bool pfmemalloc;

len += NET_SKB_PAD + NET_IP_ALIGN;

@@ -488,7 +496,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;

+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
data = page_frag_alloc(&nc->page, len, gfp_mask);
+ pfmemalloc = nc->page.pfmemalloc;
+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
if (unlikely(!data))
return NULL;

@@ -499,7 +510,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
}

/* use OR instead of assignment to avoid clearing of bits in mask */
- if (nc->page.pfmemalloc)
+ if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;

@@ -731,23 +742,26 @@ void __consume_stateless_skb(struct sk_buff *skb)

void __kfree_skb_flush(void)
{
- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ struct napi_alloc_cache *nc;

+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
/* flush skb_cache if containing objects */
if (nc->skb_count) {
kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
nc->skb_cache);
nc->skb_count = 0;
}
+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ struct napi_alloc_cache *nc;

/* drop skb->head and call any destructors for packet */
skb_release_all(skb);

+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
/* record skb to CPU local list */
nc->skb_cache[nc->skb_count++] = skb;

@@ -762,6 +776,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
nc->skb_cache);
nc->skb_count = 0;
}
+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
}
void __kfree_skb_defer(struct sk_buff *skb)
{
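
One subtlety in the __napi_alloc_skb() hunk above: skb->pfmemalloc used to be derived from nc->page.pfmemalloc after the allocation, which was safe while the whole function implicitly ran with preemption off. With the cache behind a local lock, the flag must be snapshotted while the lock is still held, because another task can refill the per-CPU page as soon as the lock drops. The rule, as a sketch reusing the names introduced by the hunks (napi_alloc_cache, napi_alloc_cache_lock):

#include <linux/gfp.h>
#include <linux/locallock.h>

static void *frag_alloc_snapshot(unsigned int len, gfp_t gfp_mask,
				 bool *pfmemalloc)
{
	struct napi_alloc_cache *nc;
	void *data;

	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
	data = page_frag_alloc(&nc->page, len, gfp_mask);
	/* Snapshot shared state while the per-CPU cache is still ours. */
	*pfmemalloc = nc->page.pfmemalloc;
	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);

	/* Only the returned locals may be used past the unlock. */
	return data;
}
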
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a4f838ec7..d6da6f7af 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -77,6 +77,7 @@
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
+#include <linux/locallock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
@@ -204,6 +205,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
*
* On SMP we have one ICMP socket per-cpu.
*/
+static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
+
static struct sock *icmp_sk(struct net *net)
{
return *this_cpu_ptr(net->ipv4.icmp_sk);
@@ -214,12 +217,16 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
{
struct sock *sk;

+ if (!local_trylock(icmp_sk_lock))
+ return NULL;
+
sk = icmp_sk(net);

if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path signals a
* dst_link_failure() for an outgoing ICMP packet.
*/
+ local_unlock(icmp_sk_lock);
return NULL;
}
return sk;
@@ -228,6 +235,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
static inline void icmp_xmit_unlock(struct sock *sk)
{
spin_unlock(&sk->sk_lock.slock);
+ local_unlock(icmp_sk_lock);
}

int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
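
icmp_xmit_lock() can be reached recursively on the same CPU (the output path may signal dst_link_failure() while an ICMP packet is already in flight), which is why the series uses local_trylock() rather than local_lock() here: on RT a recursive attempt would deadlock on the per-CPU lock it already owns. The invariant to keep in mind is that every exit path after a successful local_trylock() must drop the lock again, condensed here as a sketch of the hunk above:

#include <linux/locallock.h>
#include <net/sock.h>

static struct sock *icmp_xmit_lock_sketch(struct net *net)
{
	struct sock *sk;

	if (!local_trylock(icmp_sk_lock))
		return NULL;		/* recursion or contention: drop */

	sk = icmp_sk(net);
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		local_unlock(icmp_sk_lock);	/* unwind on early exit */
		return NULL;
	}
	return sk;			/* both locks held on success */
}
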
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index dc59bd34e..c63275a1c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -62,6 +62,7 @@
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>
+#include <linux/locallock.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
@@ -637,6 +638,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_v4_send_check);

+static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
/*
* This routine will send an RST to the other tcp.
*
@@ -771,6 +773,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
arg.tos = ip_hdr(skb)->tos;
arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
local_bh_disable();
+ local_lock(tcp_sk_lock);
ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
if (sk)
ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
@@ -783,6 +786,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
ctl_sk->sk_mark = 0;
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+ local_unlock(tcp_sk_lock);
local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
@@ -863,6 +867,7 @@ static void tcp_v4_send_ack(const struct sock *sk,
arg.tos = tos;
arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
local_bh_disable();
+ local_lock(tcp_sk_lock);
ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
if (sk)
ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
@@ -874,6 +879,7 @@ static void tcp_v4_send_ack(const struct sock *sk,

ctl_sk->sk_mark = 0;
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ local_unlock(tcp_sk_lock);
local_bh_enable();
}

diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index b6ea30116..d399282b0 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -20,6 +20,7 @@
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
+#include <linux/locallock.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
@@ -27,6 +28,11 @@

#include "nf_internals.h"

+#ifdef CONFIG_PREEMPT_RT_BASE
+DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
+EXPORT_PER_CPU_SYMBOL(xt_write_lock);
+#endif
+
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4e3766a72..a2e3010a6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -63,6 +63,7 @@
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -672,7 +673,7 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
- cpu_relax();
+ cpu_chill();
}
}

@@ -934,7 +935,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
- cpu_relax();
+ cpu_chill();
}
}
prb_close_block(pkc, pbd, po, status);
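
cpu_relax() busy-waits, which can livelock on RT: the waiter may be a high-priority task spinning on blk_fill_in_prog while the task that must clear it sits preempted on the same CPU. cpu_chill(), which this series makes available via linux/delay.h (a short sleep under PREEMPT_RT_BASE, falling back to cpu_relax() otherwise), yields real CPU time to the blocked writer. The waiting pattern, as a sketch:

#include <linux/atomic.h>
#include <linux/delay.h>

/* Wait until the flag owner (possibly a preempted task) finishes. */
static void wait_for_writer(atomic_t *in_progress)
{
	while (atomic_read(in_progress))
		cpu_chill();	/* sleeps on RT instead of spinning */
}
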
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 0b347f46b..f395f0603 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>
+#include <linux/delay.h>

#include "rds_single_path.h"
#include "ib_mr.h"
@@ -222,7 +223,7 @@ static inline void wait_clean_list_grace(void)
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
- cpu_relax();
+ cpu_chill();
}
}

diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 27166a482..6c347a64b 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1195,7 +1195,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
- seqcount_t *running;
+ net_seqlock_t *running;

err = -EOPNOTSUPP;
if (sch->flags & TCQ_F_MQROOT) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 64ae84bea..1f1c2d20f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -571,7 +571,11 @@ struct Qdisc noop_qdisc = {
.ops = &noop_qdisc_ops,
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
+#ifdef CONFIG_PREEMPT_RT_BASE
+ .running = __SEQLOCK_UNLOCKED(noop_qdisc.running),
+#else
.running = SEQCNT_ZERO(noop_qdisc.running),
+#endif
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
.gso_skb = {
.next = (struct sk_buff *)&noop_qdisc.gso_skb,
@@ -872,9 +876,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

+#ifdef CONFIG_PREEMPT_RT_BASE
+ seqlock_init(&sch->running);
+ lockdep_set_class(&sch->running.seqcount,
+ dev->qdisc_running_key ?: &qdisc_running_key);
+ lockdep_set_class(&sch->running.lock,
+ dev->qdisc_running_key ?: &qdisc_running_key);
+#else
seqcount_init(&sch->running);
lockdep_set_class(&sch->running,
dev->qdisc_running_key ?: &qdisc_running_key);
+#endif

sch->ops = ops;
sch->flags = ops->static_flags;
@@ -1227,7 +1239,7 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev))
- yield();
+ msleep(1);
/* The new qdisc is assigned at this point so we can safely
* unwind stale skb lists and qdisc statistics
*/
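
The yield() to msleep(1) change in dev_deactivate_many() follows the same reasoning as the cpu_chill() conversions: yield() is effectively a no-op when the caller is already the highest-priority runnable task, so a SCHED_FIFO caller could spin forever waiting for a lower-priority qdisc_run() owner. Sleeping for a jiffy guarantees the owner gets CPU time. The shape of the fix, as a sketch around the some_qdisc_is_busy() helper the hunk polls:

#include <linux/delay.h>
#include <linux/netdevice.h>

/* Poll a condition owned by a possibly lower-priority task. */
static void wait_for_qdiscs(struct net_device *dev)
{
	while (some_qdisc_is_busy(dev))
		msleep(1);	/* yield() may never deschedule an RT task */
}
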
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 0ab649e02..a6b8ba031 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -406,7 +406,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
return;

- cpu = get_cpu();
+ cpu = get_cpu_light();
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

atomic_long_inc(&pool->sp_stats.packets);
@@ -430,7 +430,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
rqstp = NULL;
out_unlock:
rcu_read_unlock();
- put_cpu();
+ put_cpu_light();
trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
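
get_cpu_light()/put_cpu_light() show up wherever get_cpu() was used only to obtain a stable CPU number for a per-CPU lookup, not to create an atomic section. A sketch of what the helpers amount to, assuming the definitions this series adds to linux/smp.h (paraphrased, not quoted verbatim; on !RT, migrate_disable() degrades to preempt_disable(), so the behavior matches plain get_cpu()/put_cpu()):

#include <linux/preempt.h>
#include <linux/smp.h>

/* Stable CPU id without forbidding preemption on RT. */
#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
#define put_cpu_light()		migrate_enable()
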
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 32aed1d0f..2fb55dba4 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/locallock.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -36,6 +37,7 @@ struct ipcomp_tfms {

static DEFINE_MUTEX(ipcomp_resource_mutex);
static void * __percpu *ipcomp_scratches;
+static DEFINE_LOCAL_IRQ_LOCK(ipcomp_scratches_lock);
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);

@@ -45,12 +47,15 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
const int plen = skb->len;
int dlen = IPCOMP_SCRATCH_SIZE;
const u8 *start = skb->data;
- const int cpu = get_cpu();
- u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
- struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
- int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
- int len;
+ u8 *scratch;
+ struct crypto_comp *tfm;
+ int err, len;
+
+ local_lock(ipcomp_scratches_lock);

+ scratch = *this_cpu_ptr(ipcomp_scratches);
+ tfm = *this_cpu_ptr(ipcd->tfms);
+ err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
if (err)
goto out;

@@ -103,7 +108,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
err = 0;

out:
- put_cpu();
+ local_unlock(ipcomp_scratches_lock);
return err;
}

@@ -146,6 +151,8 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
int err;

local_bh_disable();
+ local_lock(ipcomp_scratches_lock);
+
scratch = *this_cpu_ptr(ipcomp_scratches);
tfm = *this_cpu_ptr(ipcd->tfms);
err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
@@ -158,12 +165,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
}

memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
+ local_unlock(ipcomp_scratches_lock);
local_bh_enable();

pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
return 0;

out:
+ local_unlock(ipcomp_scratches_lock);
local_bh_enable();
return err;
}
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 552269210..8b4be8e18 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)

/* Silly tracepoints */
trace_foo_bar("hello", cnt, array, random_strings[len],
- &current->cpus_allowed);
+ current->cpus_ptr);

trace_foo_with_template_simple("HELLO", cnt);

diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 87f1fc980..f67b15236 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -5,7 +5,8 @@ TARGET=$1
ARCH=$2
SMP=$3
PREEMPT=$4
-CC=$5
+RT=$5
+CC=$6

vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }

@@ -53,6 +54,7 @@ UTS_VERSION="#$VERSION"
CONFIG_FLAGS=""
if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"

# Truncate to maximum length
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
index b6380c5f0..12abfddb1 100644
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
@@ -40,8 +40,10 @@ struct aa_buffers {

#include <linux/percpu.h>
#include <linux/preempt.h>
+#include <linux/locallock.h>

DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
+DECLARE_LOCAL_IRQ_LOCK(aa_buffers_lock);

#define ASSIGN(FN, A, X, N) ((X) = FN(A, N))
#define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/
@@ -51,7 +53,17 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers);

#define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)

-#ifdef CONFIG_DEBUG_PREEMPT
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void AA_BUG_PREEMPT_ENABLED(const char *s)
+{
+ struct local_irq_lock *lv;
+
+ lv = this_cpu_ptr(&aa_buffers_lock);
+ WARN_ONCE(lv->owner != current,
+ "__get_buffer without aa_buffers_lock\n");
+}
+
+#elif defined(CONFIG_DEBUG_PREEMPT)
#define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X)
#else
#define AA_BUG_PREEMPT_ENABLED(X) /* nop */
@@ -67,14 +79,15 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers);

#define get_buffers(X...) \
do { \
- struct aa_buffers *__cpu_var = get_cpu_ptr(&aa_buffers); \
+ struct aa_buffers *__cpu_var; \
+ __cpu_var = get_locked_ptr(aa_buffers_lock, &aa_buffers); \
__get_buffers(__cpu_var, X); \
} while (0)

#define put_buffers(X, Y...) \
do { \
__put_buffers(X, Y); \
- put_cpu_ptr(&aa_buffers); \
+ put_locked_ptr(aa_buffers_lock, &aa_buffers); \
} while (0)

#endif /* __AA_PATH_H */
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 8b8b70620..8330ef57a 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -45,7 +45,7 @@
int apparmor_initialized;

DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
-
+DEFINE_LOCAL_IRQ_LOCK(aa_buffers_lock);

/*
* LSM hook functions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 0f8cfc95a..767894d24 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -67,7 +67,7 @@ static inline bool userspace_irqchip(struct kvm *kvm)
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
@@ -611,10 +611,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
vcpu_ptimer(vcpu)->cntvoff = 0;

- hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
timer->bg_timer.function = kvm_bg_timer_expire;

- hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
timer->phys_timer.function = kvm_phys_timer_expire;

vtimer->irq.irq = default_vtimer_irq.irq;
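
HRTIMER_MODE_ABS_HARD marks a timer whose expiry handler must stay in hard interrupt context even on RT, where ordinary hrtimers are deferred to softirq; the KVM arch timers poke the GIC from their handlers and cannot tolerate that deferral. Usage is just the mode flag at init and start time, as a sketch with illustrative names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_expire(struct hrtimer *t)
{
	/* Runs in hard interrupt context even on PREEMPT_RT. */
	return HRTIMER_NORESTART;
}

static void example_arm(u64 ns)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	example_timer.function = example_expire;
	hrtimer_start(&example_timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}
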
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 0ab79ed49..c747f38ed 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -809,7 +809,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
- preempt_disable();
+ migrate_disable();

kvm_pmu_flush_hwstate(vcpu);

@@ -858,7 +858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
- preempt_enable();
+ migrate_enable();
continue;
}

@@ -936,7 +936,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, run, ret);

- preempt_enable();
+ migrate_enable();

ret = handle_exit(vcpu, run, ret);
update_vcpu_stat_time(&vcpu->stat);
--
2.33.0