Files
kernel_xiaomi_sm8250/kernel/cpu.c
Ivaylo Georgiev b434e4bcd4 Merge android-4.19-q.84 (314ab78) into msm-4.19
* refs/heads/tmp-314ab78:
  Linux 4.19.84
  kvm: x86: mmu: Recovery of shattered NX large pages
  kvm: Add helper function for creating VM worker threads
  kvm: mmu: ITLB_MULTIHIT mitigation
  KVM: vmx, svm: always run with EFER.NXE=1 when shadow paging is active
  KVM: x86: add tracepoints around __direct_map and FNAME(fetch)
  KVM: x86: change kvm_mmu_page_get_gfn BUG_ON to WARN_ON
  KVM: x86: remove now unneeded hugepage gfn adjustment
  KVM: x86: make FNAME(fetch) and __direct_map more similar
  kvm: mmu: Do not release the page inside mmu_set_spte()
  kvm: Convert kvm_lock to a mutex
  kvm: x86, powerpc: do not allow clearing largepages debugfs entry
  Documentation: Add ITLB_MULTIHIT documentation
  cpu/speculation: Uninline and export CPU mitigations helpers
  x86/cpu: Add Tremont to the cpu vulnerability whitelist
  x86/bugs: Add ITLB_MULTIHIT bug infrastructure
  x86/speculation/taa: Fix printing of TAA_MSG_SMT on IBRS_ALL CPUs
  x86/tsx: Add config options to set tsx=on|off|auto
  x86/speculation/taa: Add documentation for TSX Async Abort
  x86/tsx: Add "auto" option to the tsx= cmdline parameter
  kvm/x86: Export MDS_NO=0 to guests when TSX is enabled
  x86/speculation/taa: Add sysfs reporting for TSX Async Abort
  x86/speculation/taa: Add mitigation for TSX Async Abort
  x86/cpu: Add a "tsx=" cmdline option with TSX disabled by default
  x86/cpu: Add a helper function x86_read_arch_cap_msr()
  x86/msr: Add the IA32_TSX_CTRL MSR
  KVM: x86: use Intel speculation bugs and features as derived in generic x86 code
  drm/i915/cmdparser: Fix jump whitelist clearing
  drm/i915/gen8+: Add RC6 CTX corruption WA
  drm/i915: Lower RM timeout to avoid DSI hard hangs
  drm/i915/cmdparser: Ignore Length operands during command matching
  drm/i915/cmdparser: Add support for backward jumps
  drm/i915/cmdparser: Use explicit goto for error paths
  drm/i915: Add gen9 BCS cmdparsing
  drm/i915: Allow parsing of unsized batches
  drm/i915: Support ro ppgtt mapped cmdparser shadow buffers
  drm/i915: Add support for mandatory cmdparsing
  drm/i915: Remove Master tables from cmdparser
  drm/i915: Disable Secure Batches for gen6+
  drm/i915: Rename gen7 cmdparser tables
  vsock/virtio: fix sock refcnt holding during the shutdown
  iio: imu: mpu6050: Fix FIFO layout for ICM20602
  net: prevent load/store tearing on sk->sk_stamp
  netfilter: ipset: Copy the right MAC address in hash:ip,mac IPv6 sets
  usbip: Fix free of unallocated memory in vhci tx
  cgroup,writeback: don't switch wbs immediately on dead wbs if the memcg is dead
  mm/filemap.c: don't initiate writeback if mapping has no dirty pages
  iio: imu: inv_mpu6050: fix no data on MPU6050
  iio: imu: mpu6050: Add support for the ICM 20602 IMU
  blkcg: make blkcg_print_stat() print stats only for online blkgs
  pinctrl: cherryview: Fix irq_valid_mask calculation
  ocfs2: protect extent tree in ocfs2_prepare_inode_for_write()
  pinctrl: intel: Avoid potential glitches if pin is in GPIO mode
  e1000: fix memory leaks
  igb: Fix constant media auto sense switching when no cable is connected
  net: ethernet: arc: add the missed clk_disable_unprepare
  NFSv4: Don't allow a cached open with a revoked delegation
  usb: dwc3: gadget: fix race when disabling ep with cancelled xfers
  hv_netvsc: Fix error handling in netvsc_attach()
  drm/amd/display: Passive DP->HDMI dongle detection fix
  drm/amdgpu: If amdgpu_ib_schedule fails return back the error.
  iommu/amd: Apply the same IVRS IOAPIC workaround to Acer Aspire A315-41
  net: mscc: ocelot: refuse to overwrite the port's native vlan
  net: mscc: ocelot: fix vlan_filtering when enslaving to bridge before link is up
  net: hisilicon: Fix "Trying to free already-free IRQ"
  fjes: Handle workqueue allocation failure
  nvme-multipath: fix possible io hang after ctrl reconnect
  scsi: qla2xxx: stop timer in shutdown path
  RDMA/hns: Prevent memory leaks of eq->buf_list
  RDMA/iw_cxgb4: Avoid freeing skb twice in arp failure case
  usbip: tools: Fix read_usb_vudc_device() error path handling
  USB: ldusb: use unsigned size format specifiers
  USB: Skip endpoints with 0 maxpacket length
  perf/x86/uncore: Fix event group support
  perf/x86/amd/ibs: Handle erratum #420 only on the affected CPU family (10h)
  perf/x86/amd/ibs: Fix reading of the IBS OpData register and thus precise RIP validity
  usb: dwc3: remove the call trace of USBx_GFLADJ
  usb: gadget: configfs: fix concurrent issue between composite APIs
  usb: dwc3: pci: prevent memory leak in dwc3_pci_probe
  usb: gadget: composite: Fix possible double free memory bug
  usb: gadget: udc: atmel: Fix interrupt storm in FIFO mode.
  usb: fsl: Check memory resource before releasing it
  macsec: fix refcnt leak in module exit routine
  bonding: fix unexpected IFF_BONDING bit unset
  ipvs: move old_secure_tcp into struct netns_ipvs
  ipvs: don't ignore errors in case refcounting ip_vs module fails
  netfilter: nf_flow_table: set timeout before insertion into hashes
  scsi: qla2xxx: Initialized mailbox to prevent driver load failure
  scsi: lpfc: Honor module parameter lpfc_use_adisc
  net: openvswitch: free vport unless register_netdevice() succeeds
  RDMA/uverbs: Prevent potential underflow
  scsi: qla2xxx: fixup incorrect usage of host_byte
  net/mlx5: prevent memory leak in mlx5_fpga_conn_create_cq
  net/mlx5e: TX, Fix consumer index of error cqe dump
  RDMA/qedr: Fix reported firmware version
  iw_cxgb4: fix ECN check on the passive accept
  RDMA/mlx5: Clear old rate limit when closing QP
  HID: intel-ish-hid: fix wrong error handling in ishtp_cl_alloc_tx_ring()
  dmaengine: sprd: Fix the possible memory leak issue
  dmaengine: xilinx_dma: Fix control reg update in vdma_channel_set_config
  HID: google: add magnemite/masterball USB ids
  PCI: tegra: Enable Relaxed Ordering only for Tegra20 & Tegra30
  usbip: Implement SG support to vhci-hcd and stub driver
  usbip: Fix vhci_urb_enqueue() URB null transfer buffer error path
  sched/fair: Fix -Wunused-but-set-variable warnings
  sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices
  ALSA: usb-audio: Fix copy&paste error in the validator
  ALSA: usb-audio: remove some dead code
  ALSA: usb-audio: Fix possible NULL dereference at create_yamaha_midi_quirk()
  ALSA: usb-audio: Clean up check_input_term()
  ALSA: usb-audio: Remove superfluous bLength checks
  ALSA: usb-audio: Unify the release of usb_mixer_elem_info objects
  ALSA: usb-audio: Simplify parse_audio_unit()
  ALSA: usb-audio: More validations of descriptor units
  configfs: fix a deadlock in configfs_symlink()
  configfs: provide exclusion between IO and removals
  configfs: new object reprsenting tree fragments
  configfs_register_group() shouldn't be (and isn't) called in rmdirable parts
  configfs: stash the data we need into configfs_buffer at open time
  can: peak_usb: fix slab info leak
  can: mcba_usb: fix use-after-free on disconnect
  can: dev: add missing of_node_put() after calling of_get_child_by_name()
  can: gs_usb: gs_can_open(): prevent memory leak
  can: rx-offload: can_rx_offload_queue_sorted(): fix error handling, avoid skb mem leak
  can: peak_usb: fix a potential out-of-sync while decoding packets
  can: c_can: c_can_poll(): only read status register after status IRQ
  can: flexcan: disable completely the ECC mechanism
  can: usb_8dev: fix use-after-free on disconnect
  SMB3: Fix persistent handles reconnect
  x86/apic/32: Avoid bogus LDR warnings
  intel_th: pci: Add Jasper Lake PCH support
  intel_th: pci: Add Comet Lake PCH support
  netfilter: ipset: Fix an error code in ip_set_sockfn_get()
  netfilter: nf_tables: Align nft_expr private data to 64-bit
  ARM: sunxi: Fix CPU powerdown on A83T
  iio: srf04: fix wrong limitation in distance measuring
  iio: imu: adis16480: make sure provided frequency is positive
  iio: adc: stm32-adc: fix stopping dma
  ceph: add missing check in d_revalidate snapdir handling
  ceph: fix use-after-free in __ceph_remove_cap()
  arm64: Do not mask out PTE_RDONLY in pte_same()
  soundwire: bus: set initial value to port_status
  soundwire: depend on ACPI
  HID: wacom: generic: Treat serial number and related fields as unsigned
  drm/radeon: fix si_enable_smc_cac() failed issue
  perf tools: Fix time sorting
  tools: gpio: Use !building_out_of_srctree to determine srctree
  dump_stack: avoid the livelock of the dump_lock
  mm, vmstat: hide /proc/pagetypeinfo from normal users
  mm: thp: handle page cache THP correctly in PageTransCompoundMap
  mm, meminit: recalculate pcpu batch and high limits after init completes
  mm: memcontrol: fix network errors from failing __GFP_ATOMIC charges
  ALSA: hda/ca0132 - Fix possible workqueue stall
  ALSA: bebob: fix to detect configured source of sampling clock for Focusrite Saffire Pro i/o series
  ALSA: timer: Fix incorrectly assigned timer instance
  net: hns: Fix the stray netpoll locks causing deadlock in NAPI path
  ipv6: fixes rt6_probe() and fib6_nh->last_probe init
  net: mscc: ocelot: fix NULL pointer on LAG slave removal
  net: mscc: ocelot: don't handle netdev events for other netdevs
  qede: fix NULL pointer deref in __qede_remove()
  NFC: st21nfca: fix double free
  nfc: netlink: fix double device reference drop
  NFC: fdp: fix incorrect free object
  net: usb: qmi_wwan: add support for DW5821e with eSIM support
  net: qualcomm: rmnet: Fix potential UAF when unregistering
  net: fix data-race in neigh_event_send()
  net: ethernet: octeon_mgmt: Account for second possible VLAN header
  ipv4: Fix table id reference in fib_sync_down_addr
  CDC-NCM: handle incomplete transfer of MTU
  bonding: fix state transition issue in link monitoring
  Linux 4.19.83
  usb: gadget: udc: core: Fix segfault if udc_bind_to_driver() for pending driver fails
  arm64: dts: ti: k3-am65-main: Fix gic-its node unit-address
  ASoC: pcm3168a: The codec does not support S32_LE
  selftests/powerpc: Fix compile error on tlbie_test due to newer gcc
  selftests/powerpc: Add test case for tlbie vs mtpidr ordering issue
  powerpc/mm: Fixup tlbie vs mtpidr/mtlpidr ordering issue on POWER9
  platform/x86: pmc_atom: Add Siemens SIMATIC IPC227E to critclk_systems DMI table
  wireless: Skip directory when generating certificates
  net/flow_dissector: switch to siphash
  r8152: add device id for Lenovo ThinkPad USB-C Dock Gen 2
  net: dsa: fix switch tree list
  net: usb: lan78xx: Connect PHY before registering MAC
  net: bcmgenet: reset 40nm EPHY on energy detect
  net: phy: bcm7xxx: define soft_reset for 40nm EPHY
  net: bcmgenet: don't set phydev->link from MAC
  net: dsa: b53: Do not clear existing mirrored port mask
  net/mlx5e: Fix ethtool self test: link speed
  r8169: fix wrong PHY ID issue with RTL8168dp
  net/mlx5e: Fix handling of compressed CQEs in case of low NAPI budget
  selftests: fib_tests: add more tests for metric update
  ipv4: fix route update on metric change.
  net: add READ_ONCE() annotation in __skb_wait_for_more_packets()
  net: use skb_queue_empty_lockless() in busy poll contexts
  net: use skb_queue_empty_lockless() in poll() handlers
  udp: use skb_queue_empty_lockless()
  net: add skb_queue_empty_lockless()
  vxlan: check tun_info options_len properly
  udp: fix data-race in udp_set_dev_scratch()
  selftests: net: reuseport_dualstack: fix uninitalized parameter
  net: Zeroing the structure ethtool_wolinfo in ethtool_get_wol()
  net: usb: lan78xx: Disable interrupts before calling generic_handle_irq()
  netns: fix GFP flags in rtnl_net_notifyid()
  net/mlx4_core: Dynamically set guaranteed amount of counters per VF
  net: hisilicon: Fix ping latency when deal with high throughput
  net: fix sk_page_frag() recursion from memory reclaim
  net: ethernet: ftgmac100: Fix DMA coherency issue with SW checksum
  net: dsa: bcm_sf2: Fix IMP setup for port different than 8
  net: annotate lockless accesses to sk->sk_napi_id
  net: annotate accesses to sk->sk_incoming_cpu
  inet: stop leaking jiffies on the wire
  erspan: fix the tun_info options_len check for erspan
  dccp: do not leak jiffies on the wire
  cxgb4: fix panic when attaching to ULD fail
  nbd: handle racing with error'ed out commands
  nbd: protect cmd->status with cmd->lock
  cifs: Fix cifsInodeInfo lock_sem deadlock when reconnect occurs
  i2c: stm32f7: remove warning when compiling with W=1
  i2c: stm32f7: fix a race in slave mode with arbitration loss irq
  i2c: stm32f7: fix first byte to send in slave mode
  irqchip/gic-v3-its: Use the exact ITSList for VMOVP
  MIPS: bmips: mark exception vectors as char arrays
  of: unittest: fix memory leak in unittest_data_add
  ARM: 8926/1: v7m: remove register save to stack before svc
  tracing: Fix "gfp_t" format for synthetic events
  scsi: target: core: Do not overwrite CDB byte 1
  drm/amdgpu: fix potential VM faults
  ARM: davinci: dm365: Fix McBSP dma_slave_map entry
  perf kmem: Fix memory leak in compact_gfp_flags()
  8250-men-mcb: fix error checking when get_num_ports returns -ENODEV
  perf c2c: Fix memory leak in build_cl_output()
  ARM: dts: imx7s: Correct GPT's ipg clock source
  scsi: fix kconfig dependency warning related to 53C700_LE_ON_BE
  scsi: sni_53c710: fix compilation error
  scsi: scsi_dh_alua: handle RTPG sense code correctly during state transitions
  scsi: qla2xxx: fix a potential NULL pointer dereference
  ARM: mm: fix alignment handler faults under memory pressure
  pinctrl: ns2: Fix off by one bugs in ns2_pinmux_enable()
  ARM: dts: logicpd-torpedo-som: Remove twl_keypad
  ASoc: rockchip: i2s: Fix RPM imbalance
  ASoC: wm_adsp: Don't generate kcontrols without READ flags
  regulator: pfuze100-regulator: Variable "val" in pfuze100_regulator_probe() could be uninitialized
  ASoC: rt5682: add NULL handler to set_jack function
  regulator: ti-abb: Fix timeout in ti_abb_wait_txdone/ti_abb_clear_all_txdone
  arm64: dts: Fix gpio to pinmux mapping
  arm64: dts: allwinner: a64: sopine-baseboard: Add PHY regulator delay
  arm64: dts: allwinner: a64: pine64-plus: Add PHY regulator delay
  ASoC: wm8994: Do not register inapplicable controls for WM1811
  regulator: of: fix suspend-min/max-voltage parsing
  kbuild: add -fcf-protection=none when using retpoline flags
  Linux 4.19.82
  Revert "ALSA: hda: Flush interrupts on disabling"
  powerpc/powernv: Fix CPU idle to be called with IRQs disabled
  ALSA: usb-audio: Add DSD support for Gustard U16/X26 USB Interface
  ALSA: usb-audio: Update DSD support quirks for Oppo and Rotel
  ALSA: usb-audio: DSD auto-detection for Playback Designs
  ALSA: timer: Fix mutex deadlock at releasing card
  ALSA: timer: Simplify error path in snd_timer_open()
  sch_netem: fix rcu splat in netem_enqueue()
  net: usb: sr9800: fix uninitialized local variable
  bonding: fix potential NULL deref in bond_update_slave_arr
  NFC: pn533: fix use-after-free and memleaks
  rxrpc: Fix trace-after-put looking at the put peer record
  rxrpc: rxrpc_peer needs to hold a ref on the rxrpc_local record
  rxrpc: Fix call ref leak
  llc: fix sk_buff leak in llc_conn_service()
  llc: fix sk_buff leak in llc_sap_state_process()
  batman-adv: Avoid free/alloc race when handling OGM buffer
  NFS: Fix an RCU lock leak in nfs4_refresh_delegation_stateid()
  drm/amdgpu/powerplay/vega10: allow undervolting in p7
  dmaengine: cppi41: Fix cppi41_dma_prep_slave_sg() when idle
  dmaengine: qcom: bam_dma: Fix resource leak
  rtlwifi: Fix potential overflow on P2P code
  arm64: Ensure VM_WRITE|VM_SHARED ptes are clean by default
  s390/idle: fix cpu idle time calculation
  s390/cmm: fix information leak in cmm_timeout_handler()
  nl80211: fix validation of mesh path nexthop
  HID: fix error message in hid_open_report()
  HID: Fix assumption that devices have inputs
  HID: i2c-hid: add Trekstor Primebook C11B to descriptor override
  scsi: target: cxgbit: Fix cxgbit_fw4_ack()
  USB: serial: whiteheat: fix line-speed endianness
  USB: serial: whiteheat: fix potential slab corruption
  usb: xhci: fix __le32/__le64 accessors in debugfs code
  USB: ldusb: fix control-message timeout
  USB: ldusb: fix ring-buffer locking
  usb-storage: Revert commit 747668dbc061 ("usb-storage: Set virt_boundary_mask to avoid SG overflows")
  USB: gadget: Reject endpoints with 0 maxpacket value
  UAS: Revert commit 3ae62a42090f ("UAS: fix alignment of scatter/gather segments")
  ALSA: hda/realtek - Add support for ALC623
  ALSA: hda/realtek - Fix 2 front mics of codec 0x623
  ALSA: bebob: Fix prototype of helper function to return negative value
  fuse: truncate pending writes on O_TRUNC
  fuse: flush dirty data/metadata before non-truncate setattr
  ath6kl: fix a NULL-ptr-deref bug in ath6kl_usb_alloc_urb_from_pipe()
  thunderbolt: Use 32-bit writes when writing ring producer/consumer
  USB: legousbtower: fix a signedness bug in tower_probe()
  nbd: verify socket is supported during setup
  iwlwifi: exclude GEO SAR support for 3168
  ALSA: hda/realtek: Reduce the Headphone static noise on XPS 9350/9360
  ARM: 8914/1: NOMMU: Fix exc_ret for XIP
  tracing: Initialize iter->seq after zeroing in tracing_read_pipe()
  s390/uaccess: avoid (false positive) compiler warnings
  NFSv4: Fix leak of clp->cl_acceptor string
  nbd: fix possible sysfs duplicate warning
  virt: vbox: fix memory leak in hgcm_call_preprocess_linaddr
  MIPS: fw: sni: Fix out of bounds init of o32 stack
  MIPS: include: Mark __xchg as __always_inline
  iio: imu: adis16400: release allocated memory on failure
  drm/amdgpu: fix memory leak
  perf/x86/amd: Change/fix NMI latency mitigation to use a timestamp
  sched/vtime: Fix guest/system mis-accounting on task switch
  x86/cpu: Add Comet Lake to the Intel CPU models header
  arm64: armv8_deprecated: Checking return value for memory allocation
  fs: ocfs2: fix a possible null-pointer dereference in ocfs2_info_scan_inode_alloc()
  fs: ocfs2: fix a possible null-pointer dereference in ocfs2_write_end_nolock()
  fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()
  ocfs2: clear zero in unaligned direct IO
  x86/xen: Return from panic notifier
  MIPS: include: Mark __cmpxchg as __always_inline
  efi/x86: Do not clean dummy variable in kexec path
  efi/cper: Fix endianness of PCIe class code
  serial: mctrl_gpio: Check for NULL pointer
  fs: cifs: mute -Wunused-const-variable message
  gpio: max77620: Use correct unit for debounce times
  tty: n_hdlc: fix build on SPARC
  tty: serial: owl: Fix the link time qualifier of 'owl_uart_exit()'
  arm64: ftrace: Ensure synchronisation in PLT setup for Neoverse-N1 #1542419
  nfs: Fix nfsi->nrequests count error on nfs_inode_remove_request
  HID: hyperv: Use in-place iterator API in the channel callback
  RDMA/iwcm: Fix a lock inversion issue
  RDMA/hfi1: Prevent memory leak in sdma_init
  staging: rtl8188eu: fix null dereference when kzalloc fails
  perf annotate: Return appropriate error code for allocation failures
  perf annotate: Propagate the symbol__annotate() error return
  perf annotate: Fix the signedness of failure returns
  perf annotate: Propagate perf_env__arch() error
  perf tools: Propagate get_cpuid() error
  perf jevents: Fix period for Intel fixed counters
  perf script brstackinsn: Fix recovery from LBR/binary mismatch
  perf map: Fix overlapped map handling
  perf tests: Avoid raising SEGV using an obvious NULL dereference
  libsubcmd: Make _FORTIFY_SOURCE defines dependent on the feature
  iio: fix center temperature of bmc150-accel-core
  iio: adc: meson_saradc: Fix memory allocation order
  power: supply: max14656: fix potential use-after-free
  drm/amd/display: fix odm combine pipe reset
  PCI/PME: Fix possible use-after-free on remove
  net: dsa: mv88e6xxx: Release lock while requesting IRQ
  exec: load_script: Do not exec truncated interpreter path
  ext4: disallow files with EXT4_JOURNAL_DATA_FL from EXT4_IOC_SWAP_BOOT
  media: vimc: Remove unused but set variables
  ALSA: hda/realtek - Apply ALC294 hp init also for S4 resume
  cifs: add credits from unmatched responses/messages
  CIFS: Respect SMB2 hdr preamble size in read responses
  scsi: lpfc: Correct localport timeout duration error
  mlxsw: spectrum: Set LAG port collector only when active
  arm64: kpti: Whitelist HiSilicon Taishan v110 CPUs
  arm64: Add MIDR encoding for HiSilicon Taishan CPUs
  rtc: pcf8523: set xtal load capacitance from DT
  usb: handle warm-reset port requests on hub resume
  ALSA: usb-audio: Cleanup DSD whitelist
  usb: dwc3: gadget: clear DWC3_EP_TRANSFER_STARTED on cmd complete
  usb: dwc3: gadget: early giveback if End Transfer already completed
  samples: bpf: fix: seg fault with NULL pointer arg
  HID: steam: fix deadlock with input devices.
  HID: steam: fix boot loop with bluetooth firmware
  NFSv4: Ensure that the state manager exits the loop on SIGKILL
  HID: Add ASUS T100CHI keyboard dock battery quirks
  staging: mt7621-pinctrl: use pinconf-generic for 'dt_node_to_map' and 'dt_free_map'
  scripts/setlocalversion: Improve -dirty check with git-status --no-optional-locks
  clk: boston: unregister clks on failure in clk_boston_setup()
  ath10k: assign 'n_cipher_suites = 11' for WCN3990 to enable WPA3
  platform/x86: Fix config space access for intel_atomisp2_pm
  platform/x86: Add the VLV ISP PCI ID to atomisp2_pm
  HID: i2c-hid: Add Odys Winbook 13 to descriptor override
  HID: i2c-hid: Ignore input report if there's no data present on Elan touchpanels
  HID: i2c-hid: Disable runtime PM for LG touchscreen
  netfilter: ipset: Make invalid MAC address checks consistent
  Btrfs: fix deadlock on tree root leaf when finding free extent
  PCI: Fix Switchtec DMA aliasing quirk dmesg noise
  bcache: fix input overflow to writeback_rate_minimum
  drm/msm/dpu: handle failures while initializing displays
  x86/cpu: Add Atom Tremont (Jacobsville)
  tools/power turbostat: fix goldmont C-state limit decoding
  usb: dwc2: fix unbalanced use of external vbus-supply
  HID: i2c-hid: add Direkt-Tek DTLAPY133-1 to descriptor override
  f2fs: fix to recover inode->i_flags of inode block during POR
  f2fs: fix to recover inode's i_gc_failures during POR
  powerpc/powernv: hold device_hotplug_lock when calling memtrace_offline_pages()
  sc16is7xx: Fix for "Unexpected interrupt: 8"
  scsi: lpfc: Fix a duplicate 0711 log message number.
  f2fs: flush quota blocks after turnning it off
  wil6210: fix freeing of rx buffers in EDMA mode
  btrfs: tracepoints: Fix wrong parameter order for qgroup events
  btrfs: qgroup: Always free PREALLOC META reserve in btrfs_delalloc_release_extents()
  Btrfs: fix memory leak due to concurrent append writes with fiemap
  Btrfs: fix inode cache block reserve leak on failure to allocate data space
  dm snapshot: rework COW throttling to fix deadlock
  dm snapshot: introduce account_start_copy() and account_end_copy()
  zram: fix race between backing_dev_show and backing_dev_store

Conflicts:
	arch/arm64/include/asm/cputype.h
	drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
	drivers/net/wireless/ath/wil6210/txrx_edma.c
	drivers/usb/dwc3/gadget.c
	include/linux/cpu.h
	kernel/cpu.c

Following USB commits were reverted on importing android-4.19.57
into msm-4.19 due to BootTimeRunner failure. android-4.19-q.82
introduced new usb changes [1] that fixed the regression, hence it
is safe to restore the reverts. It is done in this merge.

  9c423fd89("usb: dwc3: Reset num_trbs after skipping")
  385cacd95("usb: dwc3: gadget: Clear req->needs_extra_trb flag on cleanup")
  6edcdd0e6("usb: dwc3: gadget: remove wait_end_transfer")
  d7ff2e3ff("usb: dwc3: gadget: move requests to cancelled_list")
  bba5f9878("usb: dwc3: gadget: introduce cancelled_list")
  65e1f3403("usb: dwc3: gadget: extract dwc3_gadget_ep_skip_trbs()")
  56092bd50("usb: dwc3: gadget: use num_trbs when skipping TRBs on->dequeue()")
  2a2b1c4dc("usb: dwc3: gadget: track number of TRBs per request")
  420b1237c("usb: dwc3: gadget: combine unaligned and zero flags")
  62805d319("Revert "usb: dwc3: gadget: Clear req->needs_extra_trb flag on cleanup"")

[1]
  a0608eec29("usb: dwc3: gadget: clear DWC3_EP_TRANSFER_STARTED on cmd complete")
  d0e8b35e91("usb: dwc3: gadget: early giveback if End Transfer already completed")

Change-Id: I77c3490d2c1cf7c8233a7e797c6f217f737621a2
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
2020-01-28 01:25:51 -08:00

2438 lines
58 KiB
C

/* CPU control.
* (C) 2001, 2002, 2003, 2004 Rusty Russell
*
* This code is licenced under the GPL.
*/
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <uapi/linux/sched/types.h>
#include <linux/cpuset.h>
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
#include <linux/sched/clock.h>
#include "smpboot.h"
/**
* cpuhp_cpu_state - Per cpu hotplug state storage
* @state: The current cpu state
* @target: The target state
* @thread: Pointer to the hotplug thread
* @should_run: Thread should execute
* @rollback: Perform a rollback
* @single: Single callback invocation
* @bringup: Single callback bringup or teardown selector
* @cb_state: The state for a single callback (install/uninstall)
* @result: Result of the operation
* @done_up: Signal completion to the issuer of the task for cpu-up
* @done_down: Signal completion to the issuer of the task for cpu-down
*/
struct cpuhp_cpu_state {
enum cpuhp_state state;
enum cpuhp_state target;
enum cpuhp_state fail;
#ifdef CONFIG_SMP
struct task_struct *thread;
bool should_run;
bool rollback;
bool single;
bool bringup;
bool booted_once;
struct hlist_node *node;
struct hlist_node *last;
enum cpuhp_state cb_state;
int result;
struct completion done_up;
struct completion done_down;
#endif
};
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
.fail = CPUHP_INVALID,
};
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
static inline void cpuhp_lock_acquire(bool bringup)
{
lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
static inline void cpuhp_lock_release(bool bringup)
{
lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else
static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }
#endif
/**
* cpuhp_step - Hotplug state machine step
* @name: Name of the step
* @startup: Startup function of the step
* @teardown: Teardown function of the step
* @cant_stop: Bringup/teardown can't be stopped at this step
*/
struct cpuhp_step {
const char *name;
union {
int (*single)(unsigned int cpu);
int (*multi)(unsigned int cpu,
struct hlist_node *node);
} startup;
union {
int (*single)(unsigned int cpu);
int (*multi)(unsigned int cpu,
struct hlist_node *node);
} teardown;
struct hlist_head list;
bool cant_stop;
bool multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
return cpuhp_hp_states + state;
}
/**
* cpuhp_invoke_callback _ Invoke the callbacks for a given state
* @cpu: The cpu for which the callback should be invoked
* @state: The state to do callbacks for
* @bringup: True if the bringup callback should be invoked
* @node: For multi-instance, do a single entry callback for install/remove
* @lastp: For multi-instance rollback, remember how far we got
*
* Called from cpu hotplug and from the state register machinery.
*/
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
bool bringup, struct hlist_node *node,
struct hlist_node **lastp)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct cpuhp_step *step = cpuhp_get_step(state);
int (*cbm)(unsigned int cpu, struct hlist_node *node);
int (*cb)(unsigned int cpu);
int ret, cnt;
if (st->fail == state) {
st->fail = CPUHP_INVALID;
if (!(bringup ? step->startup.single : step->teardown.single))
return 0;
return -EAGAIN;
}
if (!step->multi_instance) {
WARN_ON_ONCE(lastp && *lastp);
cb = bringup ? step->startup.single : step->teardown.single;
if (!cb)
return 0;
trace_cpuhp_enter(cpu, st->target, state, cb);
ret = cb(cpu);
trace_cpuhp_exit(cpu, st->state, state, ret);
return ret;
}
cbm = bringup ? step->startup.multi : step->teardown.multi;
if (!cbm)
return 0;
/* Single invocation for instance add/remove */
if (node) {
WARN_ON_ONCE(lastp && *lastp);
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
return ret;
}
/* State transition. Invoke on all instances */
cnt = 0;
hlist_for_each(node, &step->list) {
if (lastp && node == *lastp)
break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
if (ret) {
if (!lastp)
goto err;
*lastp = node;
return ret;
}
cnt++;
}
if (lastp)
*lastp = NULL;
return 0;
err:
/* Rollback the instances if one failed */
cbm = !bringup ? step->startup.multi : step->teardown.multi;
if (!cbm)
return ret;
hlist_for_each(node, &step->list) {
if (!cnt--)
break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
/*
* Rollback must not fail,
*/
WARN_ON_ONCE(ret);
}
return ret;
}
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
/*
* The extra check for CPUHP_TEARDOWN_CPU is only for documentation
* purposes as that state is handled explicitly in cpu_down.
*/
return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
struct completion *done = bringup ? &st->done_up : &st->done_down;
wait_for_completion(done);
}
static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
struct completion *done = bringup ? &st->done_up : &st->done_down;
complete(done);
}
/*
* The former STARTING/DYING states, ran with IRQs disabled and must not fail.
*/
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
* The following two APIs (cpu_maps_update_begin/done) must be used when
* attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
*/
void cpu_maps_update_begin(void)
{
mutex_lock(&cpu_add_remove_lock);
}
void cpu_maps_update_done(void)
{
mutex_unlock(&cpu_add_remove_lock);
}
/*
* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
*/
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
void cpus_read_lock(void)
{
percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);
int cpus_read_trylock(void)
{
return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);
void cpus_read_unlock(void)
{
percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
void cpus_write_lock(void)
{
percpu_down_write(&cpu_hotplug_lock);
}
void cpus_write_unlock(void)
{
percpu_up_write(&cpu_hotplug_lock);
}
void lockdep_assert_cpus_held(void)
{
/*
* We can't have hotplug operations before userspace starts running,
* and some init codepaths will knowingly not take the hotplug lock.
* This is all valid, so mute lockdep until it makes sense to report
* unheld locks.
*/
if (system_state < SYSTEM_RUNNING)
return;
percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
/*
* Wait for currently running CPU hotplug operations to complete (if any) and
* disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
* the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
* hotplug path before performing hotplug operations. So acquiring that lock
* guarantees mutual exclusion from any currently running hotplug operations.
*/
void cpu_hotplug_disable(void)
{
cpu_maps_update_begin();
cpu_hotplug_disabled++;
cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
static void __cpu_hotplug_enable(void)
{
if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
return;
cpu_hotplug_disabled--;
}
void cpu_hotplug_enable(void)
{
cpu_maps_update_begin();
__cpu_hotplug_enable();
cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif /* CONFIG_HOTPLUG_CPU */
/*
* Architectures that need SMT-specific errata handling during SMT hotplug
* should override this.
*/
void __weak arch_smt_update(void) { }
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
void __init cpu_smt_disable(bool force)
{
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
return;
if (force) {
pr_info("SMT: Force disabled\n");
cpu_smt_control = CPU_SMT_FORCE_DISABLED;
} else {
cpu_smt_control = CPU_SMT_DISABLED;
}
}
/*
* The decision whether SMT is supported can only be done after the full
* CPU identification. Called from architecture code.
*/
void __init cpu_smt_check_topology(void)
{
if (!topology_smt_supported())
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}
static int __init smt_cmdline_disable(char *str)
{
cpu_smt_disable(str && !strcmp(str, "force"));
return 0;
}
early_param("nosmt", smt_cmdline_disable);
static inline bool cpu_smt_allowed(unsigned int cpu)
{
if (cpu_smt_control == CPU_SMT_ENABLED)
return true;
if (topology_is_primary_thread(cpu))
return true;
/*
* On x86 it's required to boot all logical CPUs at least once so
* that the init code can get a chance to set CR4.MCE on each
* CPU. Otherwise, a broadacasted MCE observing CR4.MCE=0b on any
* core will shutdown the machine.
*/
return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
st->rollback = false;
st->last = NULL;
st->target = target;
st->single = false;
st->bringup = st->state < target;
return prev_state;
}
static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
st->rollback = true;
/*
* If we have st->last we need to undo partial multi_instance of this
* state first. Otherwise start undo at the previous state.
*/
if (!st->last) {
if (st->bringup)
st->state--;
else
st->state++;
}
st->target = prev_state;
st->bringup = !st->bringup;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
if (!st->single && st->state == st->target)
return;
st->result = 0;
/*
* Make sure the above stores are visible before should_run becomes
* true. Paired with the mb() above in cpuhp_thread_fun()
*/
smp_mb();
st->should_run = true;
wake_up_process(st->thread);
wait_for_ap_thread(st, st->bringup);
}
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
enum cpuhp_state prev_state;
int ret;
prev_state = cpuhp_set_state(st, target);
__cpuhp_kick_ap(st);
if ((ret = st->result)) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
return ret;
}
static int bringup_wait_for_ap(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
wait_for_ap_thread(st, true);
if (WARN_ON_ONCE((!cpu_online(cpu))))
return -ECANCELED;
/* Unpark the stopper thread and the hotplug thread of the target cpu */
stop_machine_unpark(cpu);
kthread_unpark(st->thread);
/*
* SMT soft disabling on X86 requires to bring the CPU out of the
* BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
* CPU marked itself as booted_once in cpu_notify_starting() so the
* cpu_smt_allowed() check will now return false if this is not the
* primary sibling.
*/
if (!cpu_smt_allowed(cpu))
return -ECANCELED;
if (st->target <= CPUHP_AP_ONLINE_IDLE)
return 0;
return cpuhp_kick_ap(st, st->target);
}
static int bringup_cpu(unsigned int cpu)
{
struct task_struct *idle = idle_thread_get(cpu);
int ret;
/*
* Some architectures have to walk the irq descriptors to
* setup the vector space for the cpu which comes online.
* Prevent irq alloc/free across the bringup.
*/
irq_lock_sparse();
/* Arch-specific enabling code. */
ret = __cpu_up(cpu, idle);
irq_unlock_sparse();
if (ret)
return ret;
return bringup_wait_for_ap(cpu);
}
/*
* Hotplug state machine related functions
*/
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
for (st->state--; st->state > st->target; st->state--)
cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}
static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
return true;
/*
* When CPU hotplug is disabled, then taking the CPU down is not
* possible because takedown_cpu() and the architecture and
* subsystem specific mechanisms are not available. So the CPU
* which would be completely unplugged again needs to stay around
* in the current state.
*/
return st->state <= CPUHP_BRINGUP_CPU;
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
int ret = 0;
while (st->state < target) {
st->state++;
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
if (ret) {
if (can_rollback_cpu(st)) {
st->target = prev_state;
undo_cpu_up(cpu, st);
}
break;
}
}
return ret;
}
/*
* The cpu hotplug threads manage the bringup and teardown of the cpus
*/
static void cpuhp_create(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
init_completion(&st->done_up);
init_completion(&st->done_down);
}
static int cpuhp_should_run(unsigned int cpu)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
return st->should_run;
}
/*
* Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
* callbacks when a state gets [un]installed at runtime.
*
* Each invocation of this function by the smpboot thread does a single AP
* state callback.
*
* It has 3 modes of operation:
* - single: runs st->cb_state
* - up: runs ++st->state, while st->state < st->target
* - down: runs st->state--, while st->state > st->target
*
* When complete or on error, should_run is cleared and the completion is fired.
*/
static void cpuhp_thread_fun(unsigned int cpu)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
bool bringup = st->bringup;
enum cpuhp_state state;
if (WARN_ON_ONCE(!st->should_run))
return;
/*
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
* that if we see ->should_run we also see the rest of the state.
*/
smp_mb();
cpuhp_lock_acquire(bringup);
if (st->single) {
state = st->cb_state;
st->should_run = false;
} else {
if (bringup) {
st->state++;
state = st->state;
st->should_run = (st->state < st->target);
WARN_ON_ONCE(st->state > st->target);
} else {
state = st->state;
st->state--;
st->should_run = (st->state > st->target);
WARN_ON_ONCE(st->state < st->target);
}
}
WARN_ON_ONCE(!cpuhp_is_ap_state(state));
if (cpuhp_is_atomic_state(state)) {
local_irq_disable();
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
local_irq_enable();
/*
* STARTING/DYING must not fail!
*/
WARN_ON_ONCE(st->result);
} else {
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
}
if (st->result) {
/*
* If we fail on a rollback, we're up a creek without no
* paddle, no way forward, no way back. We loose, thanks for
* playing.
*/
WARN_ON_ONCE(st->rollback);
st->should_run = false;
}
cpuhp_lock_release(bringup);
if (!st->should_run)
complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int ret;
if (!cpu_online(cpu))
return 0;
cpuhp_lock_acquire(false);
cpuhp_lock_release(false);
cpuhp_lock_acquire(true);
cpuhp_lock_release(true);
/*
* If we are up and running, use the hotplug thread. For early calls
* we invoke the thread function directly.
*/
if (!st->thread)
return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
st->rollback = false;
st->last = NULL;
st->node = node;
st->bringup = bringup;
st->cb_state = state;
st->single = true;
__cpuhp_kick_ap(st);
/*
* If we failed and did a partial, do a rollback.
*/
if ((ret = st->result) && st->last) {
st->rollback = true;
st->bringup = !bringup;
__cpuhp_kick_ap(st);
}
/*
* Clean up the leftovers so the next hotplug operation wont use stale
* data.
*/
st->node = st->last = NULL;
return ret;
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
enum cpuhp_state prev_state = st->state;
int ret;
cpuhp_lock_acquire(false);
cpuhp_lock_release(false);
cpuhp_lock_acquire(true);
cpuhp_lock_release(true);
trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
ret = cpuhp_kick_ap(st, st->target);
trace_cpuhp_exit(cpu, st->state, prev_state, ret);
return ret;
}
static struct smp_hotplug_thread cpuhp_threads = {
.store = &cpuhp_state.thread,
.create = &cpuhp_create,
.thread_should_run = cpuhp_should_run,
.thread_fn = cpuhp_thread_fun,
.thread_comm = "cpuhp/%u",
.selfparking = true,
};
void __init cpuhp_threads_init(void)
{
BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
*
* This function walks all processes, finds a valid mm struct for each one and
* then clears a corresponding bit in mm's cpumask. While this all sounds
* trivial, there are various non-obvious corner cases, which this function
* tries to solve in a safe manner.
*
* Also note that the function uses a somewhat relaxed locking scheme, so it may
* be called only for an already offlined CPU.
*/
void clear_tasks_mm_cpumask(int cpu)
{
struct task_struct *p;
/*
* This function is called after the cpu is taken down and marked
* offline, so its not like new tasks will ever get this cpu set in
* their mm mask. -- Peter Zijlstra
* Thus, we may use rcu_read_lock() here, instead of grabbing
* full-fledged tasklist_lock.
*/
WARN_ON(cpu_online(cpu));
rcu_read_lock();
for_each_process(p) {
struct task_struct *t;
/*
* Main thread might exit, but other threads may still have
* a valid mm. Find one.
*/
t = find_lock_task_mm(p);
if (!t)
continue;
cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
task_unlock(t);
}
rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
int err, cpu = smp_processor_id();
int ret;
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
return err;
/*
* We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
* do this step again.
*/
WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
st->state--;
/* Invoke the former CPU_DYING callbacks */
for (; st->state > target; st->state--) {
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
/*
* DYING must not fail!
*/
WARN_ON_ONCE(ret);
}
/* Give up timekeeping duties */
tick_handover_do_timer();
/* Park the stopper thread */
stop_machine_park(cpu);
return 0;
}
static int takedown_cpu(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
/* Park the smpboot threads */
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
* interrupt affinities.
*/
irq_lock_sparse();
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
if (err) {
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
return err;
}
BUG_ON(cpu_online(cpu));
/*
* The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
* all runnable tasks from the CPU, there's only the idle task left now
* that the migration thread is done doing the stop_machine thing.
*
* Wait for the stop thread to go away.
*/
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
hotplug_cpu__broadcast_tick_pull(cpu);
/* This actually kills the CPU. */
__cpu_die(cpu);
tick_cleanup_dead_cpu(cpu);
rcutree_migrate_callbacks(cpu);
return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
struct cpuhp_cpu_state *st = arg;
complete_ap_thread(st, false);
}
void cpuhp_report_idle_dead(void)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
BUG_ON(st->state != CPUHP_AP_OFFLINE);
rcu_report_dead(smp_processor_id());
st->state = CPUHP_AP_IDLE_DEAD;
/*
* We cannot call complete after rcu_report_dead() so we delegate it
* to an online cpu.
*/
smp_call_function_single(cpumask_first(cpu_online_mask),
cpuhp_complete_idle_dead, st, 0);
}
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
for (st->state++; st->state < st->target; st->state++)
cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
int ret = 0;
for (; st->state > target; st->state--) {
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
BUG_ON(ret && st->state < CPUHP_AP_IDLE_DEAD);
if (ret) {
st->target = prev_state;
if (st->state < prev_state)
undo_cpu_down(cpu, st);
break;
}
}
return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
enum cpuhp_state target)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
u64 start_time = 0;
if (num_online_cpus() == 1)
return -EBUSY;
if (!cpu_present(cpu))
return -EINVAL;
if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1)
return -EBUSY;
cpus_write_lock();
if (trace_cpuhp_latency_enabled())
start_time = sched_clock();
cpuhp_tasks_frozen = tasks_frozen;
prev_state = cpuhp_set_state(st, target);
/*
* If the current CPU state is in the range of the AP hotplug thread,
* then we need to kick the thread.
*/
if (st->state > CPUHP_TEARDOWN_CPU) {
st->target = max((int)target, CPUHP_TEARDOWN_CPU);
ret = cpuhp_kick_ap_work(cpu);
/*
* The AP side has done the error rollback already. Just
* return the error code..
*/
if (ret)
goto out;
/*
* We might have stopped still in the range of the AP hotplug
* thread. Nothing to do anymore.
*/
if (st->state > CPUHP_TEARDOWN_CPU)
goto out;
st->target = target;
}
/*
* The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
* to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, target);
if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
out:
trace_cpuhp_latency(cpu, 0, start_time, ret);
cpus_write_unlock();
/*
* Do post unplug cleanup. This is still protected against
* concurrent CPU hotplug via cpu_add_remove_lock.
*/
lockup_detector_cleanup();
arch_smt_update();
return ret;
}
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
if (cpu_hotplug_disabled)
return -EBUSY;
return _cpu_down(cpu, 0, target);
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
int err;
/*
* When cpusets are enabled, the rebuilding of the scheduling
* domains is deferred to a workqueue context. Make sure
* that the work is completed before proceeding to the next
* hotplug. Otherwise scheduler observes an inconsistent
* view of online and offline CPUs in the root domain. If
* the online CPUs are still stuck in the offline (default)
* domain, those CPUs would not be visible when scheduling
* happens on from other CPUs in the root domain.
*/
cpuset_wait_for_hotplug();
cpu_maps_update_begin();
err = cpu_down_maps_locked(cpu, target);
cpu_maps_update_done();
return err;
}
int cpu_down(unsigned int cpu)
{
return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#else
#define takedown_cpu NULL
#endif /*CONFIG_HOTPLUG_CPU*/
/**
* notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
* @cpu: cpu that just started
*
* It must be called by the arch code on the new cpu, before the new cpu
* enables interrupts and before the "boot" cpu returns from __cpu_up().
*/
void notify_cpu_starting(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
int ret;
rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
st->booted_once = true;
while (st->state < target) {
st->state++;
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
/*
* STARTING must not fail!
*/
WARN_ON_ONCE(ret);
}
}
/*
* Called from the idle task. Wake up the controlling task which brings the
* stopper and the hotplug thread of the upcoming CPU up and then delegates
* the rest of the online bringup to the hotplug thread.
*/
void cpuhp_online_idle(enum cpuhp_state state)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
/* Happens for the boot cpu */
if (state != CPUHP_AP_ONLINE_IDLE)
return;
st->state = CPUHP_AP_ONLINE_IDLE;
complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct task_struct *idle;
int ret = 0;
u64 start_time = 0;
cpus_write_lock();
if (trace_cpuhp_latency_enabled())
start_time = sched_clock();
if (!cpu_present(cpu)) {
ret = -EINVAL;
goto out;
}
/*
* The caller of do_cpu_up might have raced with another
* caller. Ignore it for now.
*/
if (st->state >= target)
goto out;
if (st->state == CPUHP_OFFLINE) {
/* Let it fail before we try to bring the cpu up */
idle = idle_thread_get(cpu);
if (IS_ERR(idle)) {
ret = PTR_ERR(idle);
goto out;
}
}
cpuhp_tasks_frozen = tasks_frozen;
cpuhp_set_state(st, target);
/*
* If the current CPU state is in the range of the AP hotplug thread,
* then we need to kick the thread once more.
*/
if (st->state > CPUHP_BRINGUP_CPU) {
ret = cpuhp_kick_ap_work(cpu);
/*
* The AP side has done the error rollback already. Just
* return the error code..
*/
if (ret)
goto out;
}
/*
* Try to reach the target state. We max out on the BP at
* CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
* responsible for bringing it up to the target state.
*/
target = min((int)target, CPUHP_BRINGUP_CPU);
ret = cpuhp_up_callbacks(cpu, st, target);
out:
trace_cpuhp_latency(cpu, 1, start_time, ret);
cpus_write_unlock();
arch_smt_update();
return ret;
}
static int switch_to_rt_policy(void)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
unsigned int policy = current->policy;
int err;
/* Nobody should be attempting hotplug from these policy contexts. */
if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
policy == SCHED_DEADLINE)
return -EPERM;
if (policy == SCHED_FIFO || policy == SCHED_RR)
return 1;
/* Only SCHED_NORMAL left. */
err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
return err;
}
static int switch_to_fair_policy(void)
{
struct sched_param param = { .sched_priority = 0 };
return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
int err = 0;
int switch_err = 0;
if (!cpu_possible(cpu)) {
pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
cpu);
#if defined(CONFIG_IA64)
pr_err("please check additional_cpus= boot parameter\n");
#endif
return -EINVAL;
}
cpuset_wait_for_hotplug();
switch_err = switch_to_rt_policy();
if (switch_err < 0)
return switch_err;
err = try_online_node(cpu_to_node(cpu));
if (err)
return err;
cpu_maps_update_begin();
if (cpu_hotplug_disabled) {
err = -EBUSY;
goto out;
}
if (!cpu_smt_allowed(cpu)) {
err = -EPERM;
goto out;
}
err = _cpu_up(cpu, 0, target);
out:
cpu_maps_update_done();
if (!switch_err) {
switch_err = switch_to_fair_policy();
if (switch_err)
pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
switch_err, current->comm, current->pid);
}
return err;
}
int cpu_up(unsigned int cpu)
{
return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
int freeze_secondary_cpus(int primary)
{
int cpu, error = 0;
cpu_maps_update_begin();
if (!cpu_online(primary))
primary = cpumask_first(cpu_online_mask);
/*
* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
pr_info("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == primary)
continue;
if (pm_wakeup_pending()) {
pr_info("Wakeup pending. Abort CPU freeze\n");
error = -EBUSY;
break;
}
trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
if (!error)
cpumask_set_cpu(cpu, frozen_cpus);
else {
pr_err("Error taking CPU%d down: %d\n", cpu, error);
break;
}
}
if (!error)
BUG_ON(num_online_cpus() > 1);
else
pr_err("Non-boot CPUs are not disabled\n");
/*
* Make sure the CPUs won't be enabled by someone else. We need to do
* this even in case of failure as all disable_nonboot_cpus() users are
* supposed to do enable_nonboot_cpus() on the failure path.
*/
cpu_hotplug_disabled++;
cpu_maps_update_done();
return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}
void __weak arch_enable_nonboot_cpus_end(void)
{
}
void enable_nonboot_cpus(void)
{
int cpu, error;
struct device *cpu_device;
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
__cpu_hotplug_enable();
if (cpumask_empty(frozen_cpus))
goto out;
pr_info("Enabling non-boot CPUs ...\n");
arch_enable_nonboot_cpus_begin();
for_each_cpu(cpu, frozen_cpus) {
trace_suspend_resume(TPS("CPU_ON"), cpu, true);
error = _cpu_up(cpu, 1, CPUHP_ONLINE);
trace_suspend_resume(TPS("CPU_ON"), cpu, false);
if (!error) {
pr_info("CPU%d is up\n", cpu);
cpu_device = get_cpu_device(cpu);
if (!cpu_device)
pr_err("%s: failed to get cpu%d device\n",
__func__, cpu);
else
kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
continue;
}
pr_warn("Error taking CPU%d up: %d\n", cpu, error);
}
arch_enable_nonboot_cpus_end();
cpumask_clear(frozen_cpus);
out:
cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
return -ENOMEM;
return 0;
}
core_initcall(alloc_frozen_cpus);
/*
* When callbacks for CPU hotplug notifications are being executed, we must
* ensure that the state of the system with respect to the tasks being frozen
* or not, as reported by the notification, remains unchanged *throughout the
* duration* of the execution of the callbacks.
* Hence we need to prevent the freezer from racing with regular CPU hotplug.
*
* This synchronization is implemented by mutually excluding regular CPU
* hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
* Hibernate notifications.
*/
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
unsigned long action, void *ptr)
{
switch (action) {
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
cpu_hotplug_disable();
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
cpu_hotplug_enable();
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
/*
* cpu_hotplug_pm_callback has higher priority than x86
* bsp_pm_callback which depends on cpu_hotplug_pm_callback
* to disable cpu hotplug to avoid cpu hotplug race.
*/
pm_notifier(cpu_hotplug_pm_callback, 0);
return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
#endif /* CONFIG_PM_SLEEP_SMP */
int __boot_cpu_id;
#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
[CPUHP_OFFLINE] = {
.name = "offline",
.startup.single = NULL,
.teardown.single = NULL,
},
#ifdef CONFIG_SMP
[CPUHP_CREATE_THREADS]= {
.name = "threads:prepare",
.startup.single = smpboot_create_threads,
.teardown.single = NULL,
.cant_stop = true,
},
[CPUHP_PERF_PREPARE] = {
.name = "perf:prepare",
.startup.single = perf_event_init_cpu,
.teardown.single = perf_event_exit_cpu,
},
[CPUHP_WORKQUEUE_PREP] = {
.name = "workqueue:prepare",
.startup.single = workqueue_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_HRTIMERS_PREPARE] = {
.name = "hrtimers:prepare",
.startup.single = hrtimers_prepare_cpu,
.teardown.single = hrtimers_dead_cpu,
},
[CPUHP_SMPCFD_PREPARE] = {
.name = "smpcfd:prepare",
.startup.single = smpcfd_prepare_cpu,
.teardown.single = smpcfd_dead_cpu,
},
[CPUHP_RELAY_PREPARE] = {
.name = "relay:prepare",
.startup.single = relay_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_SLAB_PREPARE] = {
.name = "slab:prepare",
.startup.single = slab_prepare_cpu,
.teardown.single = slab_dead_cpu,
},
[CPUHP_RCUTREE_PREP] = {
.name = "RCU/tree:prepare",
.startup.single = rcutree_prepare_cpu,
.teardown.single = rcutree_dead_cpu,
},
/*
* On the tear-down path, timers_dead_cpu() must be invoked
* before blk_mq_queue_reinit_notify() from notify_dead(),
* otherwise a RCU stall occurs.
*/
[CPUHP_TIMERS_PREPARE] = {
.name = "timers:prepare",
.startup.single = timers_prepare_cpu,
.teardown.single = timers_dead_cpu,
},
/* Kicks the plugged cpu into life */
[CPUHP_BRINGUP_CPU] = {
.name = "cpu:bringup",
.startup.single = bringup_cpu,
.teardown.single = NULL,
.cant_stop = true,
},
/* Final state before CPU kills itself */
[CPUHP_AP_IDLE_DEAD] = {
.name = "idle:dead",
},
/*
* Last state before CPU enters the idle loop to die. Transient state
* for synchronization.
*/
[CPUHP_AP_OFFLINE] = {
.name = "ap:offline",
.cant_stop = true,
},
/* First state is scheduler control. Interrupts are disabled */
[CPUHP_AP_SCHED_STARTING] = {
.name = "sched:starting",
.startup.single = sched_cpu_starting,
.teardown.single = sched_cpu_dying,
},
[CPUHP_AP_RCUTREE_DYING] = {
.name = "RCU/tree:dying",
.startup.single = NULL,
.teardown.single = rcutree_dying_cpu,
},
[CPUHP_AP_SMPCFD_DYING] = {
.name = "smpcfd:dying",
.startup.single = NULL,
.teardown.single = smpcfd_dying_cpu,
},
/* Entry state on starting. Interrupts enabled from here on. Transient
* state for synchronsization */
[CPUHP_AP_ONLINE] = {
.name = "ap:online",
},
/*
* Handled on controll processor until the plugged processor manages
* this itself.
*/
[CPUHP_TEARDOWN_CPU] = {
.name = "cpu:teardown",
.startup.single = NULL,
.teardown.single = takedown_cpu,
.cant_stop = true,
},
/* Handle smpboot threads park/unpark */
[CPUHP_AP_SMPBOOT_THREADS] = {
.name = "smpboot/threads:online",
.startup.single = smpboot_unpark_threads,
.teardown.single = smpboot_park_threads,
},
[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
.name = "irq/affinity:online",
.startup.single = irq_affinity_online_cpu,
.teardown.single = NULL,
},
[CPUHP_AP_PERF_ONLINE] = {
.name = "perf:online",
.startup.single = perf_event_restart_events,
.teardown.single = perf_event_exit_cpu,
},
[CPUHP_AP_WATCHDOG_ONLINE] = {
.name = "lockup_detector:online",
.startup.single = lockup_detector_online_cpu,
.teardown.single = lockup_detector_offline_cpu,
},
[CPUHP_AP_WORKQUEUE_ONLINE] = {
.name = "workqueue:online",
.startup.single = workqueue_online_cpu,
.teardown.single = workqueue_offline_cpu,
},
[CPUHP_AP_RCUTREE_ONLINE] = {
.name = "RCU/tree:online",
.startup.single = rcutree_online_cpu,
.teardown.single = rcutree_offline_cpu,
},
#endif
/*
* The dynamically registered state space is here
*/
#ifdef CONFIG_SMP
/* Last state is scheduler control setting the cpu active */
[CPUHP_AP_ACTIVE] = {
.name = "sched:active",
.startup.single = sched_cpu_activate,
.teardown.single = sched_cpu_deactivate,
},
#endif
/* CPU is fully up and running. */
[CPUHP_ONLINE] = {
.name = "online",
.startup.single = NULL,
.teardown.single = NULL,
},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
return -EINVAL;
return 0;
}
/*
* Returns a free for dynamic slot assignment of the Online state. The states
* are protected by the cpuhp_slot_states mutex and an empty slot is identified
* by having no name assigned.
*/
static int cpuhp_reserve_state(enum cpuhp_state state)
{
enum cpuhp_state i, end;
struct cpuhp_step *step;
switch (state) {
case CPUHP_AP_ONLINE_DYN:
step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
end = CPUHP_AP_ONLINE_DYN_END;
break;
case CPUHP_BP_PREPARE_DYN:
step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
end = CPUHP_BP_PREPARE_DYN_END;
break;
default:
return -EINVAL;
}
for (i = state; i <= end; i++, step++) {
if (!step->name)
return i;
}
WARN(1, "No more dynamic states available for CPU hotplug\n");
return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
/* (Un)Install the callbacks for further cpu hotplug operations */
struct cpuhp_step *sp;
int ret = 0;
/*
* If name is NULL, then the state gets removed.
*
* CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
* the first allocation from these dynamic ranges, so the removal
* would trigger a new allocation and clear the wrong (already
* empty) state, leaving the callbacks of the to be cleared state
* dangling, which causes wreckage on the next hotplug operation.
*/
if (name && (state == CPUHP_AP_ONLINE_DYN ||
state == CPUHP_BP_PREPARE_DYN)) {
ret = cpuhp_reserve_state(state);
if (ret < 0)
return ret;
state = ret;
}
sp = cpuhp_get_step(state);
if (name && sp->name)
return -EBUSY;
sp->startup.single = startup;
sp->teardown.single = teardown;
sp->name = name;
sp->multi_instance = multi_instance;
INIT_HLIST_HEAD(&sp->list);
return ret;
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
return cpuhp_get_step(state)->teardown.single;
}
/*
* Call the startup/teardown function for a step either on the AP or
* on the current CPU.
*/
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int ret;
/*
* If there's nothing to do, we done.
* Relies on the union for multi_instance.
*/
if ((bringup && !sp->startup.single) ||
(!bringup && !sp->teardown.single))
return 0;
/*
* The non AP bound callbacks can fail on bringup. On teardown
* e.g. module removal we crash for now.
*/
#ifdef CONFIG_SMP
if (cpuhp_is_ap_state(state))
ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
BUG_ON(ret && !bringup);
return ret;
}
/*
* Called from __cpuhp_setup_state on a recoverable failure.
*
* Note: The teardown callbacks for rollback are not allowed to fail!
*/
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
struct hlist_node *node)
{
int cpu;
/* Roll back the already executed steps on the other cpus */
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpu >= failedcpu)
break;
/* Did we invoke the startup call on that cpu ? */
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, node);
}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
struct hlist_node *node,
bool invoke)
{
struct cpuhp_step *sp;
int cpu;
int ret;
lockdep_assert_cpus_held();
sp = cpuhp_get_step(state);
if (sp->multi_instance == false)
return -EINVAL;
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !sp->startup.multi)
goto add_node;
/*
* Try to call the startup callback for each present cpu
* depending on the hotplug state of the cpu.
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate < state)
continue;
ret = cpuhp_issue_call(cpu, state, true, node);
if (ret) {
if (sp->teardown.multi)
cpuhp_rollback_install(cpu, state, node);
goto unlock;
}
}
add_node:
ret = 0;
hlist_add_head(node, &sp->list);
unlock:
mutex_unlock(&cpuhp_state_mutex);
return ret;
}
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
bool invoke)
{
int ret;
cpus_read_lock();
ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
/**
* __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
* @state: The state to setup
* @invoke: If true, the startup function is invoked for cpus where
* cpu state >= @state
* @startup: startup callback function
* @teardown: teardown callback function
* @multi_instance: State is set up for multiple instances which get
* added afterwards.
*
* The caller needs to hold cpus read locked while calling this function.
* Returns:
* On success:
* Positive state number if @state is CPUHP_AP_ONLINE_DYN
* 0 for all other states
* On failure: proper (negative) error code
*/
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
int cpu, ret = 0;
bool dynstate;
lockdep_assert_cpus_held();
if (cpuhp_cb_check(state) || !name)
return -EINVAL;
mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
dynstate = state == CPUHP_AP_ONLINE_DYN;
if (ret > 0 && dynstate) {
state = ret;
ret = 0;
}
if (ret || !invoke || !startup)
goto out;
/*
* Try to call the startup callback for each present cpu
* depending on the hotplug state of the cpu.
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate < state)
continue;
ret = cpuhp_issue_call(cpu, state, true, NULL);
if (ret) {
if (teardown)
cpuhp_rollback_install(cpu, state, NULL);
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
goto out;
}
}
out:
mutex_unlock(&cpuhp_state_mutex);
/*
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success.
*/
if (!ret && dynstate)
return state;
return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
int __cpuhp_setup_state(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
int ret;
cpus_read_lock();
ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
teardown, multi_instance);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
int __cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node, bool invoke)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int cpu;
BUG_ON(cpuhp_cb_check(state));
if (!sp->multi_instance)
return -EINVAL;
cpus_read_lock();
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
/*
* Call the teardown callback for each present cpu depending
* on the hotplug state of the cpu. This function is not
* allowed to fail currently!
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, node);
}
remove:
hlist_del(node);
mutex_unlock(&cpuhp_state_mutex);
cpus_read_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
* __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
* @state: The state to remove
* @invoke: If true, the teardown function is invoked for cpus where
* cpu state >= @state
*
* The caller needs to hold cpus read locked while calling this function.
* The teardown callback is currently not allowed to fail. Think
* about module removal!
*/
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int cpu;
BUG_ON(cpuhp_cb_check(state));
lockdep_assert_cpus_held();
mutex_lock(&cpuhp_state_mutex);
if (sp->multi_instance) {
WARN(!hlist_empty(&sp->list),
"Error: Removing state %d which has instances left.\n",
state);
goto remove;
}
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
/*
* Call the teardown callback for each present cpu depending
* on the hotplug state of the cpu. This function is not
* allowed to fail currently!
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, NULL);
}
remove:
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
cpus_read_lock();
__cpuhp_remove_state_cpuslocked(state, invoke);
cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
struct cpuhp_step *sp;
int target, ret;
ret = kstrtoint(buf, 10, &target);
if (ret)
return ret;
#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
return -EINVAL;
#else
if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
return -EINVAL;
#endif
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
mutex_lock(&cpuhp_state_mutex);
sp = cpuhp_get_step(target);
ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
mutex_unlock(&cpuhp_state_mutex);
if (ret)
goto out;
if (st->state < target)
ret = do_cpu_up(dev->id, target);
else
ret = do_cpu_down(dev->id, target);
out:
unlock_device_hotplug();
return ret ? ret : count;
}
static ssize_t show_cpuhp_target(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
static ssize_t write_cpuhp_fail(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
struct cpuhp_step *sp;
int fail, ret;
ret = kstrtoint(buf, 10, &fail);
if (ret)
return ret;
if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
return -EINVAL;
/*
* Cannot fail STARTING/DYING callbacks.
*/
if (cpuhp_is_atomic_state(fail))
return -EINVAL;
/*
* Cannot fail anything that doesn't have callbacks.
*/
mutex_lock(&cpuhp_state_mutex);
sp = cpuhp_get_step(fail);
if (!sp->startup.single && !sp->teardown.single)
ret = -EINVAL;
mutex_unlock(&cpuhp_state_mutex);
if (ret)
return ret;
st->fail = fail;
return count;
}
static ssize_t show_cpuhp_fail(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->fail);
}
static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
static struct attribute *cpuhp_cpu_attrs[] = {
&dev_attr_state.attr,
&dev_attr_target.attr,
&dev_attr_fail.attr,
NULL
};
static const struct attribute_group cpuhp_cpu_attr_group = {
.attrs = cpuhp_cpu_attrs,
.name = "hotplug",
NULL
};
static ssize_t show_cpuhp_states(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t cur, res = 0;
int i;
mutex_lock(&cpuhp_state_mutex);
for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
struct cpuhp_step *sp = cpuhp_get_step(i);
if (sp->name) {
cur = sprintf(buf, "%3d: %s\n", i, sp->name);
buf += cur;
res += cur;
}
}
mutex_unlock(&cpuhp_state_mutex);
return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
static struct attribute *cpuhp_cpu_root_attrs[] = {
&dev_attr_states.attr,
NULL
};
static const struct attribute_group cpuhp_cpu_root_attr_group = {
.attrs = cpuhp_cpu_root_attrs,
.name = "hotplug",
NULL
};
#ifdef CONFIG_HOTPLUG_SMT
static const char *smt_states[] = {
[CPU_SMT_ENABLED] = "on",
[CPU_SMT_DISABLED] = "off",
[CPU_SMT_FORCE_DISABLED] = "forceoff",
[CPU_SMT_NOT_SUPPORTED] = "notsupported",
};
static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
}
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
dev->offline = true;
/* Tell user space about the state change */
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}
static void cpuhp_online_cpu_device(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
dev->offline = false;
/* Tell user space about the state change */
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
cpu_maps_update_begin();
for_each_online_cpu(cpu) {
if (topology_is_primary_thread(cpu))
continue;
ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
if (ret)
break;
/*
* As this needs to hold the cpu maps lock it's impossible
* to call device_offline() because that ends up calling
* cpu_down() which takes cpu maps lock. cpu maps lock
* needs to be held as this might race against in kernel
* abusers of the hotplug machinery (thermal management).
*
* So nothing would update device:offline state. That would
* leave the sysfs entry stale and prevent onlining after
* smt control has been changed to 'off' again. This is
* called under the sysfs hotplug lock, so it is properly
* serialized against the regular offline usage.
*/
cpuhp_offline_cpu_device(cpu);
}
if (!ret) {
cpu_smt_control = ctrlval;
arch_smt_update();
}
cpu_maps_update_done();
return ret;
}
int cpuhp_smt_enable(void)
{
int cpu, ret = 0;
cpu_maps_update_begin();
cpu_smt_control = CPU_SMT_ENABLED;
arch_smt_update();
for_each_present_cpu(cpu) {
/* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
continue;
ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
if (ret)
break;
/* See comment in cpuhp_smt_disable() */
cpuhp_online_cpu_device(cpu);
}
cpu_maps_update_done();
return ret;
}
static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ctrlval, ret;
if (sysfs_streq(buf, "on"))
ctrlval = CPU_SMT_ENABLED;
else if (sysfs_streq(buf, "off"))
ctrlval = CPU_SMT_DISABLED;
else if (sysfs_streq(buf, "forceoff"))
ctrlval = CPU_SMT_FORCE_DISABLED;
else
return -EINVAL;
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
return -EPERM;
if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
return -ENODEV;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
if (ctrlval != cpu_smt_control) {
switch (ctrlval) {
case CPU_SMT_ENABLED:
ret = cpuhp_smt_enable();
break;
case CPU_SMT_DISABLED:
case CPU_SMT_FORCE_DISABLED:
ret = cpuhp_smt_disable(ctrlval);
break;
}
}
unlock_device_hotplug();
return ret ? ret : count;
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
bool active = topology_max_smt_threads() > 1;
return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
static struct attribute *cpuhp_smt_attrs[] = {
&dev_attr_control.attr,
&dev_attr_active.attr,
NULL
};
static const struct attribute_group cpuhp_smt_attr_group = {
.attrs = cpuhp_smt_attrs,
.name = "smt",
NULL
};
static int __init cpu_smt_state_init(void)
{
return sysfs_create_group(&cpu_subsys.dev_root->kobj,
&cpuhp_smt_attr_group);
}
#else
static inline int cpu_smt_state_init(void) { return 0; }
#endif
static int __init cpuhp_sysfs_init(void)
{
int cpu, ret;
ret = cpu_smt_state_init();
if (ret)
return ret;
ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
&cpuhp_cpu_root_attr_group);
if (ret)
return ret;
for_each_possible_cpu(cpu) {
struct device *dev = get_cpu_device(cpu);
if (!dev)
continue;
ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
if (ret)
return ret;
}
return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
/*
* cpu_bit_bitmap[] is a special, "compressed" data structure that
* represents all NR_CPUS bits binary values of 1<<nr.
*
* It is used by cpumask_of() to get a constant address to a CPU
* mask value that has a single bit set only.
*/
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
MASK_DECLARE_8(0), MASK_DECLARE_8(8),
MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
MASK_DECLARE_8(32), MASK_DECLARE_8(40),
MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);
struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);
struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);
struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
struct cpumask __cpu_isolated_mask __read_mostly;
EXPORT_SYMBOL(__cpu_isolated_mask);
void init_cpu_present(const struct cpumask *src)
{
cpumask_copy(&__cpu_present_mask, src);
}
void init_cpu_possible(const struct cpumask *src)
{
cpumask_copy(&__cpu_possible_mask, src);
}
void init_cpu_online(const struct cpumask *src)
{
cpumask_copy(&__cpu_online_mask, src);
}
void init_cpu_isolated(const struct cpumask *src)
{
cpumask_copy(&__cpu_isolated_mask, src);
}
/*
* Activate the first processor.
*/
void __init boot_cpu_init(void)
{
int cpu = smp_processor_id();
/* Mark the boot cpu "present", "online" etc for SMP and UP case */
set_cpu_online(cpu, true);
set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
#ifdef CONFIG_SMP
__boot_cpu_id = cpu;
#endif
}
/*
* Must be called _AFTER_ setting up the per_cpu areas
*/
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
this_cpu_write(cpuhp_state.booted_once, true);
#endif
this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
{
atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
void idle_notifier_unregister(struct notifier_block *n)
{
atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
void idle_notifier_call_chain(unsigned long val)
{
atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
/*
* These are used for a global "mitigations=" cmdline option for toggling
* optional CPU mitigations.
*/
enum cpu_mitigations {
CPU_MITIGATIONS_OFF,
CPU_MITIGATIONS_AUTO,
CPU_MITIGATIONS_AUTO_NOSMT,
};
static enum cpu_mitigations cpu_mitigations __ro_after_init =
CPU_MITIGATIONS_AUTO;
static int __init mitigations_parse_cmdline(char *arg)
{
if (!strcmp(arg, "off"))
cpu_mitigations = CPU_MITIGATIONS_OFF;
else if (!strcmp(arg, "auto"))
cpu_mitigations = CPU_MITIGATIONS_AUTO;
else if (!strcmp(arg, "auto,nosmt"))
cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
else
pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
arg);
return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
/* mitigations=off */
bool cpu_mitigations_off(void)
{
return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);
/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);