kernel_xiaomi_sm8250/kernel/cpu.c
Michael Bestas ad51bd9fc5 Merge tag 'ASB-2023-02-05_4.19-stable' of https://android.googlesource.com/kernel/common into android13-4.19-kona
https://source.android.com/docs/security/bulletin/2023-02-01
CVE-2022-39189
CVE-2022-39842
CVE-2022-41222
CVE-2023-20937
CVE-2023-20938
CVE-2022-0850

* tag 'ASB-2023-02-05_4.19-stable' of https://android.googlesource.com/kernel/common:
  Linux 4.19.272
  usb: host: xhci-plat: add wakeup entry at sysfs
  ipv6: ensure sane device mtu in tunnels
  exit: Use READ_ONCE() for all oops/warn limit reads
  docs: Fix path paste-o for /sys/kernel/warn_count
  panic: Expose "warn_count" to sysfs
  panic: Introduce warn_limit
  panic: Consolidate open-coded panic_on_warn checks
  exit: Allow oops_limit to be disabled
  exit: Expose "oops_count" to sysfs
  exit: Put an upper limit on how often we can oops
  ia64: make IA64_MCA_RECOVERY bool instead of tristate
  h8300: Fix build errors from do_exit() to make_task_dead() transition
  hexagon: Fix function name in die()
  objtool: Add a missing comma to avoid string concatenation
  exit: Add and use make_task_dead.
  panic: unset panic_on_warn inside panic()
  sysctl: add a new register_sysctl_init() interface
  dmaengine: imx-sdma: Fix a possible memory leak in sdma_transfer_init
  ARM: dts: imx: Fix pca9547 i2c-mux node name
  x86/entry/64: Add instruction suffix to SYSRET
  x86/asm: Fix an assembler warning with current binutils
  drm/i915/display: fix compiler warning about array overrun
  x86/i8259: Mark legacy PIC interrupts with IRQ_LEVEL
  Revert "Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode"
  net/tg3: resolve deadlock in tg3_reset_task() during EEH
  net: ravb: Fix possible hang if RIS2_QFF1 happen
  sctp: fail if no bound addresses can be used for a given scope
  netrom: Fix use-after-free of a listening socket.
  netfilter: conntrack: fix vtag checks for ABORT/SHUTDOWN_COMPLETE
  ipv4: prevent potential spectre v1 gadget in ip_metrics_convert()
  netlink: annotate data races around sk_state
  netlink: annotate data races around dst_portid and dst_group
  netlink: annotate data races around nlk->portid
  netlink: remove hash::nelems check in netlink_insert
  netfilter: nft_set_rbtree: skip elements in transaction from garbage collection
  net: fix UaF in netns ops registration error path
  EDAC/device: Respect any driver-supplied workqueue polling value
  ARM: 9280/1: mm: fix warning on phys_addr_t to void pointer assignment
  cifs: Fix oops due to uncleared server->smbd_conn in reconnect
  smbd: Make upper layer decide when to destroy the transport
  trace_events_hist: add check for return value of 'create_hist_field'
  tracing: Make sure trace_printk() can output as soon as it can be used
  module: Don't wait for GOING modules
  scsi: hpsa: Fix allocation size for scsi_host_alloc()
  Bluetooth: hci_sync: cancel cmd_timer if hci_open failed
  fs: reiserfs: remove useless new_opts in reiserfs_remount
  perf env: Do not return pointers to local variables
  block: fix and cleanup bio_check_ro
  netfilter: conntrack: do not renew entry stuck in tcp SYN_SENT state
  w1: fix WARNING after calling w1_process()
  w1: fix deadloop in __w1_remove_master_device()
  tcp: avoid the lookup process failing to get sk in ehash table
  dmaengine: xilinx_dma: call of_node_put() when breaking out of for_each_child_of_node()
  dmaengine: xilinx_dma: Fix devm_platform_ioremap_resource error handling
  dmaengine: xilinx_dma: program hardware supported buffer length
  dmaengine: xilinx_dma: commonize DMA copy size calculation
  HID: betop: check shape of output reports
  net: macb: fix PTP TX timestamp failure due to packet padding
  dmaengine: Fix double increment of client_count in dma_chan_get()
  net: mlx5: eliminate anonymous module_init & module_exit
  usb: gadget: f_fs: Ensure ep0req is dequeued before free_request
  usb: gadget: f_fs: Prevent race during ffs_ep0_queue_wait
  HID: check empty report_list in hid_validate_values()
  net: mdio: validate parameter addr in mdiobus_get_phy()
  net: usb: sr9700: Handle negative len
  wifi: rndis_wlan: Prevent buffer overflow in rndis_query_oid
  net: nfc: Fix use-after-free in local_cleanup()
  phy: rockchip-inno-usb2: Fix missing clk_disable_unprepare() in rockchip_usb2phy_power_on()
  bpf: Fix pointer-leak due to insufficient speculative store bypass mitigation
  amd-xgbe: Delay AN timeout during KR training
  amd-xgbe: TX Flow Ctrl Registers are h/w ver dependent
  affs: initialize fsdata in affs_truncate()
  IB/hfi1: Fix expected receive setup error exit issues
  IB/hfi1: Reserve user expected TIDs
  IB/hfi1: Reject a zero-length user expected buffer
  tomoyo: fix broken dependency on *.conf.default
  EDAC/highbank: Fix memory leak in highbank_mc_probe()
  HID: intel_ish-hid: Add check for ishtp_dma_tx_map
  ARM: dts: imx6qdl-gw560x: Remove incorrect 'uart-has-rtscts'
  UPSTREAM: tcp: fix tcp_rmem documentation
  UPSTREAM: nvmem: core: skip child nodes not matching binding
  BACKPORT: nvmem: core: Fix a resource leak on error in nvmem_add_cells_from_of()
  UPSTREAM: sched/eas: Don't update misfit status if the task is pinned
  BACKPORT: arm64: link with -z norelro for LLD or aarch64-elf
  UPSTREAM: driver: core: Fix list corruption after device_del()
  UPSTREAM: coresight: tmc-etr: Fix barrier packet insertion for perf buffer
  UPSTREAM: f2fs: fix double free of unicode map
  BACKPORT: net: xfrm: fix memory leak in xfrm_user_policy()
  UPSTREAM: xfrm/compat: Don't allocate memory with __GFP_ZERO
  UPSTREAM: xfrm/compat: memset(0) 64-bit padding at right place
  UPSTREAM: xfrm/compat: Translate by copying XFRMA_UNSPEC attribute
  UPSTREAM: scsi: ufs: Fix missing brace warning for old compilers
  UPSTREAM: arm64: vdso32: make vdso32 install conditional
  UPSTREAM: loop: unset GENHD_FL_NO_PART_SCAN on LOOP_CONFIGURE
  BACKPORT: drm/virtio: fix missing dma_fence_put() in virtio_gpu_execbuffer_ioctl()
  BACKPORT: sched/uclamp: Protect uclamp fast path code with static key
  BACKPORT: sched/uclamp: Fix initialization of struct uclamp_rq
  UPSTREAM: coresight: etmv4: Fix CPU power management setup in probe() function
  UPSTREAM: arm64: vdso: Add --eh-frame-hdr to ldflags
  BACKPORT: arm64: vdso: Add '-Bsymbolic' to ldflags
  UPSTREAM: drm/virtio: fix a wait_event condition
  BACKPORT: sched/topology: Don't try to build empty sched domains
  BACKPORT: binder: prevent UAF read in print_binder_transaction_log_entry()
  BACKPORT: copy_process(): don't use ksys_close() on cleanups
  BACKPORT: arm64: vdso: Remove unnecessary asm-offsets.c definitions
  UPSTREAM: locking/lockdep, cpu/hotplug: Annotate AP thread
  Revert "xhci: Add a flag to disable USB3 lpm on a xhci root port level."
  BACKPORT: mac80211_hwsim: add concurrent channels scanning support over virtio
  BACKPORT: mac80211_hwsim: add frame transmission support over virtio
  BACKPORT: driver core: Skip unnecessary work when device doesn't have sync_state()
  Linux 4.19.271
  x86/fpu: Use _Alignof to avoid undefined behavior in TYPE_ALIGN
  Revert "ext4: generalize extents status tree search functions"
  Revert "ext4: add new pending reservation mechanism"
  Revert "ext4: fix reserved cluster accounting at delayed write time"
  Revert "ext4: fix delayed allocation bug in ext4_clu_mapped for bigalloc + inline"
  gsmi: fix null-deref in gsmi_get_variable
  serial: atmel: fix incorrect baudrate setup
  serial: pch_uart: Pass correct sg to dma_unmap_sg()
  usb-storage: apply IGNORE_UAS only for HIKSEMI MD202 on RTL9210
  usb: gadget: f_ncm: fix potential NULL ptr deref in ncm_bitrate()
  usb: gadget: g_webcam: Send color matching descriptor per frame
  usb: typec: altmodes/displayport: Fix pin assignment calculation
  usb: typec: altmodes/displayport: Add pin assignment helper
  usb: host: ehci-fsl: Fix module alias
  USB: serial: cp210x: add SCALANCE LPE-9000 device id
  cifs: do not include page data when checking signature
  mmc: sunxi-mmc: Fix clock refcount imbalance during unbind
  comedi: adv_pci1760: Fix PWM instruction handling
  usb: core: hub: disable autosuspend for TI TUSB8041
  USB: misc: iowarrior: fix up header size for USB_DEVICE_ID_CODEMERCS_IOW100
  USB: serial: option: add Quectel EM05CN modem
  USB: serial: option: add Quectel EM05CN (SG) modem
  USB: serial: option: add Quectel EC200U modem
  USB: serial: option: add Quectel EM05-G (RS) modem
  USB: serial: option: add Quectel EM05-G (CS) modem
  USB: serial: option: add Quectel EM05-G (GR) modem
  prlimit: do_prlimit needs to have a speculation check
  xhci: Add a flag to disable USB3 lpm on a xhci root port level.
  xhci: Fix null pointer dereference when host dies
  usb: xhci: Check endpoint is valid before dereferencing it
  xhci-pci: set the dma max_seg_size
  nilfs2: fix general protection fault in nilfs_btree_insert()
  Add exception protection processing for vd in axi_chan_handle_err function
  f2fs: let's avoid panic if extent_tree is not created
  RDMA/srp: Move large values to a new enum for gcc13
  net/ethtool/ioctl: return -EOPNOTSUPP if we have no phy stats
  pNFS/filelayout: Fix coalescing test for single DS
  ANDROID: usb: f_accessory: Check buffer size when initialised via composite
  Linux 4.19.270
  serial: tegra: Change lower tolerance baud rate limit for tegra20 and tegra30
  serial: tegra: Only print FIFO error message when an error occurs
  tty: serial: tegra: Handle RX transfer in PIO mode if DMA wasn't started
  Revert "usb: ulpi: defer ulpi_register on ulpi_read_id timeout"
  efi: fix NULL-deref in init error path
  arm64: cmpxchg_double*: hazard against entire exchange variable
  drm/virtio: Fix GEM handle creation UAF
  x86/resctrl: Fix task CLOSID/RMID update race
  x86/resctrl: Use task_curr() instead of task_struct->on_cpu to prevent unnecessary IPI
  iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()
  iommu/mediatek-v1: Add error handle for mtk_iommu_probe
  net/mlx5: Fix ptp max frequency adjustment range
  net/mlx5: Rename ptp clock info
  nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame()
  hvc/xen: lock console list traversal
  regulator: da9211: Use irq handler when ready
  EDAC/device: Fix period calculation in edac_device_reset_delay_period()
  x86/boot: Avoid using Intel mnemonics in AT&T syntax asm
  netfilter: ipset: Fix overflow before widen in the bitmap_ip_create() function.
  ext4: fix delayed allocation bug in ext4_clu_mapped for bigalloc + inline
  ext4: fix reserved cluster accounting at delayed write time
  ext4: add new pending reservation mechanism
  ext4: generalize extents status tree search functions
  ext4: fix uninititialized value in 'ext4_evict_inode'
  ext4: fix use-after-free in ext4_orphan_cleanup
  ext4: lost matching-pair of trace in ext4_truncate
  ext4: fix bug_on in __es_tree_search caused by bad quota inode
  quota: Factor out setup of quota inode
  usb: ulpi: defer ulpi_register on ulpi_read_id timeout
  kest.pl: Fix grub2 menu handling for rebooting
  ktest.pl: Fix incorrect reboot for grub2bls
  ktest: introduce grub2bls REBOOT_TYPE option
  ktest: cleanup get_grub_index
  ktest: introduce _get_grub_index
  ktest: Add support for meta characters in GRUB_MENU
  ALSA: hda/hdmi: fix failures at PCM open on Intel ICL and later
  wifi: wilc1000: sdio: fix module autoloading
  ipv6: raw: Deduct extension header length in rawv6_push_pending_frames
  platform/x86: sony-laptop: Don't turn off 0x153 keyboard backlight during probe
  cifs: Fix uninitialized memory read for smb311 posix symlink create
  ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF
  net/ulp: prevent ULP without clone op from entering the LISTEN status
  s390/percpu: add READ_ONCE() to arch_this_cpu_to_op_simple()
  perf auxtrace: Fix address filter duplicate symbol selection
  docs: Fix the docs build with Sphinx 6.0
  net: sched: disallow noqueue for qdisc classes
  driver core: Fix bus_type.match() error handling in __driver_attach()
  parisc: Align parisc MADV_XXX constants with all other architectures
  mbcache: Avoid nesting of cache->c_list_lock under bit locks
  hfs/hfsplus: avoid WARN_ON() for sanity check, use proper error handling
  hfs/hfsplus: use WARN_ON for sanity check
  ext4: don't allow journal inode to have encrypt flag
  riscv: uaccess: fix type of 0 variable on error in get_user()
  nfsd: fix handling of readdir in v4root vs. mount upcall timeout
  x86/bugs: Flush IBP in ib_prctl_set()
  ASoC: Intel: bytcr_rt5640: Add quirk for the Advantech MICA-071 tablet
  udf: Fix extension of the last extent in the file
  caif: fix memory leak in cfctrl_linkup_request()
  usb: rndis_host: Secure rndis_query check against int overflow
  net: sched: atm: dont intepret cls results when asked to drop
  RDMA/mlx5: Fix validation of max_rd_atomic caps for DC
  net: phy: xgmiitorgmii: Fix refcount leak in xgmiitorgmii_probe
  net: amd-xgbe: add missed tasklet_kill
  nfc: Fix potential resource leaks
  qlcnic: prevent ->dcb use-after-free on qlcnic_dcb_enable() failure
  bpf: pull before calling skb_postpull_rcsum()
  SUNRPC: ensure the matching upcall is in-flight upon downcall
  ext4: fix deadlock due to mbcache entry corruption
  mbcache: automatically delete entries from cache on freeing
  ext4: fix race when reusing xattr blocks
  ext4: unindent codeblock in ext4_xattr_block_set()
  ext4: remove EA inode entry from mbcache on inode eviction
  mbcache: add functions to delete entry if unused
  mbcache: don't reclaim used entries
  ext4: use kmemdup() to replace kmalloc + memcpy
  ext4: correct inconsistent error msg in nojournal mode
  ext4: goto right label 'failed_mount3a'
  driver core: Set deferred_probe_timeout to a longer default if CONFIG_MODULES is set
  ravb: Fix "failed to switch device to config mode" message during unbind
  perf probe: Fix to get the DW_AT_decl_file and DW_AT_call_file as unsinged data
  perf probe: Use dwarf_attr_integrate as generic DWARF attr accessor
  dm thin: resume even if in FAIL mode
  media: s5p-mfc: Fix in register read and write for H264
  media: s5p-mfc: Clear workbit to handle error condition
  media: s5p-mfc: Fix to handle reference queue during finishing
  btrfs: replace strncpy() with strscpy()
  btrfs: send: avoid unnecessary backref lookups when finding clone source
  ext4: allocate extended attribute value in vmalloc area
  ext4: avoid unaccounted block allocation when expanding inode
  ext4: initialize quota before expanding inode in setproject ioctl
  ext4: fix inode leak in ext4_xattr_inode_create() on an error path
  ext4: avoid BUG_ON when creating xattrs
  ext4: fix error code return to user-space in ext4_get_branch()
  ext4: fix corruption when online resizing a 1K bigalloc fs
  ext4: init quota for 'old.inode' in 'ext4_rename'
  ext4: fix bug_on in __es_tree_search caused by bad boot loader inode
  ext4: add helper to check quota inums
  ext4: fix undefined behavior in bit shift for ext4_check_flag_values
  ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop
  drm/vmwgfx: Validate the box size for the snooped cursor
  drm/connector: send hotplug uevent on connector cleanup
  device_cgroup: Roll back to original exceptions after copy failure
  parisc: led: Fix potential null-ptr-deref in start_task()
  iommu/amd: Fix ivrs_acpihid cmdline parsing code
  crypto: n2 - add missing hash statesize
  PCI/sysfs: Fix double free in error path
  PCI: Fix pci_device_is_present() for VFs by checking PF
  ipmi: fix use after free in _ipmi_destroy_user()
  ima: Fix a potential NULL pointer access in ima_restore_measurement_list
  ipmi: fix long wait in unload when IPMI disconnect
  md/bitmap: Fix bitmap chunk size overflow issues
  cifs: fix confusing debug message
  media: dvb-core: Fix UAF due to refcount races at releasing
  media: dvb-core: Fix double free in dvb_register_device()
  ARM: 9256/1: NWFPE: avoid compiler-generated __aeabi_uldivmod
  tracing: Fix infinite loop in tracing_read_pipe on overflowed print_trace_line
  x86/microcode/intel: Do not retry microcode reloading on the APs
  dm cache: set needs_check flag after aborting metadata
  dm cache: Fix UAF in destroy()
  dm thin: Fix UAF in run_timer_softirq()
  dm thin: Use last transaction's pmd->root when commit failed
  dm cache: Fix ABBA deadlock between shrink_slab and dm_cache_metadata_abort
  binfmt: Fix error return code in load_elf_fdpic_binary()
  binfmt: Move install_exec_creds after setup_new_exec to match binfmt_elf
  selftests: Use optional USERCFLAGS and USERLDFLAGS
  ARM: ux500: do not directly dereference __iomem
  ktest.pl minconfig: Unset configs instead of just removing them
  soc: qcom: Select REMAP_MMIO for LLCC driver
  media: stv0288: use explicitly signed char
  SUNRPC: Don't leak netobj memory when gss_read_proxy_verf() fails
  tpm: tpm_tis: Add the missed acpi_put_table() to fix memory leak
  tpm: tpm_crb: Add the missed acpi_put_table() to fix memory leak
  mmc: vub300: fix warning - do not call blocking ops when !TASK_RUNNING
  md: fix a crash in mempool_free
  pnode: terminate at peers of source
  ALSA: line6: fix stack overflow in line6_midi_transmit
  ALSA: line6: correct midi status byte when receiving data from podxt
  ovl: Use ovl mounter's fsuid and fsgid in ovl_link()
  hfsplus: fix bug causing custom uid and gid being unable to be assigned with mount
  HID: plantronics: Additional PIDs for double volume key presses quirk
  powerpc/rtas: avoid scheduling in rtas_os_term()
  powerpc/rtas: avoid device tree lookups in rtas_os_term()
  ata: ahci: Fix PCS quirk application for suspend
  media: dvbdev: fix refcnt bug
  media: dvbdev: fix build warning due to comments
  gcov: add support for checksum field
  iio: adc: ad_sigma_delta: do not use internal iio_dev lock
  reiserfs: Add missing calls to reiserfs_security_free()
  HID: wacom: Ensure bootloader PID is usable in hidraw mode
  usb: dwc3: core: defer probe on ulpi_read_id timeout
  pstore: Make sure CONFIG_PSTORE_PMSG selects CONFIG_RT_MUTEXES
  pstore: Switch pmsg_lock to an rt_mutex to avoid priority inversion
  ASoC: rt5670: Remove unbalanced pm_runtime_put()
  ASoC: rockchip: spdif: Add missing clk_disable_unprepare() in rk_spdif_runtime_resume()
  ASoC: wm8994: Fix potential deadlock
  ASoC: rockchip: pdm: Add missing clk_disable_unprepare() in rockchip_pdm_runtime_resume()
  ASoC: mediatek: mt8173-rt5650-rt5514: fix refcount leak in mt8173_rt5650_rt5514_dev_probe()
  orangefs: Fix kmemleak in orangefs_prepare_debugfs_help_string()
  drm/sti: Fix return type of sti_{dvo,hda,hdmi}_connector_mode_valid()
  drm/fsl-dcu: Fix return type of fsl_dcu_drm_connector_mode_valid()
  clk: st: Fix memory leak in st_of_quadfs_setup()
  media: si470x: Fix use-after-free in si470x_int_in_callback()
  mmc: f-sdh30: Add quirks for broken timeout clock capability
  regulator: core: fix use_count leakage when handling boot-on
  blk-mq: fix possible memleak when register 'hctx' failed
  media: dvb-usb: fix memory leak in dvb_usb_adapter_init()
  media: dvbdev: adopts refcnt to avoid UAF
  media: dvb-frontends: fix leak of memory fw
  ppp: associate skb with a device at tx
  mrp: introduce active flags to prevent UAF when applicant uninit
  md/raid1: stop mdx_raid1 thread when raid1 array run failed
  drivers/md/md-bitmap: check the return value of md_bitmap_get_counter()
  drm/sti: Use drm_mode_copy()
  s390/lcs: Fix return type of lcs_start_xmit()
  s390/netiucv: Fix return type of netiucv_tx()
  s390/ctcm: Fix return type of ctc{mp,}m_tx()
  drm/amdgpu: Fix type of second parameter in trans_msg() callback
  igb: Do not free q_vector unless new one was allocated
  wifi: brcmfmac: Fix potential shift-out-of-bounds in brcmf_fw_alloc_request()
  hamradio: baycom_epp: Fix return type of baycom_send_packet()
  net: ethernet: ti: Fix return type of netcp_ndo_start_xmit()
  bpf: make sure skb->len != 0 when redirecting to a tunneling device
  ipmi: fix memleak when unload ipmi driver
  ASoC: codecs: rt298: Add quirk for KBL-R RVP platform
  wifi: ar5523: Fix use-after-free on ar5523_cmd() timed out
  wifi: ath9k: verify the expected usb_endpoints are present
  hfs: fix OOB Read in __hfs_brec_find
  acct: fix potential integer overflow in encode_comp_t()
  nilfs2: fix shift-out-of-bounds/overflow in nilfs_sb2_bad_offset()
  ACPICA: Fix error code path in acpi_ds_call_control_method()
  fs: jfs: fix shift-out-of-bounds in dbDiscardAG
  udf: Avoid double brelse() in udf_rename()
  fs: jfs: fix shift-out-of-bounds in dbAllocAG
  binfmt_misc: fix shift-out-of-bounds in check_special_flags
  net: stream: purge sk_error_queue in sk_stream_kill_queues()
  myri10ge: Fix an error handling path in myri10ge_probe()
  rxrpc: Fix missing unlock in rxrpc_do_sendmsg()
  net_sched: reject TCF_EM_SIMPLE case for complex ematch module
  skbuff: Account for tail adjustment during pull operations
  openvswitch: Fix flow lookup to use unmasked key
  rtc: mxc_v2: Add missing clk_disable_unprepare()
  r6040: Fix kmemleak in probe and remove
  nfc: pn533: Clear nfc_target before being used
  mISDN: hfcmulti: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
  mISDN: hfcpci: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
  mISDN: hfcsusb: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
  nfsd: under NFSv4.1, fix double svc_xprt_put on rpc_create failure
  rtc: st-lpc: Add missing clk_disable_unprepare in st_rtc_probe()
  selftests/powerpc: Fix resource leaks
  powerpc/hv-gpci: Fix hv_gpci event list
  powerpc/83xx/mpc832x_rdb: call platform_device_put() in error case in of_fsl_spi_probe()
  powerpc/perf: callchain validate kernel stack pointer bounds
  powerpc/xive: add missing iounmap() in error path in xive_spapr_populate_irq_data()
  cxl: Fix refcount leak in cxl_calc_capp_routing
  powerpc/52xx: Fix a resource leak in an error handling path
  macintosh/macio-adb: check the return value of ioremap()
  macintosh: fix possible memory leak in macio_add_one_device()
  iommu/fsl_pamu: Fix resource leak in fsl_pamu_probe()
  iommu/amd: Fix pci device refcount leak in ppr_notifier()
  rtc: snvs: Allow a time difference on clock register read
  include/uapi/linux/swab: Fix potentially missing __always_inline
  HSI: omap_ssi_core: Fix error handling in ssi_init()
  perf symbol: correction while adjusting symbol
  power: supply: fix residue sysfs file in error handle route of __power_supply_register()
  HSI: omap_ssi_core: fix possible memory leak in ssi_probe()
  HSI: omap_ssi_core: fix unbalanced pm_runtime_disable()
  fbdev: uvesafb: Fixes an error handling path in uvesafb_probe()
  fbdev: vermilion: decrease reference count in error path
  fbdev: via: Fix error in via_core_init()
  fbdev: pm2fb: fix missing pci_disable_device()
  fbdev: ssd1307fb: Drop optional dependency
  samples: vfio-mdev: Fix missing pci_disable_device() in mdpy_fb_probe()
  tracing/hist: Fix issue of losting command info in error_log
  usb: storage: Add check for kcalloc
  i2c: ismt: Fix an out-of-bounds bug in ismt_access()
  vme: Fix error not catched in fake_init()
  staging: rtl8192e: Fix potential use-after-free in rtllib_rx_Monitor()
  staging: rtl8192u: Fix use after free in ieee80211_rx()
  i2c: pxa-pci: fix missing pci_disable_device() on error in ce4100_i2c_probe
  chardev: fix error handling in cdev_device_add()
  mcb: mcb-parse: fix error handing in chameleon_parse_gdd()
  drivers: mcb: fix resource leak in mcb_probe()
  usb: gadget: f_hid: fix refcount leak on error path
  usb: gadget: f_hid: fix f_hidg lifetime vs cdev
  usb: gadget: f_hid: optional SETUP/SET_REPORT mode
  cxl: fix possible null-ptr-deref in cxl_pci_init_afu|adapter()
  cxl: fix possible null-ptr-deref in cxl_guest_init_afu|adapter()
  misc: sgi-gru: fix use-after-free error in gru_set_context_option, gru_fault and gru_handle_user_call_os
  misc: tifm: fix possible memory leak in tifm_7xx1_switch_media()
  test_firmware: fix memory leak in test_firmware_init()
  serial: sunsab: Fix error handling in sunsab_init()
  serial: altera_uart: fix locking in polling mode
  tty: serial: altera_uart_{r,t}x_chars() need only uart_port
  tty: serial: clean up stop-tx part in altera_uart_tx_chars()
  serial: pch: Fix PCI device refcount leak in pch_request_dma()
  serial: pl011: Do not clear RX FIFO & RX interrupt in unthrottle.
  serial: amba-pl011: avoid SBSA UART accessing DMACR register
  usb: typec: Check for ops->exit instead of ops->enter in altmode_exit
  staging: vme_user: Fix possible UAF in tsi148_dma_list_add
  usb: fotg210-udc: Fix ages old endianness issues
  uio: uio_dmem_genirq: Fix deadlock between irq config and handling
  uio: uio_dmem_genirq: Fix missing unlock in irq configuration
  vfio: platform: Do not pass return buffer to ACPI _RST method
  class: fix possible memory leak in __class_register()
  serial: tegra: Read DMA status before terminating
  tty: serial: tegra: Activate RX DMA transfer by request
  serial: tegra: Add PIO mode support
  serial: tegra: report clk rate errors
  serial: tegra: add support to adjust baud rate
  serial: tegra: add support to use 8 bytes trigger
  serial: tegra: set maximum num of uart ports to 8
  serial: tegra: check for FIFO mode enabled status
  serial: tegra: avoid reg access when clk disabled
  drivers: dio: fix possible memory leak in dio_init()
  IB/IPoIB: Fix queue count inconsistency for PKEY child interfaces
  hwrng: geode - Fix PCI device refcount leak
  hwrng: amd - Fix PCI device refcount leak
  crypto: img-hash - Fix variable dereferenced before check 'hdev->req'
  orangefs: Fix sysfs not cleanup when dev init failed
  RDMA/hfi1: Fix error return code in parse_platform_config()
  scsi: snic: Fix possible UAF in snic_tgt_create()
  scsi: fcoe: Fix transport not deattached when fcoe_if_init() fails
  scsi: ipr: Fix WARNING in ipr_init()
  scsi: fcoe: Fix possible name leak when device_register() fails
  scsi: hpsa: Fix possible memory leak in hpsa_add_sas_device()
  scsi: hpsa: Fix error handling in hpsa_add_sas_host()
  crypto: tcrypt - Fix multibuffer skcipher speed test mem leak
  scsi: hpsa: Fix possible memory leak in hpsa_init_one()
  scsi: hpsa: use local workqueues instead of system workqueues
  RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when socket create failed
  crypto: ccree - Make cc_debugfs_global_fini() available for module init function
  RDMA/hfi: Decrease PCI device reference count in error path
  PCI: Check for alloc failure in pci_request_irq()
  scsi: scsi_debug: Fix a warning in resp_write_scat()
  RDMA/nldev: Return "-EAGAIN" if the cm_id isn't from expected port
  f2fs: fix normal discard process
  apparmor: Fix abi check to include v8 abi
  apparmor: fix lockdep warning when removing a namespace
  apparmor: fix a memleak in multi_transaction_new()
  stmmac: fix potential division by 0
  Bluetooth: RFCOMM: don't call kfree_skb() under spin_lock_irqsave()
  Bluetooth: hci_core: don't call kfree_skb() under spin_lock_irqsave()
  Bluetooth: hci_bcsp: don't call kfree_skb() under spin_lock_irqsave()
  Bluetooth: hci_h5: don't call kfree_skb() under spin_lock_irqsave()
  Bluetooth: hci_qca: don't call kfree_skb() under spin_lock_irqsave()
  Bluetooth: btusb: don't call kfree_skb() under spin_lock_irqsave()
  ntb_netdev: Use dev_kfree_skb_any() in interrupt context
  net: lan9303: Fix read error execution path
  net: amd-xgbe: Check only the minimum speed for active/passive cables
  net: amd-xgbe: Fix logic around active and passive cables
  net: amd: lance: don't call dev_kfree_skb() under spin_lock_irqsave()
  hamradio: don't call dev_kfree_skb() under spin_lock_irqsave()
  net: ethernet: dnet: don't call dev_kfree_skb() under spin_lock_irqsave()
  net: emaclite: don't call dev_kfree_skb() under spin_lock_irqsave()
  net: apple: bmac: don't call dev_kfree_skb() under spin_lock_irqsave()
  net: apple: mace: don't call dev_kfree_skb() under spin_lock_irqsave()
  net/tunnel: wait until all sk_user_data reader finish before releasing the sock
  net: farsync: Fix kmemleak when rmmods farsync
  ethernet: s2io: don't call dev_kfree_skb() under spin_lock_irqsave()
  drivers: net: qlcnic: Fix potential memory leak in qlcnic_sriov_init()
  net: defxx: Fix missing err handling in dfx_init()
  net: vmw_vsock: vmci: Check memcpy_from_msg()
  clk: socfpga: use clk_hw_register for a5/c5
  clk: socfpga: clk-pll: Remove unused variable 'rc'
  blktrace: Fix output non-blktrace event when blk_classic option enabled
  wifi: brcmfmac: Fix error return code in brcmf_sdio_download_firmware()
  rtl8xxxu: add enumeration for channel bandwidth
  wifi: rtl8xxxu: Add __packed to struct rtl8723bu_c2h
  clk: samsung: Fix memory leak in _samsung_clk_register_pll()
  media: coda: Add check for kmalloc
  media: coda: Add check for dcoda_iram_alloc
  media: c8sectpfe: Add of_node_put() when breaking out of loop
  mmc: mmci: fix return value check of mmc_add_host()
  mmc: wbsd: fix return value check of mmc_add_host()
  mmc: via-sdmmc: fix return value check of mmc_add_host()
  mmc: meson-gx: fix return value check of mmc_add_host()
  mmc: atmel-mci: fix return value check of mmc_add_host()
  mmc: wmt-sdmmc: fix return value check of mmc_add_host()
  mmc: vub300: fix return value check of mmc_add_host()
  mmc: toshsd: fix return value check of mmc_add_host()
  mmc: rtsx_usb_sdmmc: fix return value check of mmc_add_host()
  mmc: mxcmmc: fix return value check of mmc_add_host()
  mmc: moxart: fix return value check of mmc_add_host()
  NFSv4.x: Fail client initialisation if state manager thread can't run
  SUNRPC: Fix missing release socket in rpc_sockname()
  ALSA: mts64: fix possible null-ptr-defer in snd_mts64_interrupt
  media: saa7164: fix missing pci_disable_device()
  regulator: core: fix module refcount leak in set_supply()
  wifi: cfg80211: Fix not unregister reg_pdev when load_builtin_regdb_keys() fails
  bonding: uninitialized variable in bond_miimon_inspect()
  ASoC: pcm512x: Fix PM disable depth imbalance in pcm512x_probe
  drm/amdgpu: Fix PCI device refcount leak in amdgpu_atrm_get_bios()
  drm/radeon: Fix PCI device refcount leak in radeon_atrm_get_bios()
  ALSA: asihpi: fix missing pci_disable_device()
  NFSv4: Fix a deadlock between nfs4_open_recover_helper() and delegreturn
  NFSv4.2: Fix a memory stomp in decode_attr_security_label
  drm/tegra: Add missing clk_disable_unprepare() in tegra_dc_probe()
  media: s5p-mfc: Add variant data for MFC v7 hardware for Exynos 3250 SoC
  media: dvb-usb: az6027: fix null-ptr-deref in az6027_i2c_xfer()
  media: dvb-core: Fix ignored return value in dvb_register_frontend()
  pinctrl: pinconf-generic: add missing of_node_put()
  media: imon: fix a race condition in send_packet()
  drbd: remove call to memset before free device/resource/connection
  mtd: maps: pxa2xx-flash: fix memory leak in probe
  bonding: Export skip slave logic to function
  clk: rockchip: Fix memory leak in rockchip_clk_register_pll()
  ALSA: seq: fix undefined behavior in bit shift for SNDRV_SEQ_FILTER_USE_EVENT
  HID: hid-sensor-custom: set fixed size for custom attributes
  media: platform: exynos4-is: Fix error handling in fimc_md_init()
  media: solo6x10: fix possible memory leak in solo_sysfs_init()
  Input: elants_i2c - properly handle the reset GPIO when power is off
  mtd: lpddr2_nvm: Fix possible null-ptr-deref
  wifi: ath10k: Fix return value in ath10k_pci_init()
  ima: Fix misuse of dereference of pointer in template_desc_init_fields()
  regulator: core: fix unbalanced of node refcount in regulator_dev_lookup()
  ASoC: pxa: fix null-pointer dereference in filter()
  drm/radeon: Add the missed acpi_put_table() to fix memory leak
  net, proc: Provide PROC_FS=n fallback for proc_create_net_single_write()
  media: camss: Clean up received buffers on failed start of streaming
  wifi: rsi: Fix handling of 802.3 EAPOL frames sent via control port
  mtd: Fix device name leak when register device failed in add_mtd_device()
  media: vivid: fix compose size exceed boundary
  spi: Update reference to struct spi_controller
  can: kvaser_usb: Compare requested bittiming parameters with actual parameters in do_set_{,data}_bittiming
  can: kvaser_usb: Add struct kvaser_usb_busparams
  can: kvaser_usb_leaf: Fix bogus restart events
  can: kvaser_usb_leaf: Fix wrong CAN state after stopping
  can: kvaser_usb_leaf: Fix improved state not being reported
  can: kvaser_usb_leaf: Set Warning state even without bus errors
  can: kvaser_usb: kvaser_usb_leaf: Handle CMD_ERROR_EVENT
  can: kvaser_usb: kvaser_usb_leaf: Rename {leaf,usbcan}_cmd_error_event to {leaf,usbcan}_cmd_can_error_event
  can: kvaser_usb: kvaser_usb_leaf: Get capabilities from device
  can: kvaser_usb: do not increase tx statistics when sending error message frames
  media: i2c: ad5820: Fix error path
  pata_ipx4xx_cf: Fix unsigned comparison with less than zero
  wifi: rtl8xxxu: Fix reading the vendor of combo chips
  wifi: ath9k: hif_usb: Fix use-after-free in ath9k_hif_usb_reg_in_cb()
  wifi: ath9k: hif_usb: fix memory leak of urbs in ath9k_hif_usb_dealloc_tx_urbs()
  rapidio: devices: fix missing put_device in mport_cdev_open
  hfs: Fix OOB Write in hfs_asc2mac
  relay: fix type mismatch when allocating memory in relay_create_buf()
  eventfd: change int to __u64 in eventfd_signal() ifndef CONFIG_EVENTFD
  rapidio: fix possible UAF when kfifo_alloc() fails
  fs: sysv: Fix sysv_nblocks() returns wrong value
  MIPS: BCM63xx: Add check for NULL for clk in clk_enable
  platform/x86: mxm-wmi: fix memleak in mxm_wmi_call_mx[ds|mx]()
  PM: runtime: Do not call __rpm_callback() from rpm_idle()
  PM: runtime: Improve path in rpm_idle() when no callback
  xen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource()
  x86/xen: Fix memory leak in xen_init_lock_cpu()
  x86/xen: Fix memory leak in xen_smp_intr_init{_pv}()
  xen/events: only register debug interrupt for 2-level events
  uprobes/x86: Allow to probe a NOP instruction with 0x66 prefix
  ACPICA: Fix use-after-free in acpi_ut_copy_ipackage_to_ipackage()
  clocksource/drivers/sh_cmt: Make sure channel clock supply is enabled
  rapidio: rio: fix possible name leak in rio_register_mport()
  rapidio: fix possible name leaks when rio_add_device() fails
  debugfs: fix error when writing negative value to atomic_t debugfs file
  lib/notifier-error-inject: fix error when writing -errno to debugfs file
  libfs: add DEFINE_SIMPLE_ATTRIBUTE_SIGNED for signed value
  cpufreq: amd_freq_sensitivity: Add missing pci_dev_put()
  irqchip: gic-pm: Use pm_runtime_resume_and_get() in gic_probe()
  perf/x86/intel/uncore: Fix reference count leak in hswep_has_limit_sbox()
  PNP: fix name memory leak in pnp_alloc_dev()
  MIPS: vpe-cmp: fix possible memory leak while module exiting
  MIPS: vpe-mt: fix possible memory leak while module exiting
  ocfs2: fix memory leak in ocfs2_stack_glue_init()
  proc: fixup uptime selftest
  timerqueue: Use rb_entry_safe() in timerqueue_getnext()
  perf: Fix possible memleak in pmu_dev_alloc()
  selftests/ftrace: event_triggers: wait longer for test_event_enable
  fs: don't audit the capability check in simple_xattr_list()
  alpha: fix syscall entry in !AUDUT_SYSCALL case
  cpuidle: dt: Return the correct numbers of parsed idle states
  tpm/tpm_crb: Fix error message in __crb_relinquish_locality()
  pstore: Avoid kcore oops by vmap()ing with VM_IOREMAP
  ARM: mmp: fix timer_read delay
  pstore/ram: Fix error return code in ramoops_probe()
  ARM: dts: turris-omnia: Add switch port 6 node
  ARM: dts: turris-omnia: Add ethernet aliases
  ARM: dts: armada-39x: Fix assigned-addresses for every PCIe Root Port
  ARM: dts: armada-38x: Fix assigned-addresses for every PCIe Root Port
  ARM: dts: armada-375: Fix assigned-addresses for every PCIe Root Port
  ARM: dts: armada-xp: Fix assigned-addresses for every PCIe Root Port
  ARM: dts: armada-370: Fix assigned-addresses for every PCIe Root Port
  ARM: dts: dove: Fix assigned-addresses for every PCIe Root Port
  arm64: dts: mediatek: mt6797: Fix 26M oscillator unit name
  arm64: dts: mt2712-evb: Fix vproc fixed regulators unit names
  arm64: dts: mt2712e: Fix unit address for pinctrl node
  arm64: dts: mt2712e: Fix unit_address_vs_reg warning for oscillators
  perf: arm_dsu: Fix hotplug callback leak in dsu_pmu_init()
  soc: ti: smartreflex: Fix PM disable depth imbalance in omap_sr_probe
  arm: dts: spear600: Fix clcd interrupt
  drivers: soc: ti: knav_qmss_queue: Mark knav_acc_firmwares as static
  ARM: dts: qcom: apq8064: fix coresight compatible
  usb: musb: remove extra check in musb_gadget_vbus_draw
  net: loopback: use NET_NAME_PREDICTABLE for name_assign_type
  Bluetooth: L2CAP: Fix u8 overflow
  igb: Initialize mailbox message for VF reset
  USB: serial: f81534: fix division by zero on line-speed change
  USB: serial: cp210x: add Kamstrup RF sniffer PIDs
  USB: serial: option: add Quectel EM05-G modem
  usb: gadget: uvc: Prevent buffer overflow in setup handler
  udf: Fix extending file within last block
  udf: Do not bother looking for prealloc extents if i_lenExtents matches i_size
  udf: Fix preallocation discarding at indirect extent boundary
  udf: Discard preallocation before extending file with a hole
  perf script python: Remove explicit shebang from tests/attr.c
  ASoC: ops: Correct bounds check for second channel on SX controls
  can: mcba_usb: Fix termination command argument
  can: sja1000: fix size of OCR_MODE_MASK define
  pinctrl: meditatek: Startup with the IRQs disabled
  ASoC: ops: Check bounds for second channel in snd_soc_put_volsw_sx()
  nfp: fix use-after-free in area_cache_get()
  block: unhash blkdev part inode when the part is deleted
  mm/khugepaged: invoke MMU notifiers in shmem/file collapse paths
  mm/khugepaged: fix GUP-fast interaction by sending IPI
  ANDROID: Add more hvc devices for virtio-console.

 Conflicts:
	drivers/base/core.c
	drivers/edac/edac_device.c
	drivers/hwtracing/coresight/coresight-etm4x.c
	drivers/net/wireless/mac80211_hwsim.c
	drivers/scsi/ufs/ufshcd-crypto.c
	drivers/usb/gadget/function/f_fs.c
	drivers/usb/gadget/function/f_hid.c

Change-Id: Ied998db07e927ccb3376a78f044df36088d9e3b8
2023-02-08 17:19:24 +02:00


/* CPU control.
* (C) 2001, 2002, 2003, 2004 Rusty Russell
*
* This code is licensed under the GPL.
*/
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <uapi/linux/sched/types.h>
#include <linux/cpuset.h>
#include <linux/random.h>
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
#include <linux/sched/clock.h>
#include "smpboot.h"
/**
* cpuhp_cpu_state - Per cpu hotplug state storage
* @state: The current cpu state
* @target: The target state
* @fail: Current CPU hotplug callback state
* @thread: Pointer to the hotplug thread
* @should_run: Thread should execute
* @rollback: Perform a rollback
* @single: Single callback invocation
* @bringup: Single callback bringup or teardown selector
* @booted_once: Tracks if the CPU has been booted at least once
* @node: Remote CPU node; for multi-instance, do a single entry callback for install/remove
* @last: For multi-instance rollback, remember how far we got
* @cb_state: The state for a single callback (install/uninstall)
* @result: Result of the operation
* @done_up: Signal completion to the issuer of the task for cpu-up
* @done_down: Signal completion to the issuer of the task for cpu-down
*/
struct cpuhp_cpu_state {
enum cpuhp_state state;
enum cpuhp_state target;
enum cpuhp_state fail;
#ifdef CONFIG_SMP
struct task_struct *thread;
bool should_run;
bool rollback;
bool single;
bool bringup;
bool booted_once;
struct hlist_node *node;
struct hlist_node *last;
enum cpuhp_state cb_state;
int result;
struct completion done_up;
struct completion done_down;
#endif
};
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
.fail = CPUHP_INVALID,
};
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
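/*
* Fake lockdep maps, one per hotplug direction. They are acquired
* around the state callbacks and pumped by the waiters so lockdep can
* detect lock inversions between the hotplug control side and the AP
* hotplug thread.
*/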
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
static inline void cpuhp_lock_acquire(bool bringup)
{
lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
static inline void cpuhp_lock_release(bool bringup)
{
lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else
static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }
#endif
/**
* cpuhp_step - Hotplug state machine step
* @name: Name of the step
* @startup: Startup function of the step
* @teardown: Teardown function of the step
* @cant_stop: Bringup/teardown can't be stopped at this step
*/
struct cpuhp_step {
const char *name;
union {
int (*single)(unsigned int cpu);
int (*multi)(unsigned int cpu,
struct hlist_node *node);
} startup;
union {
int (*single)(unsigned int cpu);
int (*multi)(unsigned int cpu,
struct hlist_node *node);
} teardown;
struct hlist_head list;
bool cant_stop;
bool multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
return cpuhp_hp_states + state;
}
/**
* cpuhp_invoke_callback - Invoke the callbacks for a given state
* @cpu: The cpu for which the callback should be invoked
* @state: The state to do callbacks for
* @bringup: True if the bringup callback should be invoked
* @node: For multi-instance, do a single entry callback for install/remove
* @lastp: For multi-instance rollback, remember how far we got
*
* Called from cpu hotplug and from the state register machinery.
*/
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
bool bringup, struct hlist_node *node,
struct hlist_node **lastp)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct cpuhp_step *step = cpuhp_get_step(state);
int (*cbm)(unsigned int cpu, struct hlist_node *node);
int (*cb)(unsigned int cpu);
int ret, cnt;
if (st->fail == state) {
st->fail = CPUHP_INVALID;
if (!(bringup ? step->startup.single : step->teardown.single))
return 0;
return -EAGAIN;
}
if (!step->multi_instance) {
WARN_ON_ONCE(lastp && *lastp);
cb = bringup ? step->startup.single : step->teardown.single;
if (!cb)
return 0;
trace_cpuhp_enter(cpu, st->target, state, cb);
ret = cb(cpu);
trace_cpuhp_exit(cpu, st->state, state, ret);
return ret;
}
cbm = bringup ? step->startup.multi : step->teardown.multi;
if (!cbm)
return 0;
/* Single invocation for instance add/remove */
if (node) {
WARN_ON_ONCE(lastp && *lastp);
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
return ret;
}
/* State transition. Invoke on all instances */
cnt = 0;
hlist_for_each(node, &step->list) {
if (lastp && node == *lastp)
break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
if (ret) {
if (!lastp)
goto err;
*lastp = node;
return ret;
}
cnt++;
}
if (lastp)
*lastp = NULL;
return 0;
err:
/* Rollback the instances if one failed */
cbm = !bringup ? step->startup.multi : step->teardown.multi;
if (!cbm)
return ret;
hlist_for_each(node, &step->list) {
if (!cnt--)
break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
/*
* Rollback must not fail.
*/
WARN_ON_ONCE(ret);
}
return ret;
}
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
/*
* The extra check for CPUHP_TEARDOWN_CPU is only for documentation
* purposes as that state is handled explicitly in cpu_down.
*/
return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}
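/*
* Completion based handshake between the hotplug control side and the
* AP hotplug thread; one completion per hotplug direction.
*/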
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
struct completion *done = bringup ? &st->done_up : &st->done_down;
wait_for_completion(done);
}
static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
struct completion *done = bringup ? &st->done_up : &st->done_down;
complete(done);
}
/*
* The former STARTING/DYING states run with IRQs disabled and must not fail.
*/
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
* The following two APIs (cpu_maps_update_begin/done) must be used when
* attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
*/
void cpu_maps_update_begin(void)
{
mutex_lock(&cpu_add_remove_lock);
}
void cpu_maps_update_done(void)
{
mutex_unlock(&cpu_add_remove_lock);
}
/*
* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
*/
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
void cpus_read_lock(void)
{
percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);
int cpus_read_trylock(void)
{
return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);
void cpus_read_unlock(void)
{
percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
void cpus_write_lock(void)
{
percpu_down_write(&cpu_hotplug_lock);
}
void cpus_write_unlock(void)
{
percpu_up_write(&cpu_hotplug_lock);
}
void lockdep_assert_cpus_held(void)
{
/*
* We can't have hotplug operations before userspace starts running,
* and some init codepaths will knowingly not take the hotplug lock.
* This is all valid, so mute lockdep until it makes sense to report
* unheld locks.
*/
if (system_state < SYSTEM_RUNNING)
return;
percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
static void lockdep_acquire_cpus_lock(void)
{
rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
}
static void lockdep_release_cpus_lock(void)
{
rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_);
}
/*
* Wait for currently running CPU hotplug operations to complete (if any) and
* disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
* the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
* hotplug path before performing hotplug operations. So acquiring that lock
* guarantees mutual exclusion from any currently running hotplug operations.
*/
void cpu_hotplug_disable(void)
{
cpu_maps_update_begin();
cpu_hotplug_disabled++;
cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
static void __cpu_hotplug_enable(void)
{
if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
return;
cpu_hotplug_disabled--;
}
void cpu_hotplug_enable(void)
{
cpu_maps_update_begin();
__cpu_hotplug_enable();
cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#else
static void lockdep_acquire_cpus_lock(void)
{
}
static void lockdep_release_cpus_lock(void)
{
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
* Architectures that need SMT-specific errata handling during SMT hotplug
* should override this.
*/
void __weak arch_smt_update(void) { }
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
void __init cpu_smt_disable(bool force)
{
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
return;
if (force) {
pr_info("SMT: Force disabled\n");
cpu_smt_control = CPU_SMT_FORCE_DISABLED;
} else {
pr_info("SMT: disabled\n");
cpu_smt_control = CPU_SMT_DISABLED;
}
}
/*
* The decision whether SMT is supported can only be made after the full
* CPU identification. Called from architecture code.
*/
void __init cpu_smt_check_topology(void)
{
if (!topology_smt_supported())
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}
static int __init smt_cmdline_disable(char *str)
{
cpu_smt_disable(str && !strcmp(str, "force"));
return 0;
}
early_param("nosmt", smt_cmdline_disable);
static inline bool cpu_smt_allowed(unsigned int cpu)
{
if (cpu_smt_control == CPU_SMT_ENABLED)
return true;
if (topology_is_primary_thread(cpu))
return true;
/*
* On x86 it's required to boot all logical CPUs at least once so
* that the init code can get a chance to set CR4.MCE on each
* CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
* core will shut down the machine.
*/
return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
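/*
* Prime @st for a transition towards @target and return the previous
* state so that a failed transition can be rolled back.
*/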
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
st->rollback = false;
st->last = NULL;
st->target = target;
st->single = false;
st->bringup = st->state < target;
return prev_state;
}
static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
st->rollback = true;
/*
* If we have st->last we need to undo partial multi_instance of this
* state first. Otherwise start undo at the previous state.
*/
if (!st->last) {
if (st->bringup)
st->state--;
else
st->state++;
}
st->target = prev_state;
st->bringup = !st->bringup;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
if (!st->single && st->state == st->target)
return;
st->result = 0;
/*
* Make sure the above stores are visible before should_run becomes
* true. Paired with the smp_mb() in cpuhp_thread_fun().
*/
smp_mb();
st->should_run = true;
wake_up_process(st->thread);
wait_for_ap_thread(st, st->bringup);
}
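/*
* Drive the AP hotplug thread towards @target. On failure, reset the
* state machine and kick the thread once more to perform the rollback.
*/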
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
enum cpuhp_state prev_state;
int ret;
prev_state = cpuhp_set_state(st, target);
__cpuhp_kick_ap(st);
if ((ret = st->result)) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
return ret;
}
static int bringup_wait_for_ap(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
wait_for_ap_thread(st, true);
if (WARN_ON_ONCE(!cpu_online(cpu)))
return -ECANCELED;
/* Unpark the hotplug thread of the target cpu */
kthread_unpark(st->thread);
/*
* SMT soft disabling on X86 requires to bring the CPU out of the
* BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
* CPU marked itself as booted_once in cpu_notify_starting() so the
* cpu_smt_allowed() check will now return false if this is not the
* primary sibling.
*/
if (!cpu_smt_allowed(cpu))
return -ECANCELED;
if (st->target <= CPUHP_AP_ONLINE_IDLE)
return 0;
return cpuhp_kick_ap(st, st->target);
}
static int bringup_cpu(unsigned int cpu)
{
struct task_struct *idle = idle_thread_get(cpu);
int ret;
/*
* Some architectures have to walk the irq descriptors to
* setup the vector space for the cpu which comes online.
* Prevent irq alloc/free across the bringup.
*/
irq_lock_sparse();
/* Arch-specific enabling code. */
ret = __cpu_up(cpu, idle);
irq_unlock_sparse();
if (ret)
return ret;
return bringup_wait_for_ap(cpu);
}
static int finish_cpu(unsigned int cpu)
{
struct task_struct *idle = idle_thread_get(cpu);
struct mm_struct *mm = idle->active_mm;
/*
* idle_task_exit() will have switched to &init_mm, now
* clean up any remaining active_mm state.
*/
if (mm != &init_mm)
idle->active_mm = &init_mm;
mmdrop(mm);
return 0;
}
/*
* Hotplug state machine related functions
*/
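/*
* Walk the state machine back down towards @st->target, invoking the
* teardown callback of every state that was brought up beyond it.
*/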
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
for (st->state--; st->state > st->target; st->state--)
cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}
static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
return true;
/*
* When CPU hotplug is disabled, then taking the CPU down is not
* possible because takedown_cpu() and the architecture and
* subsystem specific mechanisms are not available. So the CPU
* which would be completely unplugged again needs to stay around
* in the current state.
*/
return st->state <= CPUHP_BRINGUP_CPU;
}
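/*
* Bring the CPU up one state at a time. On the first failing callback,
* roll back to the state the operation started from, provided a
* rollback is possible at that point.
*/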
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
int ret = 0;
while (st->state < target) {
st->state++;
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
if (ret) {
if (can_rollback_cpu(st)) {
st->target = prev_state;
undo_cpu_up(cpu, st);
}
break;
}
}
return ret;
}
/*
* The cpu hotplug threads manage the bringup and teardown of the cpus
*/
static void cpuhp_create(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
init_completion(&st->done_up);
init_completion(&st->done_down);
}
static int cpuhp_should_run(unsigned int cpu)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
return st->should_run;
}
/*
* Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
* callbacks when a state gets [un]installed at runtime.
*
* Each invocation of this function by the smpboot thread does a single AP
* state callback.
*
* It has 3 modes of operation:
* - single: runs st->cb_state
* - up: runs ++st->state, while st->state < st->target
* - down: runs st->state--, while st->state > st->target
*
* When complete or on error, should_run is cleared and the completion is fired.
*/
static void cpuhp_thread_fun(unsigned int cpu)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
bool bringup = st->bringup;
enum cpuhp_state state;
if (WARN_ON_ONCE(!st->should_run))
return;
/*
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
* that if we see ->should_run we also see the rest of the state.
*/
smp_mb();
/*
* The BP holds the hotplug lock, but we're now running on the AP;
* ensure that anybody asserting the lock is held will actually find
* it so.
*/
lockdep_acquire_cpus_lock();
cpuhp_lock_acquire(bringup);
if (st->single) {
state = st->cb_state;
st->should_run = false;
} else {
if (bringup) {
st->state++;
state = st->state;
st->should_run = (st->state < st->target);
WARN_ON_ONCE(st->state > st->target);
} else {
state = st->state;
st->state--;
st->should_run = (st->state > st->target);
WARN_ON_ONCE(st->state < st->target);
}
}
WARN_ON_ONCE(!cpuhp_is_ap_state(state));
if (cpuhp_is_atomic_state(state)) {
local_irq_disable();
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
local_irq_enable();
/*
* STARTING/DYING must not fail!
*/
WARN_ON_ONCE(st->result);
} else {
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
}
if (st->result) {
/*
* If we fail on a rollback, we're up a creek without a
* paddle, no way forward, no way back. We lose, thanks for
* playing.
*/
WARN_ON_ONCE(st->rollback);
st->should_run = false;
}
cpuhp_lock_release(bringup);
lockdep_release_cpus_lock();
if (!st->should_run)
complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int ret;
if (!cpu_online(cpu))
return 0;
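/*
* Briefly acquire and release both lockdep maps so lockdep records the
* dependency between this waiter and the AP hotplug thread for both
* hotplug directions.
*/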
cpuhp_lock_acquire(false);
cpuhp_lock_release(false);
cpuhp_lock_acquire(true);
cpuhp_lock_release(true);
/*
* If we are up and running, use the hotplug thread. For early calls
* we invoke the thread function directly.
*/
if (!st->thread)
return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
st->rollback = false;
st->last = NULL;
st->node = node;
st->bringup = bringup;
st->cb_state = state;
st->single = true;
__cpuhp_kick_ap(st);
/*
* If we failed and did a partial, do a rollback.
*/
if ((ret = st->result) && st->last) {
st->rollback = true;
st->bringup = !bringup;
__cpuhp_kick_ap(st);
}
/*
* Clean up the leftovers so the next hotplug operation won't use stale
* data.
*/
st->node = st->last = NULL;
return ret;
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
enum cpuhp_state prev_state = st->state;
int ret;
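/* Pump the lockdep maps, see cpuhp_invoke_ap_callback() above. */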
cpuhp_lock_acquire(false);
cpuhp_lock_release(false);
cpuhp_lock_acquire(true);
cpuhp_lock_release(true);
trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
ret = cpuhp_kick_ap(st, st->target);
trace_cpuhp_exit(cpu, st->state, prev_state, ret);
return ret;
}
static struct smp_hotplug_thread cpuhp_threads = {
.store = &cpuhp_state.thread,
.create = &cpuhp_create,
.thread_should_run = cpuhp_should_run,
.thread_fn = cpuhp_thread_fun,
.thread_comm = "cpuhp/%u",
.selfparking = true,
};
void __init cpuhp_threads_init(void)
{
BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
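/*
* Note: the cpuhp threads are created parked (.selfparking above keeps
* the generic smpboot code from parking/unparking them); the boot CPU's
* thread is unparked right here, while a hotplugged CPU's thread is
* unparked from the bringup path once that CPU is running.
*/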
/*
*
* Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
* protected region.
*
* The operation is still serialized against concurrent CPU hotplug via
* cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
* serialized against other hotplug related activity like adding or
* removing of state callbacks and state instances, which invoke either the
* startup or the teardown callback of the affected state.
*
* This is required for subsystems which are unfixable vs. CPU hotplug and
* evade lock inversion problems by scheduling work which has to be
* completed _before_ cpu_up()/_cpu_down() returns.
*
* Don't even think about adding anything to this for any new code or even
* drivers. Its only purpose is to keep existing lock order trainwrecks
* working.
*
* For cpu_down() there might be valid reasons to finish cleanups which are
* not required to be done under cpu_hotplug_lock, but that's a different
* story and would not be invoked via this.
*/
static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
{
/*
* cpusets delegate hotplug operations to a worker to "solve" the
* lock order problems. Wait for the worker, but only if tasks are
* _not_ frozen (suspend, hibernate) as that would wait forever.
*
* The wait is required because otherwise the hotplug operation
* returns with inconsistent state, which could even be observed in
* user space when a new CPU is brought up. The CPU plug uevent
* would be delivered and user space reacting to it would fail to
* move tasks to the newly plugged CPU up to the point where the
* work has finished because up to that point the newly plugged CPU
* is not assignable in cpusets/cgroups. On unplug that's not
* necessarily a visible issue, but it is still inconsistent state,
* which is the real problem which needs to be "fixed". This can't
* prevent the transient state between scheduling the work and
* returning from waiting for it.
*/
if (!tasks_frozen)
cpuset_wait_for_hotplug();
}
#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
*
* This function walks all processes, finds a valid mm struct for each one and
* then clears a corresponding bit in mm's cpumask. While this all sounds
* trivial, there are various non-obvious corner cases, which this function
* tries to solve in a safe manner.
*
* Also note that the function uses a somewhat relaxed locking scheme, so it may
* be called only for an already offlined CPU.
*/
void clear_tasks_mm_cpumask(int cpu)
{
struct task_struct *p;
/*
* This function is called after the cpu is taken down and marked
* offline, so it's not like new tasks will ever get this cpu set in
* their mm mask. -- Peter Zijlstra
* Thus, we may use rcu_read_lock() here, instead of grabbing
* full-fledged tasklist_lock.
*/
WARN_ON(cpu_online(cpu));
rcu_read_lock();
for_each_process(p) {
struct task_struct *t;
/*
* Main thread might exit, but other threads may still have
* a valid mm. Find one.
*/
t = find_lock_task_mm(p);
if (!t)
continue;
arch_clear_mm_cpumask_cpu(cpu, t->mm);
task_unlock(t);
}
rcu_read_unlock();
}
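/*
* Sketch of a typical arch-side call site (illustrative, not part of this
* file): an architecture's CPU-teardown hook marks the CPU offline first
* and then drops the stale mm_cpumask bits:
*
*	set_cpu_online(cpu, false);
*	...
*	clear_tasks_mm_cpumask(cpu);
*/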
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
int err, cpu = smp_processor_id();
int ret;
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
return err;
/*
* We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
* do this step again.
*/
WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
st->state--;
/* Invoke the former CPU_DYING callbacks */
for (; st->state > target; st->state--) {
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
/*
* DYING must not fail!
*/
WARN_ON_ONCE(ret);
}
/* Give up timekeeping duties */
tick_handover_do_timer();
/* Park the stopper thread */
stop_machine_park(cpu);
return 0;
}
static int takedown_cpu(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
/* Park the smpboot threads */
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
* interrupt affinities.
*/
irq_lock_sparse();
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
if (err) {
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
return err;
}
BUG_ON(cpu_online(cpu));
/*
* The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
* all runnable tasks from the CPU, there's only the idle task left now
* that the migration thread is done doing the stop_machine thing.
*
* Wait for the stop thread to go away.
*/
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
hotplug_cpu__broadcast_tick_pull(cpu);
/* This actually kills the CPU. */
__cpu_die(cpu);
tick_cleanup_dead_cpu(cpu);
rcutree_migrate_callbacks(cpu);
return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
struct cpuhp_cpu_state *st = arg;
complete_ap_thread(st, false);
}
void cpuhp_report_idle_dead(void)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
BUG_ON(st->state != CPUHP_AP_OFFLINE);
rcu_report_dead(smp_processor_id());
st->state = CPUHP_AP_IDLE_DEAD;
/*
* We cannot call complete after rcu_report_dead() so we delegate it
* to an online cpu.
*/
smp_call_function_single(cpumask_first(cpu_online_mask),
cpuhp_complete_idle_dead, st, 0);
}
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
for (st->state++; st->state < st->target; st->state++)
cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
int ret = 0;
for (; st->state > target; st->state--) {
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
BUG_ON(ret && st->state < CPUHP_AP_IDLE_DEAD);
if (ret) {
st->target = prev_state;
if (st->state < prev_state)
undo_cpu_down(cpu, st);
break;
}
}
return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
enum cpuhp_state target)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
u64 start_time = 0;
if (num_online_cpus() == 1)
return -EBUSY;
if (!cpu_present(cpu))
return -EINVAL;
if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1)
return -EBUSY;
cpus_write_lock();
if (trace_cpuhp_latency_enabled())
start_time = sched_clock();
cpuhp_tasks_frozen = tasks_frozen;
prev_state = cpuhp_set_state(st, target);
/*
* If the current CPU state is in the range of the AP hotplug thread,
* then we need to kick the thread.
*/
if (st->state > CPUHP_TEARDOWN_CPU) {
st->target = max((int)target, CPUHP_TEARDOWN_CPU);
ret = cpuhp_kick_ap_work(cpu);
/*
* The AP side has done the error rollback already. Just
* return the error code.
*/
if (ret)
goto out;
/*
* We might have stopped still in the range of the AP hotplug
* thread. Nothing to do anymore.
*/
if (st->state > CPUHP_TEARDOWN_CPU)
goto out;
st->target = target;
}
/*
* The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
* to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, target);
if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
out:
trace_cpuhp_latency(cpu, 0, start_time, ret);
cpus_write_unlock();
/*
* Do post unplug cleanup. This is still protected against
* concurrent CPU hotplug via cpu_add_remove_lock.
*/
lockup_detector_cleanup();
arch_smt_update();
cpu_up_down_serialize_trainwrecks(tasks_frozen);
return ret;
}
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
if (cpu_hotplug_disabled)
return -EBUSY;
return _cpu_down(cpu, 0, target);
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
int err;
/*
* When cpusets are enabled, the rebuilding of the scheduling
* domains is deferred to a workqueue context. Make sure
* that the work is completed before proceeding to the next
* hotplug. Otherwise the scheduler observes an inconsistent
* view of online and offline CPUs in the root domain. If
* the online CPUs are still stuck in the offline (default)
* domain, those CPUs would not be visible when scheduling
* happens from other CPUs in the root domain.
*/
cpuset_wait_for_hotplug();
cpu_maps_update_begin();
err = cpu_down_maps_locked(cpu, target);
cpu_maps_update_done();
return err;
}
int cpu_down(unsigned int cpu)
{
return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#else
#define takedown_cpu NULL
#endif /*CONFIG_HOTPLUG_CPU*/
/**
* notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
* @cpu: cpu that just started
*
* It must be called by the arch code on the new cpu, before the new cpu
* enables interrupts and before the "boot" cpu returns from __cpu_up().
*/
void notify_cpu_starting(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
int ret;
rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
st->booted_once = true;
while (st->state < target) {
st->state++;
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
/*
* STARTING must not fail!
*/
WARN_ON_ONCE(ret);
}
}
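/*
* Sketch of the expected arch usage (illustrative): the secondary startup
* path calls this with interrupts still disabled, before the CPU is
* marked online:
*
*	notify_cpu_starting(cpu);
*	set_cpu_online(cpu, true);
*	local_irq_enable();
*/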
/*
* Called from the idle task. Wake up the controlling task which brings the
* hotplug thread of the upcoming CPU up and then delegates the rest of the
* online bringup to the hotplug thread.
*/
void cpuhp_online_idle(enum cpuhp_state state)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
/* Happens for the boot cpu */
if (state != CPUHP_AP_ONLINE_IDLE)
return;
/*
* Unpark the stopper thread before we start the idle loop (and start
* scheduling); this ensures the stopper task is always available.
*/
stop_machine_unpark(smp_processor_id());
st->state = CPUHP_AP_ONLINE_IDLE;
complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct task_struct *idle;
int ret = 0;
u64 start_time = 0;
cpus_write_lock();
if (trace_cpuhp_latency_enabled())
start_time = sched_clock();
if (!cpu_present(cpu)) {
ret = -EINVAL;
goto out;
}
/*
* The caller of do_cpu_up might have raced with another
* caller. Ignore it for now.
*/
if (st->state >= target)
goto out;
if (st->state == CPUHP_OFFLINE) {
/* Let it fail before we try to bring the cpu up */
idle = idle_thread_get(cpu);
if (IS_ERR(idle)) {
ret = PTR_ERR(idle);
goto out;
}
}
cpuhp_tasks_frozen = tasks_frozen;
cpuhp_set_state(st, target);
/*
* If the current CPU state is in the range of the AP hotplug thread,
* then we need to kick the thread once more.
*/
if (st->state > CPUHP_BRINGUP_CPU) {
ret = cpuhp_kick_ap_work(cpu);
/*
* The AP side has done the error rollback already. Just
* return the error code.
*/
if (ret)
goto out;
}
/*
* Try to reach the target state. We max out on the BP at
* CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
* responsible for bringing it up to the target state.
*/
target = min((int)target, CPUHP_BRINGUP_CPU);
ret = cpuhp_up_callbacks(cpu, st, target);
out:
trace_cpuhp_latency(cpu, 1, start_time, ret);
cpus_write_unlock();
arch_smt_update();
cpu_up_down_serialize_trainwrecks(tasks_frozen);
return ret;
}
static int switch_to_rt_policy(void)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
unsigned int policy = current->policy;
int err;
/* Nobody should be attempting hotplug from these policy contexts. */
if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
policy == SCHED_DEADLINE)
return -EPERM;
if (policy == SCHED_FIFO || policy == SCHED_RR)
return 1;
/* Only SCHED_NORMAL left. */
err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
return err;
}
static int switch_to_fair_policy(void)
{
struct sched_param param = { .sched_priority = 0 };
return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
int err = 0;
int switch_err = 0;
if (!cpu_possible(cpu)) {
pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
cpu);
#if defined(CONFIG_IA64)
pr_err("please check additional_cpus= boot parameter\n");
#endif
return -EINVAL;
}
cpuset_wait_for_hotplug();
switch_err = switch_to_rt_policy();
if (switch_err < 0)
return switch_err;
err = try_online_node(cpu_to_node(cpu));
if (err)
return err;
cpu_maps_update_begin();
if (cpu_hotplug_disabled) {
err = -EBUSY;
goto out;
}
if (!cpu_smt_allowed(cpu)) {
err = -EPERM;
goto out;
}
err = _cpu_up(cpu, 0, target);
out:
cpu_maps_update_done();
if (!switch_err) {
switch_err = switch_to_fair_policy();
if (switch_err)
pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
switch_err, current->comm, current->pid);
}
return err;
}
int cpu_up(unsigned int cpu)
{
return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
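/*
* Illustrative in-kernel usage (sketch; error handling elided): the
* exported helpers take and release cpu_add_remove_lock internally, so a
* caller simply does:
*
*	if (!cpu_online(3))
*		ret = cpu_up(3);
*	...
*	ret = cpu_down(3);
*/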
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
int freeze_secondary_cpus(int primary)
{
int cpu, error = 0;
cpu_maps_update_begin();
if (!cpu_online(primary))
primary = cpumask_first(cpu_online_mask);
/*
* We take down all of the non-boot CPUs in one shot to avoid races
* with user space trying to use CPU hotplug at the same time.
*/
cpumask_clear(frozen_cpus);
pr_info("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == primary)
continue;
if (pm_wakeup_pending()) {
pr_info("Wakeup pending. Abort CPU freeze\n");
error = -EBUSY;
break;
}
trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
if (!error)
cpumask_set_cpu(cpu, frozen_cpus);
else {
pr_err("Error taking CPU%d down: %d\n", cpu, error);
break;
}
}
if (!error)
BUG_ON(num_online_cpus() > 1);
else
pr_err("Non-boot CPUs are not disabled\n");
/*
* Make sure the CPUs won't be enabled by someone else. We need to do
* this even in case of failure as all disable_nonboot_cpus() users are
* supposed to do enable_nonboot_cpus() on the failure path.
*/
cpu_hotplug_disabled++;
cpu_maps_update_done();
return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}
void __weak arch_enable_nonboot_cpus_end(void)
{
}
void enable_nonboot_cpus(void)
{
int cpu, error;
struct device *cpu_device;
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
__cpu_hotplug_enable();
if (cpumask_empty(frozen_cpus))
goto out;
pr_info("Enabling non-boot CPUs ...\n");
arch_enable_nonboot_cpus_begin();
for_each_cpu(cpu, frozen_cpus) {
trace_suspend_resume(TPS("CPU_ON"), cpu, true);
error = _cpu_up(cpu, 1, CPUHP_ONLINE);
trace_suspend_resume(TPS("CPU_ON"), cpu, false);
if (!error) {
pr_info("CPU%d is up\n", cpu);
cpu_device = get_cpu_device(cpu);
if (!cpu_device)
pr_err("%s: failed to get cpu%d device\n",
__func__, cpu);
else
kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
continue;
}
pr_warn("Error taking CPU%d up: %d\n", cpu, error);
}
arch_enable_nonboot_cpus_end();
cpumask_clear(frozen_cpus);
out:
cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
return -ENOMEM;
return 0;
}
core_initcall(alloc_frozen_cpus);
/*
* When callbacks for CPU hotplug notifications are being executed, we must
* ensure that the state of the system with respect to the tasks being frozen
* or not, as reported by the notification, remains unchanged *throughout the
* duration* of the execution of the callbacks.
* Hence we need to prevent the freezer from racing with regular CPU hotplug.
*
* This synchronization is implemented by mutually excluding regular CPU
* hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
* Hibernate notifications.
*/
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
unsigned long action, void *ptr)
{
switch (action) {
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
cpu_hotplug_disable();
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
cpu_hotplug_enable();
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
/*
* cpu_hotplug_pm_callback has higher priority than x86
* bsp_pm_callback which depends on cpu_hotplug_pm_callback
* to disable cpu hotplug to avoid cpu hotplug race.
*/
pm_notifier(cpu_hotplug_pm_callback, 0);
return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
#endif /* CONFIG_PM_SLEEP_SMP */
int __boot_cpu_id;
/* Horrific hacks because we can't add more to cpuhp_hp_states. */
static int random_and_perf_prepare_fusion(unsigned int cpu)
{
#ifdef CONFIG_PERF_EVENTS
perf_event_init_cpu(cpu);
#endif
random_prepare_cpu(cpu);
return 0;
}
static int random_and_workqueue_online_fusion(unsigned int cpu)
{
workqueue_online_cpu(cpu);
random_online_cpu(cpu);
return 0;
}
#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
[CPUHP_OFFLINE] = {
.name = "offline",
.startup.single = NULL,
.teardown.single = NULL,
},
#ifdef CONFIG_SMP
[CPUHP_CREATE_THREADS]= {
.name = "threads:prepare",
.startup.single = smpboot_create_threads,
.teardown.single = NULL,
.cant_stop = true,
},
[CPUHP_PERF_PREPARE] = {
.name = "perf:prepare",
.startup.single = random_and_perf_prepare_fusion,
.teardown.single = perf_event_exit_cpu,
},
[CPUHP_WORKQUEUE_PREP] = {
.name = "workqueue:prepare",
.startup.single = workqueue_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_HRTIMERS_PREPARE] = {
.name = "hrtimers:prepare",
.startup.single = hrtimers_prepare_cpu,
.teardown.single = hrtimers_dead_cpu,
},
[CPUHP_SMPCFD_PREPARE] = {
.name = "smpcfd:prepare",
.startup.single = smpcfd_prepare_cpu,
.teardown.single = smpcfd_dead_cpu,
},
[CPUHP_RELAY_PREPARE] = {
.name = "relay:prepare",
.startup.single = relay_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_SLAB_PREPARE] = {
.name = "slab:prepare",
.startup.single = slab_prepare_cpu,
.teardown.single = slab_dead_cpu,
},
[CPUHP_RCUTREE_PREP] = {
.name = "RCU/tree:prepare",
.startup.single = rcutree_prepare_cpu,
.teardown.single = rcutree_dead_cpu,
},
/*
* On the tear-down path, timers_dead_cpu() must be invoked
* before blk_mq_queue_reinit_notify() from notify_dead(),
* otherwise an RCU stall occurs.
*/
[CPUHP_TIMERS_PREPARE] = {
.name = "timers:prepare",
.startup.single = timers_prepare_cpu,
.teardown.single = timers_dead_cpu,
},
/* Kicks the plugged cpu into life */
[CPUHP_BRINGUP_CPU] = {
.name = "cpu:bringup",
.startup.single = bringup_cpu,
.teardown.single = finish_cpu,
.cant_stop = true,
},
/* Final state before CPU kills itself */
[CPUHP_AP_IDLE_DEAD] = {
.name = "idle:dead",
},
/*
* Last state before CPU enters the idle loop to die. Transient state
* for synchronization.
*/
[CPUHP_AP_OFFLINE] = {
.name = "ap:offline",
.cant_stop = true,
},
/* First state is scheduler control. Interrupts are disabled */
[CPUHP_AP_SCHED_STARTING] = {
.name = "sched:starting",
.startup.single = sched_cpu_starting,
.teardown.single = sched_cpu_dying,
},
[CPUHP_AP_RCUTREE_DYING] = {
.name = "RCU/tree:dying",
.startup.single = NULL,
.teardown.single = rcutree_dying_cpu,
},
[CPUHP_AP_SMPCFD_DYING] = {
.name = "smpcfd:dying",
.startup.single = NULL,
.teardown.single = smpcfd_dying_cpu,
},
/*
* Entry state on starting. Interrupts enabled from here on. Transient
* state for synchronization.
*/
[CPUHP_AP_ONLINE] = {
.name = "ap:online",
},
/*
* Handled on the control processor until the plugged processor manages
* this itself.
*/
[CPUHP_TEARDOWN_CPU] = {
.name = "cpu:teardown",
.startup.single = NULL,
.teardown.single = takedown_cpu,
.cant_stop = true,
},
/* Handle smpboot threads park/unpark */
[CPUHP_AP_SMPBOOT_THREADS] = {
.name = "smpboot/threads:online",
.startup.single = smpboot_unpark_threads,
.teardown.single = smpboot_park_threads,
},
[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
.name = "irq/affinity:online",
.startup.single = irq_affinity_online_cpu,
.teardown.single = NULL,
},
[CPUHP_AP_PERF_ONLINE] = {
.name = "perf:online",
.startup.single = perf_event_restart_events,
.teardown.single = perf_event_exit_cpu,
},
[CPUHP_AP_WATCHDOG_ONLINE] = {
.name = "lockup_detector:online",
.startup.single = lockup_detector_online_cpu,
.teardown.single = lockup_detector_offline_cpu,
},
[CPUHP_AP_WORKQUEUE_ONLINE] = {
.name = "workqueue:online",
.startup.single = random_and_workqueue_online_fusion,
.teardown.single = workqueue_offline_cpu,
},
[CPUHP_AP_RCUTREE_ONLINE] = {
.name = "RCU/tree:online",
.startup.single = rcutree_online_cpu,
.teardown.single = rcutree_offline_cpu,
},
#endif
/*
* The dynamically registered state space is here
*/
#ifdef CONFIG_SMP
/* Last state is scheduler control setting the cpu active */
[CPUHP_AP_ACTIVE] = {
.name = "sched:active",
.startup.single = sched_cpu_activate,
.teardown.single = sched_cpu_deactivate,
},
#endif
/* CPU is fully up and running. */
[CPUHP_ONLINE] = {
.name = "online",
.startup.single = NULL,
.teardown.single = NULL,
},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
return -EINVAL;
return 0;
}
/*
* Returns a free slot for dynamic state assignment in the requested range.
* The states are protected by the cpuhp_state_mutex and an empty slot is
* identified by having no name assigned.
*/
static int cpuhp_reserve_state(enum cpuhp_state state)
{
enum cpuhp_state i, end;
struct cpuhp_step *step;
switch (state) {
case CPUHP_AP_ONLINE_DYN:
step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
end = CPUHP_AP_ONLINE_DYN_END;
break;
case CPUHP_BP_PREPARE_DYN:
step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
end = CPUHP_BP_PREPARE_DYN_END;
break;
default:
return -EINVAL;
}
for (i = state; i <= end; i++, step++) {
if (!step->name)
return i;
}
WARN(1, "No more dynamic states available for CPU hotplug\n");
return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
/* (Un)Install the callbacks for further cpu hotplug operations */
struct cpuhp_step *sp;
int ret = 0;
/*
* If name is NULL, then the state gets removed.
*
* CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
* the first allocation from these dynamic ranges, so the removal
* would trigger a new allocation and clear the wrong (already
* empty) state, leaving the callbacks of the to be cleared state
* dangling, which causes wreckage on the next hotplug operation.
*/
if (name && (state == CPUHP_AP_ONLINE_DYN ||
state == CPUHP_BP_PREPARE_DYN)) {
ret = cpuhp_reserve_state(state);
if (ret < 0)
return ret;
state = ret;
}
sp = cpuhp_get_step(state);
if (name && sp->name)
return -EBUSY;
sp->startup.single = startup;
sp->teardown.single = teardown;
sp->name = name;
sp->multi_instance = multi_instance;
INIT_HLIST_HEAD(&sp->list);
return ret;
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
return cpuhp_get_step(state)->teardown.single;
}
/*
* Call the startup/teardown function for a step either on the AP or
* on the current CPU.
*/
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int ret;
/*
* If there's nothing to do, we're done.
* Relies on the union for multi_instance.
*/
if ((bringup && !sp->startup.single) ||
(!bringup && !sp->teardown.single))
return 0;
/*
* The non-AP-bound callbacks can fail on bringup. On teardown,
* e.g. module removal, we crash for now.
*/
#ifdef CONFIG_SMP
if (cpuhp_is_ap_state(state))
ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
BUG_ON(ret && !bringup);
return ret;
}
/*
* Called from __cpuhp_setup_state on a recoverable failure.
*
* Note: The teardown callbacks for rollback are not allowed to fail!
*/
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
struct hlist_node *node)
{
int cpu;
/* Roll back the already executed steps on the other cpus */
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpu >= failedcpu)
break;
/* Did we invoke the startup call on that cpu ? */
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, node);
}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
struct hlist_node *node,
bool invoke)
{
struct cpuhp_step *sp;
int cpu;
int ret;
lockdep_assert_cpus_held();
sp = cpuhp_get_step(state);
if (sp->multi_instance == false)
return -EINVAL;
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !sp->startup.multi)
goto add_node;
/*
* Try to call the startup callback for each present cpu
* depending on the hotplug state of the cpu.
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate < state)
continue;
ret = cpuhp_issue_call(cpu, state, true, node);
if (ret) {
if (sp->teardown.multi)
cpuhp_rollback_install(cpu, state, node);
goto unlock;
}
}
add_node:
ret = 0;
hlist_add_head(node, &sp->list);
unlock:
mutex_unlock(&cpuhp_state_mutex);
return ret;
}
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
bool invoke)
{
int ret;
cpus_read_lock();
ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
/**
* __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
* @state: The state to setup
* @invoke: If true, the startup function is invoked for cpus where
* cpu state >= @state
* @startup: startup callback function
* @teardown: teardown callback function
* @multi_instance: State is set up for multiple instances which get
* added afterwards.
*
* The caller needs to hold cpus read locked while calling this function.
* Returns:
* On success:
* Positive state number if @state is CPUHP_AP_ONLINE_DYN
* 0 for all other states
* On failure: proper (negative) error code
*/
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
int cpu, ret = 0;
bool dynstate;
lockdep_assert_cpus_held();
if (cpuhp_cb_check(state) || !name)
return -EINVAL;
mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
dynstate = state == CPUHP_AP_ONLINE_DYN;
if (ret > 0 && dynstate) {
state = ret;
ret = 0;
}
if (ret || !invoke || !startup)
goto out;
/*
* Try to call the startup callback for each present cpu
* depending on the hotplug state of the cpu.
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate < state)
continue;
ret = cpuhp_issue_call(cpu, state, true, NULL);
if (ret) {
if (teardown)
cpuhp_rollback_install(cpu, state, NULL);
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
goto out;
}
}
out:
mutex_unlock(&cpuhp_state_mutex);
/*
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success.
*/
if (!ret && dynstate)
return state;
return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
int __cpuhp_setup_state(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
int ret;
cpus_read_lock();
ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
teardown, multi_instance);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
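/*
* Illustrative usage (sketch; the foo_* callbacks are hypothetical): most
* callers use the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>
* and request a dynamic slot, in which case the allocated state number is
* returned and must be kept for later removal:
*
*	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
*				foo_cpu_online, foo_cpu_offline);
*	if (ret < 0)
*		return ret;
*	foo_hp_state = ret;
*/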
int __cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node, bool invoke)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int cpu;
BUG_ON(cpuhp_cb_check(state));
if (!sp->multi_instance)
return -EINVAL;
cpus_read_lock();
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
/*
* Call the teardown callback for each present cpu depending
* on the hotplug state of the cpu. This function is not
* allowed to fail currently!
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, node);
}
remove:
hlist_del(node);
mutex_unlock(&cpuhp_state_mutex);
cpus_read_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
* __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
* @state: The state to remove
* @invoke: If true, the teardown function is invoked for cpus where
* cpu state >= @state
*
* The caller needs to hold cpus read locked while calling this function.
* The teardown callback is currently not allowed to fail. Think
* about module removal!
*/
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int cpu;
BUG_ON(cpuhp_cb_check(state));
lockdep_assert_cpus_held();
mutex_lock(&cpuhp_state_mutex);
if (sp->multi_instance) {
WARN(!hlist_empty(&sp->list),
"Error: Removing state %d which has instances left.\n",
state);
goto remove;
}
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
/*
* Call the teardown callback for each present cpu depending
* on the hotplug state of the cpu. This function is not
* allowed to fail currently!
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, NULL);
}
remove:
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
cpus_read_lock();
__cpuhp_remove_state_cpuslocked(state, invoke);
cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
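/*
* Illustrative counterpart (sketch, continuing the example above): the
* cpuhp_remove_state() wrapper removes the state and runs the teardown
* callback on each CPU that had reached the state:
*
*	cpuhp_remove_state(foo_hp_state);
*/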
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
struct cpuhp_step *sp;
int target, ret;
ret = kstrtoint(buf, 10, &target);
if (ret)
return ret;
#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
return -EINVAL;
#else
if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
return -EINVAL;
#endif
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
mutex_lock(&cpuhp_state_mutex);
sp = cpuhp_get_step(target);
ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
mutex_unlock(&cpuhp_state_mutex);
if (ret)
goto out;
if (st->state < target)
ret = do_cpu_up(dev->id, target);
else
ret = do_cpu_down(dev->id, target);
out:
unlock_device_hotplug();
return ret ? ret : count;
}
static ssize_t show_cpuhp_target(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
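/*
* Example (user-space sketch): "target" drives a CPU towards a state.
* Without CONFIG_CPU_HOTPLUG_STATE_CONTROL only the two end states are
* accepted, making this equivalent to regular on/offlining:
*
*	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
*	# cat /sys/devices/system/cpu/cpu1/hotplug/state
*/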
static ssize_t write_cpuhp_fail(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
struct cpuhp_step *sp;
int fail, ret;
ret = kstrtoint(buf, 10, &fail);
if (ret)
return ret;
if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
return -EINVAL;
/*
* Cannot fail STARTING/DYING callbacks.
*/
if (cpuhp_is_atomic_state(fail))
return -EINVAL;
/*
* Cannot fail anything that doesn't have callbacks.
*/
mutex_lock(&cpuhp_state_mutex);
sp = cpuhp_get_step(fail);
if (!sp->startup.single && !sp->teardown.single)
ret = -EINVAL;
mutex_unlock(&cpuhp_state_mutex);
if (ret)
return ret;
st->fail = fail;
return count;
}
static ssize_t show_cpuhp_fail(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->fail);
}
static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
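/*
* Example (debug sketch; the state number is hypothetical): writing a
* state into "fail" makes the next hotplug operation fail when it reaches
* that state, which exercises the rollback paths:
*
*	# echo 120 > /sys/devices/system/cpu/cpu1/hotplug/fail
*	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
*/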
static struct attribute *cpuhp_cpu_attrs[] = {
&dev_attr_state.attr,
&dev_attr_target.attr,
&dev_attr_fail.attr,
NULL
};
static const struct attribute_group cpuhp_cpu_attr_group = {
.attrs = cpuhp_cpu_attrs,
.name = "hotplug",
NULL
};
static ssize_t show_cpuhp_states(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t cur, res = 0;
int i;
mutex_lock(&cpuhp_state_mutex);
for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
struct cpuhp_step *sp = cpuhp_get_step(i);
if (sp->name) {
cur = sprintf(buf, "%3d: %s\n", i, sp->name);
buf += cur;
res += cur;
}
}
mutex_unlock(&cpuhp_state_mutex);
return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
static struct attribute *cpuhp_cpu_root_attrs[] = {
&dev_attr_states.attr,
NULL
};
static const struct attribute_group cpuhp_cpu_root_attr_group = {
.attrs = cpuhp_cpu_root_attrs,
.name = "hotplug",
NULL
};
#ifdef CONFIG_HOTPLUG_SMT
static const char *smt_states[] = {
[CPU_SMT_ENABLED] = "on",
[CPU_SMT_DISABLED] = "off",
[CPU_SMT_FORCE_DISABLED] = "forceoff",
[CPU_SMT_NOT_SUPPORTED] = "notsupported",
};
static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
}
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
dev->offline = true;
/* Tell user space about the state change */
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}
static void cpuhp_online_cpu_device(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
dev->offline = false;
/* Tell user space about the state change */
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
cpu_maps_update_begin();
for_each_online_cpu(cpu) {
if (topology_is_primary_thread(cpu))
continue;
ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
if (ret)
break;
/*
* As this needs to hold the cpu maps lock it's impossible
* to call device_offline() because that ends up calling
* cpu_down() which takes cpu maps lock. cpu maps lock
* needs to be held as this might race against in-kernel
* abusers of the hotplug machinery (thermal management).
*
* So nothing would update device:offline state. That would
* leave the sysfs entry stale and prevent onlining after
* smt control has been changed to 'off' again. This is
* called under the sysfs hotplug lock, so it is properly
* serialized against the regular offline usage.
*/
cpuhp_offline_cpu_device(cpu);
}
if (!ret)
cpu_smt_control = ctrlval;
cpu_maps_update_done();
return ret;
}
int cpuhp_smt_enable(void)
{
int cpu, ret = 0;
cpu_maps_update_begin();
cpu_smt_control = CPU_SMT_ENABLED;
for_each_present_cpu(cpu) {
/* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
continue;
ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
if (ret)
break;
/* See comment in cpuhp_smt_disable() */
cpuhp_online_cpu_device(cpu);
}
cpu_maps_update_done();
return ret;
}
static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ctrlval, ret;
if (sysfs_streq(buf, "on"))
ctrlval = CPU_SMT_ENABLED;
else if (sysfs_streq(buf, "off"))
ctrlval = CPU_SMT_DISABLED;
else if (sysfs_streq(buf, "forceoff"))
ctrlval = CPU_SMT_FORCE_DISABLED;
else
return -EINVAL;
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
return -EPERM;
if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
return -ENODEV;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
if (ctrlval != cpu_smt_control) {
switch (ctrlval) {
case CPU_SMT_ENABLED:
ret = cpuhp_smt_enable();
break;
case CPU_SMT_DISABLED:
case CPU_SMT_FORCE_DISABLED:
ret = cpuhp_smt_disable(ctrlval);
break;
}
}
unlock_device_hotplug();
return ret ? ret : count;
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
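/*
* Example (user-space sketch): SMT can be toggled globally at runtime;
* "forceoff" is sticky until reboot:
*
*	# echo off > /sys/devices/system/cpu/smt/control
*	# cat /sys/devices/system/cpu/smt/active
*/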
static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
bool active = topology_max_smt_threads() > 1;
return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
static struct attribute *cpuhp_smt_attrs[] = {
&dev_attr_control.attr,
&dev_attr_active.attr,
NULL
};
static const struct attribute_group cpuhp_smt_attr_group = {
.attrs = cpuhp_smt_attrs,
.name = "smt",
NULL
};
static int __init cpu_smt_state_init(void)
{
return sysfs_create_group(&cpu_subsys.dev_root->kobj,
&cpuhp_smt_attr_group);
}
#else
static inline int cpu_smt_state_init(void) { return 0; }
#endif
static int __init cpuhp_sysfs_init(void)
{
int cpu, ret;
ret = cpu_smt_state_init();
if (ret)
return ret;
ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
&cpuhp_cpu_root_attr_group);
if (ret)
return ret;
for_each_possible_cpu(cpu) {
struct device *dev = get_cpu_device(cpu);
if (!dev)
continue;
ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
if (ret)
return ret;
}
return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
/*
* cpu_bit_bitmap[] is a special, "compressed" data structure that
* represents, for each nr < NR_CPUS, the NR_CPUS-bit binary value 1<<nr.
*
* It is used by cpumask_of() to get a constant address to a CPU
* mask value that has a single bit set only.
*/
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
MASK_DECLARE_8(0), MASK_DECLARE_8(8),
MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
MASK_DECLARE_8(32), MASK_DECLARE_8(40),
MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
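/*
* Worked example (sketch): cpumask_of(2) resolves to the row of
* cpu_bit_bitmap[] whose first word is 1UL << 2, yielding a constant
* cpumask with only bit 2 set; no runtime construction is needed.
*/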
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);
struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);
struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);
struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
struct cpumask __cpu_isolated_mask __read_mostly;
EXPORT_SYMBOL(__cpu_isolated_mask);
void init_cpu_present(const struct cpumask *src)
{
cpumask_copy(&__cpu_present_mask, src);
}
void init_cpu_possible(const struct cpumask *src)
{
cpumask_copy(&__cpu_possible_mask, src);
}
void init_cpu_online(const struct cpumask *src)
{
cpumask_copy(&__cpu_online_mask, src);
}
void init_cpu_isolated(const struct cpumask *src)
{
cpumask_copy(&__cpu_isolated_mask, src);
}
/*
* Activate the first processor.
*/
void __init boot_cpu_init(void)
{
int cpu = smp_processor_id();
/* Mark the boot cpu "present", "online" etc for SMP and UP case */
set_cpu_online(cpu, true);
set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
#ifdef CONFIG_SMP
__boot_cpu_id = cpu;
#endif
}
/*
* Must be called _AFTER_ setting up the per_cpu areas
*/
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
this_cpu_write(cpuhp_state.booted_once, true);
#endif
this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
{
atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
void idle_notifier_unregister(struct notifier_block *n)
{
atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
void idle_notifier_call_chain(unsigned long val)
{
atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
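/*
* Illustrative usage (sketch; foo_idle_nb/foo_idle_cb are hypothetical):
*
*	static int foo_idle_cb(struct notifier_block *nb, unsigned long val,
*			       void *data)
*	{
*		return NOTIFY_OK;
*	}
*	static struct notifier_block foo_idle_nb = {
*		.notifier_call = foo_idle_cb,
*	};
*	...
*	idle_notifier_register(&foo_idle_nb);
*/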
/*
* These are used for a global "mitigations=" cmdline option for toggling
* optional CPU mitigations.
*/
enum cpu_mitigations {
CPU_MITIGATIONS_OFF,
CPU_MITIGATIONS_AUTO,
CPU_MITIGATIONS_AUTO_NOSMT,
};
static enum cpu_mitigations cpu_mitigations __ro_after_init =
CPU_MITIGATIONS_AUTO;
static int __init mitigations_parse_cmdline(char *arg)
{
if (!strcmp(arg, "off"))
cpu_mitigations = CPU_MITIGATIONS_OFF;
else if (!strcmp(arg, "auto"))
cpu_mitigations = CPU_MITIGATIONS_AUTO;
else if (!strcmp(arg, "auto,nosmt"))
cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
else
pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
arg);
return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
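/*
* Example (boot command line sketch):
*
*	mitigations=auto,nosmt
*
* keeps the default mitigations but lets arch code disable SMT where a
* mitigation needs it; arch code queries the helpers below.
*/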
/* mitigations=off */
bool cpu_mitigations_off(void)
{
return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);
/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);