Merge 4.19.180 into android-4.19-stable
Changes in 4.19.180
	btrfs: raid56: simplify tracking of Q stripe presence
	btrfs: fix raid6 qstripe kmap
	btrfs: validate qgroup inherit for SNAP_CREATE_V2 ioctl
	btrfs: free correct amount of space in btrfs_delayed_inode_reserve_metadata
	btrfs: unlock extents in btrfs_zero_range in case of quota reservation errors
	PM: runtime: Update device status before letting suppliers suspend
	dm bufio: subtract the number of initial sectors in dm_bufio_get_device_size
	drm/amdgpu: fix parameter error of RREG32_PCIE() in amdgpu_regs_pcie
	usbip: tools: fix build error for multiple definition
	Revert "zram: close udev startup race condition as default groups"
	block: genhd: add 'groups' argument to device_add_disk
	nvme: register ns_id attributes as default sysfs groups
	aoe: register default groups with device_add_disk()
	zram: register default groups with device_add_disk()
	virtio-blk: modernize sysfs attribute creation
	ALSA: ctxfi: cthw20k2: fix mask on conf to allow 4 bits
	RDMA/rxe: Fix missing kconfig dependency on CRYPTO
	rsxx: Return -EFAULT if copy_to_user() fails
	dm verity: fix FEC for RS roots unaligned to block size
	r8169: fix resuming from suspend on RTL8105e if machine runs on battery
	net: dsa: add GRO support via gro_cells
	dm table: fix iterate_devices based device capability checks
	dm table: fix DAX iterate_devices based device capability checks
	dm table: fix zoned iterate_devices based device capability checks
	iommu/amd: Fix sleeping in atomic in increase_address_space()
	mwifiex: pcie: skip cancel_work_sync() on reset failure path
	platform/x86: acer-wmi: Cleanup ACER_CAP_FOO defines
	platform/x86: acer-wmi: Cleanup accelerometer device handling
	platform/x86: acer-wmi: Add new force_caps module parameter
	platform/x86: acer-wmi: Add ACER_CAP_SET_FUNCTION_MODE capability flag
	platform/x86: acer-wmi: Add support for SW_TABLET_MODE on Switch devices
	platform/x86: acer-wmi: Add ACER_CAP_KBD_DOCK quirk for the Aspire Switch 10E SW3-016
	HID: mf: add support for 0079:1846 Mayflash/Dragonrise USB Gamecube Adapter
	media: cx23885: add more quirks for reset DMA on some AMD IOMMU
	ASoC: Intel: bytcr_rt5640: Add quirk for ARCHOS Cesium 140
	PCI: Add function 1 DMA alias quirk for Marvell 9215 SATA controller
	misc: eeprom_93xx46: Add quirk to support Microchip 93LC46B eeprom
	drm/msm/a5xx: Remove overwriting A5XX_PC_DBG_ECO_CNTL register
	mmc: sdhci-of-dwcmshc: set SDHCI_QUIRK2_PRESET_VALUE_BROKEN
	Linux 4.19.180

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I1d133776c03abf37203427cbc1c0edfa9c396edd
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 179
+SUBLEVEL = 180
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -891,7 +891,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
 
 	disk->private_data = &ubd_devs[unit];
 	disk->queue = ubd_devs[unit].queue;
-	device_add_disk(parent, disk);
+	device_add_disk(parent, disk, NULL);
 
 	*disk_out = disk;
 	return 0;
@@ -582,7 +582,8 @@ static int exact_lock(dev_t devt, void *data)
 	return 0;
 }
 
-static void register_disk(struct device *parent, struct gendisk *disk)
+static void register_disk(struct device *parent, struct gendisk *disk,
+			  const struct attribute_group **groups)
 {
 	struct device *ddev = disk_to_dev(disk);
 	struct block_device *bdev;
@@ -597,6 +598,10 @@ static void register_disk(struct device *parent, struct gendisk *disk)
 	/* delay uevents, until we scanned partition table */
 	dev_set_uevent_suppress(ddev, 1);
 
+	if (groups) {
+		WARN_ON(ddev->groups);
+		ddev->groups = groups;
+	}
 	if (device_add(ddev))
 		return;
 	if (!sysfs_deprecated) {
@@ -664,6 +669,7 @@ static void register_disk(struct device *parent, struct gendisk *disk)
  * __device_add_disk - add disk information to kernel list
  * @parent: parent device for the disk
  * @disk: per-device partitioning information
+ * @groups: Additional per-device sysfs groups
  * @register_queue: register the queue if set to true
  *
  * This function registers the partitioning information in @disk
@@ -672,6 +678,7 @@ static void register_disk(struct device *parent, struct gendisk *disk)
  * FIXME: error handling
  */
 static void __device_add_disk(struct device *parent, struct gendisk *disk,
+			      const struct attribute_group **groups,
 			      bool register_queue)
 {
 	dev_t devt;
@@ -715,7 +722,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
 		blk_register_region(disk_devt(disk), disk->minors, NULL,
 				    exact_match, exact_lock, disk);
 	}
-	register_disk(parent, disk);
+	register_disk(parent, disk, groups);
 	if (register_queue)
 		blk_register_queue(disk);
 
@@ -729,15 +736,17 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
 		blk_integrity_add(disk);
 }
 
-void device_add_disk(struct device *parent, struct gendisk *disk)
+void device_add_disk(struct device *parent, struct gendisk *disk,
+		     const struct attribute_group **groups)
+
 {
-	__device_add_disk(parent, disk, true);
+	__device_add_disk(parent, disk, groups, true);
 }
 EXPORT_SYMBOL(device_add_disk);
 
 void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
 {
-	__device_add_disk(parent, disk, false);
+	__device_add_disk(parent, disk, NULL, false);
 }
 EXPORT_SYMBOL(device_add_disk_no_queue_reg);
 
@@ -304,22 +304,22 @@ static void rpm_put_suppliers(struct device *dev)
 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
-	int retval, idx;
 	bool use_links = dev->power.links_count > 0;
+	bool get = false;
+	int retval, idx;
+	bool put;
 
 	if (dev->power.irq_safe) {
 		spin_unlock(&dev->power.lock);
+	} else if (!use_links) {
+		spin_unlock_irq(&dev->power.lock);
 	} else {
+		get = dev->power.runtime_status == RPM_RESUMING;
+
 		spin_unlock_irq(&dev->power.lock);
 
-		/*
-		 * Resume suppliers if necessary.
-		 *
-		 * The device's runtime PM status cannot change until this
-		 * routine returns, so it is safe to read the status outside of
-		 * the lock.
-		 */
-		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+		/* Resume suppliers if necessary. */
+		if (get) {
 			idx = device_links_read_lock();
 
 			retval = rpm_get_suppliers(dev);
@@ -334,24 +334,36 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 
 	if (dev->power.irq_safe) {
 		spin_lock(&dev->power.lock);
-	} else {
-		/*
-		 * If the device is suspending and the callback has returned
-		 * success, drop the usage counters of the suppliers that have
-		 * been reference counted on its resume.
-		 *
-		 * Do that if resume fails too.
-		 */
-		if (use_links
-		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
-		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
-			idx = device_links_read_lock();
+		return retval;
+	}
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (!use_links)
+		return retval;
+
+	/*
+	 * If the device is suspending and the callback has returned success,
+	 * drop the usage counters of the suppliers that have been reference
+	 * counted on its resume.
+	 *
+	 * Do that if the resume fails too.
+	 */
+	put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
+	if (put)
+		__update_runtime_status(dev, RPM_SUSPENDED);
+	else
+		put = get && retval;
+
+	if (put) {
+		spin_unlock_irq(&dev->power.lock);
+
+		idx = device_links_read_lock();
 
 fail:
-			rpm_put_suppliers(dev);
+		rpm_put_suppliers(dev);
 
-			device_links_read_unlock(idx);
-		}
+		device_links_read_unlock(idx);
 
 		spin_lock_irq(&dev->power.lock);
 	}
@@ -201,7 +201,6 @@ int aoeblk_init(void);
 void aoeblk_exit(void);
 void aoeblk_gdalloc(void *);
 void aoedisk_rm_debugfs(struct aoedev *d);
-void aoedisk_rm_sysfs(struct aoedev *d);
 
 int aoechr_init(void);
 void aoechr_exit(void);
@@ -177,10 +177,15 @@ static struct attribute *aoe_attrs[] = {
 	NULL,
 };
 
-static const struct attribute_group attr_group = {
+static const struct attribute_group aoe_attr_group = {
 	.attrs = aoe_attrs,
 };
 
+static const struct attribute_group *aoe_attr_groups[] = {
+	&aoe_attr_group,
+	NULL,
+};
+
 static const struct file_operations aoe_debugfs_fops = {
 	.open = aoe_debugfs_open,
 	.read = seq_read,
@@ -219,17 +224,6 @@ aoedisk_rm_debugfs(struct aoedev *d)
 	d->debugfs = NULL;
 }
 
-static int
-aoedisk_add_sysfs(struct aoedev *d)
-{
-	return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
-}
-void
-aoedisk_rm_sysfs(struct aoedev *d)
-{
-	sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
-}
-
 static int
 aoeblk_open(struct block_device *bdev, fmode_t mode)
 {
@@ -417,8 +411,7 @@ aoeblk_gdalloc(void *vp)
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
-	add_disk(gd);
-	aoedisk_add_sysfs(d);
+	device_add_disk(NULL, gd, aoe_attr_groups);
 	aoedisk_add_debugfs(d);
 
 	spin_lock_irqsave(&d->lock, flags);
@@ -275,7 +275,6 @@ freedev(struct aoedev *d)
 	del_timer_sync(&d->timer);
 	if (d->gd) {
 		aoedisk_rm_debugfs(d);
-		aoedisk_rm_sysfs(d);
 		del_gendisk(d->gd);
 		put_disk(d->gd);
 		blk_cleanup_queue(d->blkq);
@@ -4714,7 +4714,7 @@ static int __init do_floppy_init(void)
 		/* to be cleaned up... */
 		disks[drive]->private_data = (void *)(long)drive;
 		disks[drive]->flags |= GENHD_FL_REMOVABLE;
-		device_add_disk(&floppy_device[drive].dev, disks[drive]);
+		device_add_disk(&floppy_device[drive].dev, disks[drive], NULL);
 	}
 
 	return 0;
@@ -3861,7 +3861,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 	set_capacity(dd->disk, capacity);
 
 	/* Enable the block device and add it to /dev */
-	device_add_disk(&dd->pdev->dev, dd->disk);
+	device_add_disk(&dd->pdev->dev, dd->disk, NULL);
 
 	dd->bdev = bdget_disk(dd->disk, 0);
 	/*
@@ -499,7 +499,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
 		 gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
 		 get_capacity(gendisk) >> 11);
 
-	device_add_disk(&dev->sbd.core, gendisk);
+	device_add_disk(&dev->sbd.core, gendisk, NULL);
 	return 0;
 
 fail_cleanup_queue:
@@ -769,7 +769,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
 	dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
 		 gendisk->disk_name, get_capacity(gendisk) >> 11);
 
-	device_add_disk(&dev->core, gendisk);
+	device_add_disk(&dev->core, gendisk, NULL);
 	return 0;
 
 fail_cleanup_queue:
@@ -179,15 +179,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
 {
 	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
 	char *buf;
-	ssize_t st;
+	int st;
 
 	buf = kzalloc(cnt, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
 	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
-	if (!st)
-		st = copy_to_user(ubuf, buf, cnt);
+	if (!st) {
+		if (copy_to_user(ubuf, buf, cnt))
+			st = -EFAULT;
+	}
 	kfree(buf);
 	if (st)
 		return st;
@@ -226,7 +226,7 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
 			set_capacity(card->gendisk, card->size8 >> 9);
 		else
 			set_capacity(card->gendisk, 0);
-		device_add_disk(CARD_TO_DEV(card), card->gendisk);
+		device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
 		card->bdev_attached = 1;
 	}
 
@@ -3104,7 +3104,7 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
 {
 	dev_dbg(&skdev->pdev->dev, "add_disk\n");
-	device_add_disk(parent, skdev->disk);
+	device_add_disk(parent, skdev->disk, NULL);
 	return 0;
 }
 
@@ -862,7 +862,7 @@ static int probe_disk(struct vdc_port *port)
 	       port->vdisk_size, (port->vdisk_size >> (20 - 9)),
 	       port->vio.ver.major, port->vio.ver.minor);
 
-	device_add_disk(&port->vio.vdev->dev, g);
+	device_add_disk(&port->vio.vdev->dev, g, NULL);
 
 	return 0;
 }
@@ -423,7 +423,7 @@ static int minor_to_index(int minor)
 	return minor >> PART_BITS;
 }
 
-static ssize_t virtblk_serial_show(struct device *dev,
+static ssize_t serial_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct gendisk *disk = dev_to_disk(dev);
@@ -443,7 +443,7 @@ static ssize_t virtblk_serial_show(struct device *dev,
 	return err;
 }
 
-static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL);
+static DEVICE_ATTR_RO(serial);
 
 /* The queue's logical block size must be set before calling this */
 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
@@ -619,7 +619,7 @@ static const char *const virtblk_cache_types[] = {
 };
 
 static ssize_t
-virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
+cache_type_store(struct device *dev, struct device_attribute *attr,
 			 const char *buf, size_t count)
 {
 	struct gendisk *disk = dev_to_disk(dev);
@@ -638,8 +638,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
 }
 
 static ssize_t
-virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
-			char *buf)
+cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct gendisk *disk = dev_to_disk(dev);
 	struct virtio_blk *vblk = disk->private_data;
@@ -649,12 +648,38 @@ virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
 }
 
-static const struct device_attribute dev_attr_cache_type_ro =
-	__ATTR(cache_type, 0444,
-	       virtblk_cache_type_show, NULL);
-static const struct device_attribute dev_attr_cache_type_rw =
-	__ATTR(cache_type, 0644,
-	       virtblk_cache_type_show, virtblk_cache_type_store);
+static DEVICE_ATTR_RW(cache_type);
+
+static struct attribute *virtblk_attrs[] = {
+	&dev_attr_serial.attr,
+	&dev_attr_cache_type.attr,
+	NULL,
+};
+
+static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
+					 struct attribute *a, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct gendisk *disk = dev_to_disk(dev);
+	struct virtio_blk *vblk = disk->private_data;
+	struct virtio_device *vdev = vblk->vdev;
+
+	if (a == &dev_attr_cache_type.attr &&
+	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
+		return S_IRUGO;
+
+	return a->mode;
+}
+
+static const struct attribute_group virtblk_attr_group = {
+	.attrs = virtblk_attrs,
+	.is_visible = virtblk_attrs_are_visible,
+};
+
+static const struct attribute_group *virtblk_attr_groups[] = {
+	&virtblk_attr_group,
+	NULL,
+};
 
 static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, unsigned int numa_node)
@@ -858,24 +883,9 @@ static int virtblk_probe(struct virtio_device *vdev)
 	virtblk_update_capacity(vblk, false);
 	virtio_device_ready(vdev);
 
-	device_add_disk(&vdev->dev, vblk->disk);
-	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
-	if (err)
-		goto out_del_disk;
-
-	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
-		err = device_create_file(disk_to_dev(vblk->disk),
-					 &dev_attr_cache_type_rw);
-	else
-		err = device_create_file(disk_to_dev(vblk->disk),
-					 &dev_attr_cache_type_ro);
-	if (err)
-		goto out_del_disk;
+	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
 	return 0;
 
-out_del_disk:
-	del_gendisk(vblk->disk);
-	blk_cleanup_queue(vblk->disk->queue);
 out_free_tags:
 	blk_mq_free_tag_set(&vblk->tag_set);
 out_put_disk:
@@ -2422,7 +2422,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	for (i = 0; i < info->nr_rings; i++)
 		kick_pending_request_queues(&info->rinfo[i]);
 
-	device_add_disk(&info->xbdev->dev, info->gd);
+	device_add_disk(&info->xbdev->dev, info->gd, NULL);
 
 	info->is_ready = 1;
 	return;
@@ -1971,8 +1971,7 @@ static int zram_add(void)
 
 	zram->disk->queue->backing_dev_info->capabilities |=
 		(BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
-	disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
-	add_disk(zram->disk);
+	device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
 
 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
 
@@ -2008,6 +2007,7 @@ static int zram_remove(struct zram *zram)
 	mutex_unlock(&bdev->bd_mutex);
 
 	zram_debugfs_unregister(zram);
+
 	/* Make sure all the pending I/O are finished */
 	fsync_bdev(bdev);
 	zram_reset_device(zram);
@@ -239,7 +239,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 	while (size) {
 		uint32_t value;
 
-		value = RREG32_PCIE(*pos >> 2);
+		value = RREG32_PCIE(*pos);
 		r = put_user(value, (uint32_t *)buf);
 		if (r)
 			return r;
@@ -282,7 +282,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 		if (r)
 			return r;
 
-		WREG32_PCIE(*pos >> 2, value);
+		WREG32_PCIE(*pos, value);
 
 		result += 4;
 		buf += 4;
@@ -681,8 +681,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
 		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
 
-	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
-
 	/* Enable USE_RETENTION_FLOPS */
 	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
 
@@ -358,6 +358,7 @@
 #define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR	0x1803
 #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1	0x1843
 #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2	0x1844
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE3	0x1846
 
 #define USB_VENDOR_ID_DWAV	0x0eef
 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER	0x0001
@@ -161,6 +161,8 @@ static const struct hid_device_id mf_devices[] = {
 		.driver_data = HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2),
 		.driver_data = 0 }, /* No quirk required */
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3),
+		.driver_data = HID_QUIRK_MULTI_INPUT },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, mf_devices);
@@ -74,6 +74,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
@@ -498,6 +499,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3) },
 #endif
 #if IS_ENABLED(CONFIG_HID_MICROSOFT)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
@@ -1784,7 +1784,7 @@ static int ide_cd_probe(ide_drive_t *drive)
 	ide_cd_read_toc(drive);
 	g->fops = &idecd_ops;
 	g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
-	device_add_disk(&drive->gendev, g);
+	device_add_disk(&drive->gendev, g, NULL);
 	return 0;
 
 out_free_disk:
@@ -416,7 +416,7 @@ static int ide_gd_probe(ide_drive_t *drive)
 	if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
 		g->flags = GENHD_FL_REMOVABLE;
 	g->fops = &ide_gd_ops;
-	device_add_disk(&drive->gendev, g);
+	device_add_disk(&drive->gendev, g, NULL);
 	return 0;
 
 out_free_disk:
@@ -3,6 +3,7 @@ config RDMA_RXE
 	depends on INET && PCI && INFINIBAND
 	depends on !64BIT || ARCH_DMA_ADDR_T_64BIT
 	select NET_UDP_TUNNEL
+	select CRYPTO
 	select CRYPTO_CRC32
 	select DMA_VIRT_OPS
 	---help---
@@ -1348,24 +1348,26 @@ static void increase_address_space(struct protection_domain *domain,
 	unsigned long flags;
 	u64 *pte;
 
+	pte = (void *)get_zeroed_page(gfp);
+	if (!pte)
+		return;
+
 	spin_lock_irqsave(&domain->lock, flags);
 
 	if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
 		/* address space already 64 bit large */
 		goto out;
 
-	pte = (void *)get_zeroed_page(gfp);
-	if (!pte)
-		goto out;
-
 	*pte = PM_LEVEL_PDE(domain->mode,
 			    iommu_virt_to_phys(domain->pt_root));
 	domain->pt_root = pte;
 	domain->mode += 1;
 	domain->updated = true;
+	pte = NULL;
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
+	free_page((unsigned long)pte);
 
 	return;
 }
@@ -1442,6 +1442,10 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
 {
 	sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
+	if (s >= c->start)
+		s -= c->start;
+	else
+		s = 0;
 	if (likely(c->sectors_per_block_bits >= 0))
 		s >>= c->sectors_per_block_bits;
 	else
@@ -893,10 +893,10 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 }
 EXPORT_SYMBOL_GPL(dm_table_set_type);
 
-static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
 			       sector_t start, sector_t len, void *data)
 {
-	return bdev_dax_supported(dev->bdev, PAGE_SIZE);
+	return !bdev_dax_supported(dev->bdev, PAGE_SIZE);
 }
 
 static bool dm_table_supports_dax(struct dm_table *t)
@@ -912,7 +912,7 @@ static bool dm_table_supports_dax(struct dm_table *t)
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
+		    ti->type->iterate_devices(ti, device_not_dax_capable, NULL))
 			return false;
 	}
 
@@ -1394,6 +1394,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+/*
+ * type->iterate_devices() should be called when the sanity check needs to
+ * iterate and check all underlying data devices. iterate_devices() will
+ * iterate all underlying data devices until it encounters a non-zero return
+ * code, returned by whether the input iterate_devices_callout_fn, or
+ * iterate_devices() itself internally.
+ *
+ * For some target type (e.g. dm-stripe), one call of iterate_devices() may
+ * iterate multiple underlying devices internally, in which case a non-zero
+ * return code returned by iterate_devices_callout_fn will stop the iteration
+ * in advance.
+ *
+ * Cases requiring _any_ underlying device supporting some kind of attribute,
+ * should use the iteration structure like dm_table_any_dev_attr(), or call
+ * it directly. @func should handle semantics of positive examples, e.g.
+ * capable of something.
+ *
+ * Cases requiring _all_ underlying devices supporting some kind of attribute,
+ * should use the iteration structure like dm_table_supports_nowait() or
+ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
+ * uses an @anti_func that handle semantics of counter examples, e.g. not
+ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
+ */
+static bool dm_table_any_dev_attr(struct dm_table *t,
+				  iterate_devices_callout_fn func, void *data)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, func, data))
+			return true;
+	}
+
+	return false;
+}
+
 static int count_device(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
@@ -1430,13 +1470,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
 	return true;
 }
 
-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
 				 sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	enum blk_zoned_model *zoned_model = data;
 
-	return q && blk_queue_zoned_model(q) == *zoned_model;
+	return !q || blk_queue_zoned_model(q) != *zoned_model;
 }
 
 static bool dm_table_supports_zoned_model(struct dm_table *t,
@@ -1453,37 +1493,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
+		    ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
 			return false;
 	}
 
 	return true;
 }
 
-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
 				       sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	unsigned int *zone_sectors = data;
 
-	return q && blk_queue_zone_sectors(q) == *zone_sectors;
-}
-
-static bool dm_table_matches_zone_sectors(struct dm_table *t,
-					  unsigned int zone_sectors)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
-			return false;
-	}
-
-	return true;
+	return !q || blk_queue_zone_sectors(q) != *zone_sectors;
 }
 
 static int validate_hardware_zoned_model(struct dm_table *table,
@@ -1503,7 +1526,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
 	if (!zone_sectors || !is_power_of_2(zone_sectors))
 		return -EINVAL;
 
-	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
+	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
 		DMERR("%s: zone sectors is not consistent across all devices",
 		      dm_device_name(table->md));
 		return -EINVAL;
@@ -1741,29 +1764,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
 	return false;
 }
 
-static int dm_table_supports_dax_write_cache(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti,
-				device_dax_write_cache_enabled, NULL))
-			return true;
-	}
-
-	return false;
-}
-
-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
+static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
 			    sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && blk_queue_nonrot(q);
+	return q && !blk_queue_nonrot(q);
 }
 
 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1774,43 +1780,26 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
+static int queue_no_sg_merge(struct dm_target *ti, struct dm_dev *dev,
 			     sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
+	return q && test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 }
 
-static bool dm_table_all_devices_attribute(struct dm_table *t,
-					   iterate_devices_callout_fn func)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, func, NULL))
-			return false;
-	}
-
-	return true;
-}
-
-static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
+static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
 					sector_t start, sector_t len, void *data)
 {
 	char b[BDEVNAME_SIZE];
 
 	/* For now, NVMe devices are the only devices of this class */
-	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
+	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
 }
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
 {
-	return dm_table_all_devices_attribute(t, device_no_partial_completion);
+	return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
 }
 
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1937,27 +1926,6 @@ static int device_requires_stable_pages(struct dm_target *ti,
 	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
 }
 
-/*
- * If any underlying device requires stable pages, a table must require
- * them as well. Only targets that support iterate_devices are considered:
- * don't want error, zero, etc to require stable pages.
- */
-static bool dm_table_requires_stable_pages(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
-			return true;
-	}
-
-	return false;
-}
-
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
@@ -1994,24 +1962,24 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
 
-	if (dm_table_supports_dax_write_cache(t))
+	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
-	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-	else
+	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+	else
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
 	if (!dm_table_supports_write_zeroes(t))
 		q->limits.max_write_zeroes_sectors = 0;
 
-	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
-	else
+	if (dm_table_any_dev_attr(t, queue_no_sg_merge, NULL))
 		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
+	else
+		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
 
 	dm_table_verify_integrity(t);
 
@@ -2020,8 +1988,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	/*
 	 * Some devices don't use blk_integrity but still want stable pages
 	 * because they do their own checksumming.
+	 * If any underlying device requires stable pages, a table must require
+	 * them as well. Only targets that support iterate_devices are considered:
+	 * don't want error, zero, etc to require stable pages.
 	 */
-	if (dm_table_requires_stable_pages(t))
+	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 	else
 		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
@@ -2032,7 +2003,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
 	 * have it set.
 	 */
-	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+	if (blk_queue_add_random(q) &&
+	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
 	/* io_pages is used for readahead */
@@ -65,19 +65,18 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
 static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
 			   unsigned *offset, struct dm_buffer **buf)
 {
-	u64 position, block;
+	u64 position, block, rem;
 	u8 *res;
 
 	position = (index + rsb) * v->fec->roots;
-	block = position >> v->data_dev_block_bits;
-	*offset = (unsigned)(position - (block << v->data_dev_block_bits));
+	block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
+	*offset = (unsigned)rem;
 
-	res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
+	res = dm_bufio_read(v->fec->bufio, block, buf);
 	if (unlikely(IS_ERR(res))) {
 		DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
 		      v->data_dev->name, (unsigned long long)rsb,
-		      (unsigned long long)(v->fec->start + block),
-		      PTR_ERR(res));
+		      (unsigned long long)block, PTR_ERR(res));
 		*buf = NULL;
 	}
 
@@ -159,7 +158,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
 
 		/* read the next block when we run out of parity bytes */
 		offset += v->fec->roots;
-		if (offset >= 1 << v->data_dev_block_bits) {
+		if (offset >= v->fec->roots << SECTOR_SHIFT) {
 			dm_bufio_release(buf);
 
 			par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -675,7 +674,7 @@ int verity_fec_ctr(struct dm_verity *v)
 {
 	struct dm_verity_fec *f = v->fec;
 	struct dm_target *ti = v->ti;
-	u64 hash_blocks;
+	u64 hash_blocks, fec_blocks;
 	int ret;
 
 	if (!verity_fec_is_enabled(v)) {
@@ -745,15 +744,17 @@ int verity_fec_ctr(struct dm_verity *v)
 	}
 
 	f->bufio = dm_bufio_client_create(f->dev->bdev,
-					  1 << v->data_dev_block_bits,
+					  f->roots << SECTOR_SHIFT,
 					  1, 0, NULL, NULL);
 	if (IS_ERR(f->bufio)) {
 		ti->error = "Cannot initialize FEC bufio client";
 		return PTR_ERR(f->bufio);
 	}
 
-	if (dm_bufio_get_device_size(f->bufio) <
-	    ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) {
+	dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT));
+
+	fec_blocks = div64_u64(f->rounds * f->roots, v->fec->roots << SECTOR_SHIFT);
+	if (dm_bufio_get_device_size(f->bufio) < fec_blocks) {
 		ti->error = "FEC device is too small";
 		return -E2BIG;
 	}
@@ -2084,6 +2084,10 @@ static struct {
 	 * 0x1451 is PCI ID for the IOMMU found on Ryzen
 	 */
 	{ PCI_VENDOR_ID_AMD, 0x1451 },
+	/* According to sudo lspci -nn,
+	 * 0x1423 is the PCI ID for the IOMMU found on Kaveri
+	 */
+	{ PCI_VENDOR_ID_AMD, 0x1423 },
 };
 
 static bool cx23885_does_need_dma_reset(void)
@@ -2146,7 +2146,7 @@ static int msb_init_disk(struct memstick_dev *card)
 		set_disk_ro(msb->disk, 1);
 
 	msb_start(card);
-	device_add_disk(&card->dev, msb->disk);
+	device_add_disk(&card->dev, msb->disk, NULL);
 	dbg("Disk added");
 	return 0;
 
@@ -1236,7 +1236,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
 	set_capacity(msb->disk, capacity);
 	dev_dbg(&card->dev, "capacity set %ld\n", capacity);
 
-	device_add_disk(&card->dev, msb->disk);
+	device_add_disk(&card->dev, msb->disk, NULL);
 	msb->active = 1;
 	return 0;
 
@@ -38,6 +38,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
 		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
 };
 
+static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
+	.quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
+};
+
 struct eeprom_93xx46_dev {
 	struct spi_device *spi;
 	struct eeprom_93xx46_platform_data *pdata;
@@ -58,6 +62,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
 	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
 }
 
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
+}
+
 static int eeprom_93xx46_read(void *priv, unsigned int off,
 			      void *val, size_t count)
 {
@@ -99,6 +108,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
 		dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
 			cmd_addr, edev->spi->max_speed_hz);
 
+		if (has_quirk_extra_read_cycle(edev)) {
+			cmd_addr <<= 1;
+			bits += 1;
+		}
+
 		spi_message_init(&m);
 
 		t[0].tx_buf = (char *)&cmd_addr;
@@ -366,6 +380,7 @@ static void select_deassert(void *context)
 static const struct of_device_id eeprom_93xx46_of_table[] = {
 	{ .compatible = "eeprom-93xx46", },
 	{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+	{ .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
 	{}
 };
 MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
@@ -2674,7 +2674,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
 	int ret;
 	struct mmc_card *card = md->queue.card;
 
-	device_add_disk(md->parent, md->disk);
+	device_add_disk(md->parent, md->disk, NULL);
 	md->force_ro.show = force_ro_show;
 	md->force_ro.store = force_ro_store;
 	sysfs_attr_init(&md->force_ro.attr);
@@ -28,6 +28,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
 static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
 	.ops = &sdhci_dwcmshc_ops,
 	.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
 static int dwcmshc_probe(struct platform_device *pdev)
@@ -447,7 +447,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 	if (new->readonly)
 		set_disk_ro(gd, 1);
 
-	device_add_disk(&new->mtd->dev, gd);
+	device_add_disk(&new->mtd->dev, gd, NULL);
 
 	if (new->disk_attributes) {
 		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
@@ -4238,6 +4238,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
 
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
 	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
 	case RTL_GIGA_MAC_VER_37:
 	case RTL_GIGA_MAC_VER_39:
@@ -4265,6 +4266,7 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
 {
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
 	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
 	case RTL_GIGA_MAC_VER_37:
 	case RTL_GIGA_MAC_VER_39:
@@ -381,6 +381,8 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
 	clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
 	clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
 	mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+
+	card->pci_reset_ongoing = true;
 }
 
 /*
@@ -409,6 +411,8 @@ static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
 		dev_err(&pdev->dev, "reinit failed: %d\n", ret);
 	else
 		mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+
+	card->pci_reset_ongoing = false;
 }
 
 static const struct pci_error_handlers mwifiex_pcie_err_handler = {
@@ -3000,7 +3004,19 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
 	int ret;
 	u32 fw_status;
 
-	cancel_work_sync(&card->work);
+	/* Perform the cancel_work_sync() only when we're not resetting
+	 * the card. It's because that function never returns if we're
+	 * in reset path. If we're here when resetting the card, it means
+	 * that we failed to reset the card (reset failure path).
+	 */
+	if (!card->pci_reset_ongoing) {
+		mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n");
+		cancel_work_sync(&card->work);
+		mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n");
+	} else {
+		mwifiex_dbg(adapter, MSG,
+			    "skipped cancel_work_sync() because we're in card reset failure path\n");
+	}
 
 	ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
 	if (fw_status == FIRMWARE_READY_PCIE) {
@@ -391,6 +391,8 @@ struct pcie_service_card {
 	struct mwifiex_msix_context share_irq_ctx;
 	struct work_struct work;
 	unsigned long work_flags;
+
+	bool pci_reset_ongoing;
 };
 
 static inline int
@@ -290,7 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 	}
 
 	set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
-	device_add_disk(dev, disk);
+	device_add_disk(dev, disk, NULL);
 	revalidate_disk(disk);
 	return 0;
 }
@@ -1565,7 +1565,7 @@ static int btt_blk_init(struct btt *btt)
 		}
 	}
 	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
-	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
+	device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
 	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
 	revalidate_disk(btt->btt_disk);
 
@@ -482,7 +482,7 @@ static int pmem_attach_disk(struct device *dev,
 	gendev = disk_to_dev(disk);
 	gendev->groups = pmem_attribute_groups;
 
-	device_add_disk(dev, disk);
+	device_add_disk(dev, disk, NULL);
 	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
 		return -ENOMEM;
 
@@ -2842,6 +2842,14 @@ const struct attribute_group nvme_ns_id_attr_group = {
 	.is_visible	= nvme_ns_id_attrs_are_visible,
 };
 
+const struct attribute_group *nvme_ns_id_attr_groups[] = {
+	&nvme_ns_id_attr_group,
+#ifdef CONFIG_NVM
+	&nvme_nvm_attr_group,
+#endif
+	NULL,
+};
+
 #define nvme_show_str_function(field) \
 static ssize_t field##_show(struct device *dev, \
 		struct device_attribute *attr, char *buf) \
@@ -3211,14 +3219,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	nvme_get_ctrl(ctrl);
 
-	device_add_disk(ctrl->device, ns->disk);
-	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
-					&nvme_ns_id_attr_group))
-		pr_warn("%s: failed to create sysfs group for identification\n",
-			ns->disk->disk_name);
-	if (ns->ndev && nvme_nvm_register_sysfs(ns))
-		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
-			ns->disk->disk_name);
+	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
 
 	nvme_mpath_add_disk(ns, id);
 	nvme_fault_inject_init(ns);
@@ -3252,10 +3253,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
 
 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
-					&nvme_ns_id_attr_group);
-		if (ns->ndev)
-			nvme_nvm_unregister_sysfs(ns);
 		del_gendisk(ns->disk);
 		blk_cleanup_queue(ns->queue);
 		if (blk_get_integrity(ns->disk))
@@ -1193,42 +1193,6 @@ static NVM_DEV_ATTR_12_RO(multiplane_modes);
 static NVM_DEV_ATTR_12_RO(media_capabilities);
 static NVM_DEV_ATTR_12_RO(max_phys_secs);
 
-static struct attribute *nvm_dev_attrs_12[] = {
-	&dev_attr_version.attr,
-	&dev_attr_capabilities.attr,
-
-	&dev_attr_vendor_opcode.attr,
-	&dev_attr_device_mode.attr,
-	&dev_attr_media_manager.attr,
-	&dev_attr_ppa_format.attr,
-	&dev_attr_media_type.attr,
-	&dev_attr_flash_media_type.attr,
-	&dev_attr_num_channels.attr,
-	&dev_attr_num_luns.attr,
-	&dev_attr_num_planes.attr,
-	&dev_attr_num_blocks.attr,
-	&dev_attr_num_pages.attr,
-	&dev_attr_page_size.attr,
-	&dev_attr_hw_sector_size.attr,
-	&dev_attr_oob_sector_size.attr,
-	&dev_attr_read_typ.attr,
-	&dev_attr_read_max.attr,
-	&dev_attr_prog_typ.attr,
-	&dev_attr_prog_max.attr,
-	&dev_attr_erase_typ.attr,
-	&dev_attr_erase_max.attr,
-	&dev_attr_multiplane_modes.attr,
-	&dev_attr_media_capabilities.attr,
-	&dev_attr_max_phys_secs.attr,
-
-	NULL,
-};
-
-static const struct attribute_group nvm_dev_attr_group_12 = {
-	.name = "lightnvm",
-	.attrs = nvm_dev_attrs_12,
-};
-
 /* 2.0 values */
 static NVM_DEV_ATTR_20_RO(groups);
 static NVM_DEV_ATTR_20_RO(punits);
@@ -1244,10 +1208,37 @@ static NVM_DEV_ATTR_20_RO(write_max);
 static NVM_DEV_ATTR_20_RO(reset_typ);
 static NVM_DEV_ATTR_20_RO(reset_max);
 
-static struct attribute *nvm_dev_attrs_20[] = {
+static struct attribute *nvm_dev_attrs[] = {
+	/* version agnostic attrs */
 	&dev_attr_version.attr,
 	&dev_attr_capabilities.attr,
+	&dev_attr_read_typ.attr,
+	&dev_attr_read_max.attr,
+
+	/* 1.2 attrs */
+	&dev_attr_vendor_opcode.attr,
+	&dev_attr_device_mode.attr,
+	&dev_attr_media_manager.attr,
+	&dev_attr_ppa_format.attr,
+	&dev_attr_media_type.attr,
+	&dev_attr_flash_media_type.attr,
+	&dev_attr_num_channels.attr,
+	&dev_attr_num_luns.attr,
+	&dev_attr_num_planes.attr,
+	&dev_attr_num_blocks.attr,
+	&dev_attr_num_pages.attr,
+	&dev_attr_page_size.attr,
+	&dev_attr_hw_sector_size.attr,
+	&dev_attr_oob_sector_size.attr,
+	&dev_attr_prog_typ.attr,
+	&dev_attr_prog_max.attr,
+	&dev_attr_erase_typ.attr,
+	&dev_attr_erase_max.attr,
+	&dev_attr_multiplane_modes.attr,
+	&dev_attr_media_capabilities.attr,
+	&dev_attr_max_phys_secs.attr,
+
+	/* 2.0 attrs */
 	&dev_attr_groups.attr,
 	&dev_attr_punits.attr,
 	&dev_attr_chunks.attr,
@@ -1258,8 +1249,6 @@ static struct attribute *nvm_dev_attrs_20[] = {
 	&dev_attr_maxocpu.attr,
 	&dev_attr_mw_cunits.attr,
 
-	&dev_attr_read_typ.attr,
-	&dev_attr_read_max.attr,
 	&dev_attr_write_typ.attr,
 	&dev_attr_write_max.attr,
 	&dev_attr_reset_typ.attr,
@@ -1268,44 +1257,38 @@ static struct attribute *nvm_dev_attrs_20[] = {
 	NULL,
 };
 
-static const struct attribute_group nvm_dev_attr_group_20 = {
-	.name = "lightnvm",
-	.attrs = nvm_dev_attrs_20,
-};
-
-int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
+				     struct attribute *attr, int index)
 {
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct gendisk *disk = dev_to_disk(dev);
+	struct nvme_ns *ns = disk->private_data;
 	struct nvm_dev *ndev = ns->ndev;
-	struct nvm_geo *geo = &ndev->geo;
+	struct device_attribute *dev_attr =
+		container_of(attr, typeof(*dev_attr), attr);
 
 	if (!ndev)
-		return -EINVAL;
+		return 0;
 
-	switch (geo->major_ver_id) {
+	if (dev_attr->show == nvm_dev_attr_show)
+		return attr->mode;
+
+	switch (ndev->geo.major_ver_id) {
 	case 1:
-		return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
-					&nvm_dev_attr_group_12);
-	case 2:
-		return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
-					&nvm_dev_attr_group_20);
-	}
-
-	return -EINVAL;
-}
-
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
-{
-	struct nvm_dev *ndev = ns->ndev;
-	struct nvm_geo *geo = &ndev->geo;
-
-	switch (geo->major_ver_id) {
-	case 1:
-		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
-					&nvm_dev_attr_group_12);
+		if (dev_attr->show == nvm_dev_attr_show_12)
+			return attr->mode;
 		break;
 	case 2:
-		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
-					&nvm_dev_attr_group_20);
+		if (dev_attr->show == nvm_dev_attr_show_20)
+			return attr->mode;
 		break;
 	}
 
+	return 0;
 }
+
+const struct attribute_group nvme_nvm_attr_group = {
+	.name = "lightnvm",
+	.attrs = nvm_dev_attrs,
+	.is_visible = nvm_dev_attrs_visible,
+};
@@ -313,13 +313,9 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 	if (!head->disk)
 		return;
 
-	if (!(head->disk->flags & GENHD_FL_UP)) {
-		device_add_disk(&head->subsys->dev, head->disk);
-		if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
-				&nvme_ns_id_attr_group))
-			dev_warn(&head->subsys->dev,
-				 "failed to create id group.\n");
-	}
+	if (!(head->disk->flags & GENHD_FL_UP))
+		device_add_disk(&head->subsys->dev, head->disk,
+				nvme_ns_id_attr_groups);
 
 	synchronize_srcu(&ns->head->srcu);
 	kblockd_schedule_work(&ns->head->requeue_work);
@@ -541,11 +537,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
-	if (head->disk->flags & GENHD_FL_UP) {
-		sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
-				   &nvme_ns_id_attr_group);
+	if (head->disk->flags & GENHD_FL_UP)
 		del_gendisk(head->disk);
-	}
 	blk_set_queue_dying(head->disk->queue);
 	/* make sure all pending bios are cleaned up */
 	kblockd_schedule_work(&head->requeue_work);
@@ -464,7 +464,7 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
 		void *log, size_t size, u64 offset);
 
-extern const struct attribute_group nvme_ns_id_attr_group;
+extern const struct attribute_group *nvme_ns_id_attr_groups[];
 extern const struct block_device_operations nvme_ns_head_ops;
 
 #ifdef CONFIG_NVME_MULTIPATH
@@ -589,8 +589,7 @@ static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
 void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
 int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
 void nvme_nvm_unregister(struct nvme_ns *ns);
-int nvme_nvm_register_sysfs(struct nvme_ns *ns);
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
+extern const struct attribute_group nvme_nvm_attr_group;
 int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
 #else
 static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
@@ -601,11 +600,6 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
 }
 
 static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
-{
-	return 0;
-}
-static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
 static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
 						unsigned long arg)
 {
@@ -3961,6 +3961,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
 			 quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
+			 quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
 			 quirk_dma_func1_alias);
@@ -43,6 +43,7 @@
 #include <linux/input/sparse-keymap.h>
 #include <acpi/video.h>
 
+ACPI_MODULE_NAME(KBUILD_MODNAME);
 MODULE_AUTHOR("Carlos Corbacho");
 MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
 MODULE_LICENSE("GPL");
@@ -93,7 +94,7 @@ MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
 
 enum acer_wmi_event_ids {
 	WMID_HOTKEY_EVENT = 0x1,
-	WMID_ACCEL_EVENT = 0x5,
+	WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
 };
 
 static const struct key_entry acer_wmi_keymap[] __initconst = {
@@ -141,7 +142,9 @@ struct event_return_value {
 	u8 function;
 	u8 key_num;
 	u16 device_state;
-	u32 reserved;
+	u16 reserved1;
+	u8 kbd_dock_state;
+	u8 reserved2;
 } __attribute__((packed));
 
 /*
@@ -219,14 +222,13 @@ struct hotkey_function_type_aa {
 /*
  * Interface capability flags
  */
-#define ACER_CAP_MAILLED (1<<0)
-#define ACER_CAP_WIRELESS (1<<1)
-#define ACER_CAP_BLUETOOTH (1<<2)
-#define ACER_CAP_BRIGHTNESS (1<<3)
-#define ACER_CAP_THREEG (1<<4)
-#define ACER_CAP_ACCEL (1<<5)
-#define ACER_CAP_RFBTN (1<<6)
-#define ACER_CAP_ANY (0xFFFFFFFF)
+#define ACER_CAP_MAILLED BIT(0)
+#define ACER_CAP_WIRELESS BIT(1)
+#define ACER_CAP_BLUETOOTH BIT(2)
+#define ACER_CAP_BRIGHTNESS BIT(3)
+#define ACER_CAP_THREEG BIT(4)
+#define ACER_CAP_SET_FUNCTION_MODE BIT(5)
+#define ACER_CAP_KBD_DOCK BIT(6)
 
 /*
  * Interface type flags
@@ -249,6 +251,7 @@ static int mailled = -1;
 static int brightness = -1;
 static int threeg = -1;
 static int force_series;
+static int force_caps = -1;
 static bool ec_raw_mode;
 static bool has_type_aa;
 static u16 commun_func_bitmap;
@@ -258,11 +261,13 @@ module_param(mailled, int, 0444);
 module_param(brightness, int, 0444);
 module_param(threeg, int, 0444);
 module_param(force_series, int, 0444);
+module_param(force_caps, int, 0444);
 module_param(ec_raw_mode, bool, 0444);
 MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
 MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
 MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
 MODULE_PARM_DESC(force_series, "Force a different laptop series");
+MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value");
 MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
 
 struct acer_data {
@@ -333,6 +338,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
 	return 1;
 }
 
+static int __init set_force_caps(const struct dmi_system_id *dmi)
+{
+	if (force_caps == -1) {
+		force_caps = (uintptr_t)dmi->driver_data;
+		pr_info("Found %s, set force_caps to 0x%x\n", dmi->ident, force_caps);
+	}
+	return 1;
+}
+
 static struct quirk_entry quirk_unknown = {
 };
 
@@ -511,6 +525,33 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
 		},
 		.driver_data = &quirk_acer_travelmate_2490,
 	},
+	{
+		.callback = set_force_caps,
+		.ident = "Acer Aspire Switch 10E SW3-016",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-016"),
+		},
+		.driver_data = (void *)ACER_CAP_KBD_DOCK,
+	},
+	{
+		.callback = set_force_caps,
+		.ident = "Acer Aspire Switch 10 SW5-012",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+		},
+		.driver_data = (void *)ACER_CAP_KBD_DOCK,
+	},
+	{
+		.callback = set_force_caps,
+		.ident = "Acer One 10 (S1003)",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
+		},
+		.driver_data = (void *)ACER_CAP_KBD_DOCK,
+	},
 	{}
 };
 
@@ -1266,10 +1307,8 @@ static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d)
 		interface->capability |= ACER_CAP_THREEG;
 	if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH)
 		interface->capability |= ACER_CAP_BLUETOOTH;
-	if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) {
-		interface->capability |= ACER_CAP_RFBTN;
+	if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN)
 		commun_func_bitmap &= ~ACER_WMID3_GDS_RFBTN;
-	}
 
 	commun_fn_key_number = type_aa->commun_fn_key_number;
 }
@@ -1530,7 +1569,7 @@ static int acer_gsensor_event(void)
 	struct acpi_buffer output;
 	union acpi_object out_obj[5];
 
-	if (!has_cap(ACER_CAP_ACCEL))
+	if (!acer_wmi_accel_dev)
 		return -1;
 
 	output.length = sizeof(out_obj);
@@ -1553,6 +1592,71 @@ static int acer_gsensor_event(void)
 	return 0;
 }
 
+/*
+ * Switch series keyboard dock status
+ */
+static int acer_kbd_dock_state_to_sw_tablet_mode(u8 kbd_dock_state)
+{
+	switch (kbd_dock_state) {
+	case 0x01: /* Docked, traditional clamshell laptop mode */
+		return 0;
+	case 0x04: /* Stand-alone tablet */
+	case 0x40: /* Docked, tent mode, keyboard not usable */
+		return 1;
+	default:
+		pr_warn("Unknown kbd_dock_state 0x%02x\n", kbd_dock_state);
+	}
+
+	return 0;
+}
+
+static void acer_kbd_dock_get_initial_state(void)
+{
+	u8 *output, input[8] = { 0x05, 0x00, };
+	struct acpi_buffer input_buf = { sizeof(input), input };
+	struct acpi_buffer output_buf = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_status status;
+	int sw_tablet_mode;
+
+	status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input_buf, &output_buf);
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status, "Error getting keyboard-dock initial status"));
+		return;
+	}
+
+	obj = output_buf.pointer;
+	if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) {
+		pr_err("Unexpected output format getting keyboard-dock initial status\n");
+		goto out_free_obj;
+	}
+
+	output = obj->buffer.pointer;
+	if (output[0] != 0x00 || (output[3] != 0x05 && output[3] != 0x45)) {
+		pr_err("Unexpected output [0]=0x%02x [3]=0x%02x getting keyboard-dock initial status\n",
+		       output[0], output[3]);
+		goto out_free_obj;
+	}
+
+	sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(output[4]);
+	input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode);
+
+out_free_obj:
+	kfree(obj);
+}
+
+static void acer_kbd_dock_event(const struct event_return_value *event)
+{
+	int sw_tablet_mode;
+
+	if (!has_cap(ACER_CAP_KBD_DOCK))
+		return;
+
+	sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(event->kbd_dock_state);
+	input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode);
+	input_sync(acer_wmi_input_dev);
+}
+
 /*
  * Rfkill devices
  */
@@ -1780,8 +1884,9 @@ static void acer_wmi_notify(u32 value, void *context)
 			sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
 		}
 		break;
-	case WMID_ACCEL_EVENT:
+	case WMID_ACCEL_OR_KBD_DOCK_EVENT:
 		acer_gsensor_event();
+		acer_kbd_dock_event(&return_value);
 		break;
 	default:
 		pr_warn("Unknown function number - %d - %d\n",
@@ -1939,8 +2044,6 @@ static int __init acer_wmi_accel_setup(void)
 	if (err)
 		return err;
 
-	interface->capability |= ACER_CAP_ACCEL;
-
 	acer_wmi_accel_dev = input_allocate_device();
 	if (!acer_wmi_accel_dev)
 		return -ENOMEM;
@@ -1966,11 +2069,6 @@ static int __init acer_wmi_accel_setup(void)
 	return err;
 }
 
-static void acer_wmi_accel_destroy(void)
-{
-	input_unregister_device(acer_wmi_accel_dev);
-}
-
 static int __init acer_wmi_input_setup(void)
 {
 	acpi_status status;
@@ -1988,6 +2086,9 @@ static int __init acer_wmi_input_setup(void)
 	if (err)
 		goto err_free_dev;
 
+	if (has_cap(ACER_CAP_KBD_DOCK))
+		input_set_capability(acer_wmi_input_dev, EV_SW, SW_TABLET_MODE);
+
 	status = wmi_install_notify_handler(ACERWMID_EVENT_GUID,
 						acer_wmi_notify, NULL);
 	if (ACPI_FAILURE(status)) {
@@ -1995,6 +2096,9 @@ static int __init acer_wmi_input_setup(void)
 		goto err_free_dev;
 	}
 
+	if (has_cap(ACER_CAP_KBD_DOCK))
+		acer_kbd_dock_get_initial_state();
+
 	err = input_register_device(acer_wmi_input_dev);
 	if (err)
 		goto err_uninstall_notifier;
@@ -2125,7 +2229,7 @@ static int acer_resume(struct device *dev)
 	if (has_cap(ACER_CAP_BRIGHTNESS))
 		set_u32(data->brightness, ACER_CAP_BRIGHTNESS);
 
-	if (has_cap(ACER_CAP_ACCEL))
+	if (acer_wmi_accel_dev)
 		acer_gsensor_init();
 
 	return 0;
@@ -2240,7 +2344,7 @@ static int __init acer_wmi_init(void)
 		}
 		/* WMID always provides brightness methods */
 		interface->capability |= ACER_CAP_BRIGHTNESS;
-	} else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) {
+	} else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa && force_caps == -1) {
 		pr_err("No WMID device detection method found\n");
 		return -ENODEV;
 	}
@@ -2270,7 +2374,14 @@ static int __init acer_wmi_init(void)
 	if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
 		interface->capability &= ~ACER_CAP_BRIGHTNESS;
 
-	if (wmi_has_guid(WMID_GUID3)) {
+	if (wmi_has_guid(WMID_GUID3))
+		interface->capability |= ACER_CAP_SET_FUNCTION_MODE;
+
+	if (force_caps != -1)
+		interface->capability = force_caps;
+
+	if (wmi_has_guid(WMID_GUID3) &&
+	    (interface->capability & ACER_CAP_SET_FUNCTION_MODE)) {
 		if (ACPI_FAILURE(acer_wmi_enable_rf_button()))
 			pr_warn("Cannot enable RF Button Driver\n");
 
@@ -2333,8 +2444,8 @@ static int __init acer_wmi_init(void)
 error_platform_register:
 	if (wmi_has_guid(ACERWMID_EVENT_GUID))
 		acer_wmi_input_destroy();
-	if (has_cap(ACER_CAP_ACCEL))
-		acer_wmi_accel_destroy();
+	if (acer_wmi_accel_dev)
+		input_unregister_device(acer_wmi_accel_dev);
 
 	return err;
 }
@@ -2344,8 +2455,8 @@ static void __exit acer_wmi_exit(void)
 	if (wmi_has_guid(ACERWMID_EVENT_GUID))
 		acer_wmi_input_destroy();
 
-	if (has_cap(ACER_CAP_ACCEL))
-		acer_wmi_accel_destroy();
+	if (acer_wmi_accel_dev)
+		input_unregister_device(acer_wmi_accel_dev);
 
 	remove_debugfs();
 	platform_device_unregister(acer_platform_device);
@@ -76,7 +76,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
 	gdp->queue = block->request_queue;
 	block->gdp = gdp;
 	set_capacity(block->gdp, 0);
-	device_add_disk(&base->cdev->dev, block->gdp);
+	device_add_disk(&base->cdev->dev, block->gdp, NULL);
 	return 0;
 }
 
@@ -685,7 +685,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	}
 
 	get_device(&dev_info->dev);
-	device_add_disk(&dev_info->dev, dev_info->gd);
+	device_add_disk(&dev_info->dev, dev_info->gd, NULL);
 
 	switch (dev_info->segment_type) {
 	case SEG_TYPE_SR:
@@ -500,7 +500,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 
 	/* 512 byte sectors */
 	set_capacity(bdev->gendisk, scmdev->size >> 9);
-	device_add_disk(&scmdev->dev, bdev->gendisk);
+	device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
 	return 0;
 
 out_queue:
@@ -3359,7 +3359,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 		pm_runtime_set_autosuspend_delay(dev,
 			sdp->host->hostt->rpm_autosuspend_delay);
 	}
-	device_add_disk(dev, gd);
+	device_add_disk(dev, gd, NULL);
 	if (sdkp->capacity)
 		sd_dif_config_host(sdkp);
 
@@ -758,7 +758,7 @@ static int sr_probe(struct device *dev)
 
 	dev_set_drvdata(dev, cd);
 	disk->flags |= GENHD_FL_REMOVABLE;
-	device_add_disk(&sdev->sdev_gendev, disk);
+	device_add_disk(&sdev->sdev_gendev, disk, NULL);
 
 	sdev_printk(KERN_DEBUG, sdev,
 		    "Attached scsi CD-ROM %s\n", cd->cdi.name);
@@ -642,7 +642,7 @@ static int btrfs_delayed_inode_reserve_metadata(
 					      btrfs_ino(inode),
 					      num_bytes, 1);
 	} else {
-		btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
+		btrfs_qgroup_free_meta_prealloc(root, num_bytes);
 	}
 	return ret;
 }
@@ -3016,8 +3016,11 @@ static int btrfs_zero_range(struct inode *inode,
 			goto out;
 		ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
 				alloc_start, bytes_to_reserve);
-		if (ret)
+		if (ret) {
+			unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+					     lockend, &cached_state);
 			goto out;
+		}
 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
 						alloc_end - alloc_start,
 						i_blocksize(inode),
@@ -1842,7 +1842,10 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
 	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
 		readonly = true;
 	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
-		if (vol_args->size > PAGE_SIZE) {
+		u64 nums;
+
+		if (vol_args->size < sizeof(*inherit) ||
+		    vol_args->size > PAGE_SIZE) {
 			ret = -EINVAL;
 			goto free_args;
 		}
@@ -1851,6 +1854,20 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
 			ret = PTR_ERR(inherit);
 			goto free_args;
 		}
+
+		if (inherit->num_qgroups > PAGE_SIZE ||
+		    inherit->num_ref_copies > PAGE_SIZE ||
+		    inherit->num_excl_copies > PAGE_SIZE) {
+			ret = -EINVAL;
+			goto free_inherit;
+		}
+
+		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
+		       2 * inherit->num_excl_copies;
+		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
+			ret = -EINVAL;
+			goto free_inherit;
+		}
 	}
 
 	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
@@ -1182,22 +1182,19 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 	int nr_data = rbio->nr_data;
 	int stripe;
 	int pagenr;
-	int p_stripe = -1;
-	int q_stripe = -1;
+	bool has_qstripe;
 	struct bio_list bio_list;
 	struct bio *bio;
 	int ret;
 
 	bio_list_init(&bio_list);
 
-	if (rbio->real_stripes - rbio->nr_data == 1) {
-		p_stripe = rbio->real_stripes - 1;
-	} else if (rbio->real_stripes - rbio->nr_data == 2) {
-		p_stripe = rbio->real_stripes - 2;
-		q_stripe = rbio->real_stripes - 1;
-	} else {
+	if (rbio->real_stripes - rbio->nr_data == 1)
+		has_qstripe = false;
+	else if (rbio->real_stripes - rbio->nr_data == 2)
+		has_qstripe = true;
+	else
 		BUG();
-	}
 
 	/* at this point we either have a full stripe,
 	 * or we've read the full stripe from the drive.
@@ -1241,7 +1238,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 		SetPageUptodate(p);
 		pointers[stripe++] = kmap(p);
 
-		if (q_stripe != -1) {
+		if (has_qstripe) {
 
 			/*
 			 * raid6, add the qstripe and call the
@@ -2340,8 +2337,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 	int nr_data = rbio->nr_data;
 	int stripe;
 	int pagenr;
-	int p_stripe = -1;
-	int q_stripe = -1;
+	bool has_qstripe;
 	struct page *p_page = NULL;
 	struct page *q_page = NULL;
 	struct bio_list bio_list;
@@ -2351,14 +2347,12 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 
 	bio_list_init(&bio_list);
 
-	if (rbio->real_stripes - rbio->nr_data == 1) {
-		p_stripe = rbio->real_stripes - 1;
-	} else if (rbio->real_stripes - rbio->nr_data == 2) {
-		p_stripe = rbio->real_stripes - 2;
-		q_stripe = rbio->real_stripes - 1;
-	} else {
+	if (rbio->real_stripes - rbio->nr_data == 1)
+		has_qstripe = false;
+	else if (rbio->real_stripes - rbio->nr_data == 2)
+		has_qstripe = true;
+	else
 		BUG();
-	}
 
 	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
 		is_replace = 1;
@@ -2380,17 +2374,22 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 		goto cleanup;
 	SetPageUptodate(p_page);
 
-	if (q_stripe != -1) {
+	if (has_qstripe) {
+		/* RAID6, allocate and map temp space for the Q stripe */
 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
 		if (!q_page) {
 			__free_page(p_page);
 			goto cleanup;
 		}
 		SetPageUptodate(q_page);
+		pointers[rbio->real_stripes - 1] = kmap(q_page);
 	}
 
 	atomic_set(&rbio->error, 0);
 
+	/* Map the parity stripe just once */
+	pointers[nr_data] = kmap(p_page);
+
 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
 		struct page *p;
 		void *parity;
@@ -2400,17 +2399,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 			pointers[stripe] = kmap(p);
 		}
 
-		/* then add the parity stripe */
-		pointers[stripe++] = kmap(p_page);
-
-		if (q_stripe != -1) {
-
-			/*
-			 * raid6, add the qstripe and call the
-			 * library function to fill in our p/q
-			 */
-			pointers[stripe++] = kmap(q_page);
-
+		if (has_qstripe) {
+			/* RAID6, call the library function to fill in our P/Q */
 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
 						pointers);
 		} else {
@@ -2431,12 +2421,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 
 		for (stripe = 0; stripe < nr_data; stripe++)
 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
-		kunmap(p_page);
 	}
 
+	kunmap(p_page);
 	__free_page(p_page);
-	if (q_page)
+	if (q_page) {
+		kunmap(q_page);
 		__free_page(q_page);
+	}
 
 writeback:
 	/*
@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data {
 #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
 /* Instructions such as EWEN are (addrlen + 2) in length. */
 #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
+/* Add extra cycle after address during a read */
+#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
 
 /*
  * optional hooks to control additional logic
@@ -417,10 +417,11 @@ static inline void free_part_info(struct hd_struct *part)
 extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
 
 /* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk);
+extern void device_add_disk(struct device *parent, struct gendisk *disk,
+			    const struct attribute_group **groups);
 static inline void add_disk(struct gendisk *disk)
 {
-	device_add_disk(NULL, disk);
+	device_add_disk(NULL, disk, NULL);
 }
 extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
 static inline void add_disk_no_queue_reg(struct gendisk *disk)
@@ -8,6 +8,7 @@ config NET_DSA
 	tristate "Distributed Switch Architecture"
 	depends on HAVE_NET_DSA && MAY_USE_DEVLINK
 	depends on BRIDGE || BRIDGE=n
+	select GRO_CELLS
 	select NET_SWITCHDEV
 	select PHYLINK
 	---help---
@@ -191,7 +191,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (dsa_skb_defer_rx_timestamp(p, skb))
 		return 0;
 
-	netif_receive_skb(skb);
+	gro_cells_receive(&p->gcells, skb);
 
 	return 0;
 }
@@ -15,6 +15,7 @@
 #include <linux/netdevice.h>
 #include <linux/netpoll.h>
 #include <net/dsa.h>
+#include <net/gro_cells.h>
 
 enum {
 	DSA_NOTIFIER_AGEING_TIME,
@@ -72,6 +73,8 @@ struct dsa_slave_priv {
 
 	struct pcpu_sw_netstats *stats64;
 
+	struct gro_cells gcells;
+
 	/* DSA port data, such as switch, port index, etc. */
 	struct dsa_port *dp;
 
@@ -1337,6 +1337,11 @@ int dsa_slave_create(struct dsa_port *port)
 		free_netdev(slave_dev);
 		return -ENOMEM;
 	}
+
+	ret = gro_cells_init(&p->gcells, slave_dev);
+	if (ret)
+		goto out_free;
+
 	p->dp = port;
 	INIT_LIST_HEAD(&p->mall_tc_list);
 	p->xmit = cpu_dp->tag_ops->xmit;
@@ -1347,7 +1352,7 @@ int dsa_slave_create(struct dsa_port *port)
 	ret = dsa_slave_phy_setup(slave_dev);
 	if (ret) {
 		netdev_err(master, "error %d setting up slave phy\n", ret);
-		goto out_free;
+		goto out_gcells;
 	}
 
 	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
@@ -1366,6 +1371,8 @@ int dsa_slave_create(struct dsa_port *port)
 	phylink_disconnect_phy(p->dp->pl);
 	rtnl_unlock();
 	phylink_destroy(p->dp->pl);
+out_gcells:
+	gro_cells_destroy(&p->gcells);
 out_free:
 	free_percpu(p->stats64);
 	free_netdev(slave_dev);
@@ -1386,6 +1393,7 @@ void dsa_slave_destroy(struct net_device *slave_dev)
 	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
 	unregister_netdev(slave_dev);
 	phylink_destroy(dp->pl);
+	gro_cells_destroy(&p->gcells);
 	free_percpu(p->stats64);
 	free_netdev(slave_dev);
 }
@@ -995,7 +995,7 @@ static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf)
 
 	if (idx < 4) {
 		/* S/PDIF output */
-		switch ((conf & 0x7)) {
+		switch ((conf & 0xf)) {
 		case 1:
 			set_field(&ctl->txctl[idx], ATXCTL_NUC, 0);
 			break;
@@ -444,6 +444,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
 					BYT_RT5640_SSP0_AIF1 |
 					BYT_RT5640_MCLK_EN),
 	},
+	{
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 140 CESIUM"),
+		},
+		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
+					BYT_RT5640_JD_SRC_JD2_IN4N |
+					BYT_RT5640_OVCD_TH_2000UA |
+					BYT_RT5640_OVCD_SF_0P75 |
+					BYT_RT5640_SSP0_AIF1 |
+					BYT_RT5640_MCLK_EN),
+	},
 	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -35,7 +35,7 @@
 #include "list.h"
 #include "sysfs_utils.h"
 
-struct udev *udev_context;
+extern struct udev *udev_context;
 
 static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
 {