22 commits
99ce885  Merge branch 'block-6.19' into for-next (axboe, Dec 28, 2025)
a8a3489  Merge branch 'for-7.0/block' into for-next (axboe, Dec 28, 2025)
88f8587  Merge branch 'for-7.0/io_uring' into for-next (axboe, Dec 28, 2025)
8cf7794  Merge branch 'for-7.0/block' into for-next (axboe, Dec 29, 2025)
734b65d  Merge branch 'block-6.19' into for-next (axboe, Dec 30, 2025)
4132042  Merge branch 'io_uring-6.19' into for-next (axboe, Dec 31, 2025)
1f5f808  Merge branch 'block-6.19' into for-next (axboe, Dec 31, 2025)
dc58434  Merge branch 'io_uring-6.19' into for-next (axboe, Jan 1, 2026)
528478e  Merge branch 'block-6.19' into for-next (axboe, Jan 1, 2026)
38b882a  Merge branch 'io_uring-6.19' into for-next (axboe, Jan 5, 2026)
013d419  Merge branch 'for-7.0/io_uring' into for-next (axboe, Jan 6, 2026)
362d412  Merge branch 'for-7.0/block' into for-next (axboe, Jan 6, 2026)
29cefd6  Merge branch 'block-6.19' into for-next (axboe, Jan 6, 2026)
073b9bf  nvme-pci: Use size_t for length fields to handle larger sizes (rleon, Dec 17, 2025)
fcf463b  types: move phys_vec definition to common header (rleon, Dec 17, 2025)
5ee81d4  Merge branch 'for-7.0/blk-pvec' into for-next (axboe, Jan 6, 2026)
0427c68  Merge branch 'block-6.19' into for-next (axboe, Jan 7, 2026)
faf0be9  Merge branch 'block-6.19' into for-next (axboe, Jan 7, 2026)
17637da  Merge branch 'block-6.19' into for-next (axboe, Jan 7, 2026)
8ba16e4  Merge branch 'for-7.0/block' into for-next (axboe, Jan 7, 2026)
28fd54b  Merge branch 'block-6.19' into for-next (axboe, Jan 7, 2026)
f274b49  Dummy commit (kawasaki, Jan 9, 2026)
block/bfq-cgroup.c: 1 addition & 1 deletion
@@ -380,7 +380,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
blkg_rwstat_add_aux(&to->merged, &from->merged);
blkg_rwstat_add_aux(&to->service_time, &from->service_time);
blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
-	bfq_stat_add_aux(&from->time, &from->time);
+	bfq_stat_add_aux(&to->time, &from->time);
bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
bfq_stat_add_aux(&to->avg_queue_size_samples,
&from->avg_queue_size_samples);
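The fix above is a classic copy-paste bug: the old line folded from->time into itself, so the parent group's time counter never received the child's contribution when stats were transferred. A minimal user-space model of the intended aggregation (names illustrative, not the kernel API):

    #include <assert.h>

    struct stat_cnt { unsigned long aux_cnt; };

    /* model of bfq_stat_add_aux(): fold a child counter into its parent */
    static void stat_add_aux(struct stat_cnt *to, struct stat_cnt *from)
    {
        to->aux_cnt += from->aux_cnt;
    }

    int main(void)
    {
        struct stat_cnt parent = { 10 }, child = { 5 };

        /* the buggy form, stat_add_aux(&child, &child), doubles the
         * child and leaves the parent untouched */
        stat_add_aux(&parent, &child);
        assert(parent.aux_cnt == 15);
        return 0;
    }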
block/bfq-iosched.h: 1 addition & 1 deletion
@@ -984,7 +984,7 @@ struct bfq_group_data {
* unused for the root group. Used to know whether there
* are groups with more than one active @bfq_entity
* (see the comments to the function
-	 * bfq_bfqq_may_idle()).
+	 * bfq_better_to_idle()).
* @rq_pos_tree: rbtree sorted by next_request position, used when
* determining if two or more queues have interleaving
* requests (see bfq_find_close_cooperator()).
block/blk-integrity.c: 18 additions & 5 deletions
@@ -140,14 +140,21 @@ EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
struct request *next)
{
+	struct bio_integrity_payload *bip, *bip_next;
+
if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
return true;

if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
return false;

-	if (bio_integrity(req->bio)->bip_flags !=
-	    bio_integrity(next->bio)->bip_flags)
+	bip = bio_integrity(req->bio);
+	bip_next = bio_integrity(next->bio);
+	if (bip->bip_flags != bip_next->bip_flags)
return false;

+	if (bip->bip_flags & BIP_CHECK_APPTAG &&
+	    bip->app_tag != bip_next->app_tag)
+		return false;
+
if (req->nr_integrity_segments + next->nr_integrity_segments >
@@ -163,15 +170,21 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
struct bio *bio)
{
+	struct bio_integrity_payload *bip, *bip_bio = bio_integrity(bio);
int nr_integrity_segs;

-	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
+	if (blk_integrity_rq(req) == 0 && bip_bio == NULL)
return true;

-	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
+	if (blk_integrity_rq(req) == 0 || bip_bio == NULL)
return false;

-	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
+	bip = bio_integrity(req->bio);
+	if (bip->bip_flags != bip_bio->bip_flags)
+		return false;
+
+	if (bip->bip_flags & BIP_CHECK_APPTAG &&
+	    bip->app_tag != bip_bio->app_tag)
return false;

nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
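Both merge helpers now apply the same predicate: integrity payloads are compatible only if their flags match and, when BIP_CHECK_APPTAG is set, their application tags match as well, since a merged request can carry only one app tag for the device to verify. A condensed stand-alone model of that predicate (simplified fields; the flag bit is a stand-in, not the kernel's value):

    #include <stdbool.h>
    #include <stdint.h>

    #define BIP_CHECK_APPTAG (1 << 3)   /* stand-in bit, not the kernel value */

    struct bip_model {
        unsigned short bip_flags;
        uint16_t app_tag;
    };

    /* true if two integrity payloads may be merged into one request */
    static bool integrity_mergeable(const struct bip_model *a,
                                    const struct bip_model *b)
    {
        if (a->bip_flags != b->bip_flags)
            return false;
        /* app tags only matter when the device is asked to check them */
        if ((a->bip_flags & BIP_CHECK_APPTAG) && a->app_tag != b->app_tag)
            return false;
        return true;
    }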
block/blk-mq-dma.c: 5 additions & 6 deletions
@@ -6,11 +6,6 @@
#include <linux/blk-mq-dma.h>
#include "blk.h"

-struct phys_vec {
-	phys_addr_t paddr;
-	u32 len;
-};
-
static bool __blk_map_iter_next(struct blk_map_iter *iter)
{
if (iter->iter.bi_size)
@@ -112,8 +107,8 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
struct phys_vec *vec)
{
enum dma_data_direction dir = rq_dma_dir(req);
-	unsigned int mapped = 0;
unsigned int attrs = 0;
+	size_t mapped = 0;
int error;

iter->addr = state->addr;
@@ -297,6 +292,8 @@ int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
blk_rq_map_iter_init(rq, &iter);
while (blk_map_iter_next(rq, &iter, &vec)) {
*last_sg = blk_next_sg(last_sg, sglist);

+		WARN_ON_ONCE(overflows_type(vec.len, unsigned int));
sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
offset_in_page(vec.paddr));
nsegs++;
@@ -417,6 +414,8 @@ int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)

while (blk_map_iter_next(rq, &iter, &vec)) {
sg = blk_next_sg(&sg, sglist);

+		WARN_ON_ONCE(overflows_type(vec.len, unsigned int));
sg_set_page(sg, phys_to_page(vec.paddr), vec.len,
offset_in_page(vec.paddr));
segments++;
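With struct phys_vec now coming from a common header (fcf463b above) and length fields widened to size_t (073b9bf), the scatterlist paths still narrow each segment length to the unsigned int that sg_set_page() takes, so the new WARN_ON_ONCE(overflows_type(...)) makes any silent truncation loud. A sketch of the same guard, assuming the overflows_type() macro from <linux/overflow.h>:

    #include <linux/bug.h>
    #include <linux/overflow.h>
    #include <linux/types.h>

    /* narrow a size_t segment length to what sg_set_page() accepts,
     * warning once if the value does not fit */
    static inline unsigned int sg_len_checked(size_t len)
    {
        WARN_ON_ONCE(overflows_type(len, unsigned int));
        return len;    /* truncates above UINT_MAX on 64-bit */
    }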
block/blk-mq.c: 2 additions & 3 deletions
@@ -3721,7 +3721,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
struct blk_mq_hw_ctx, cpuhp_online);
int ret = 0;

-	if (blk_mq_hctx_has_online_cpu(hctx, cpu))
+	if (!hctx->nr_ctx || blk_mq_hctx_has_online_cpu(hctx, cpu))
return 0;

/*
@@ -4553,8 +4553,7 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
* Make sure reading the old queue_hw_ctx from other
* context concurrently won't trigger uaf.
*/
-		synchronize_rcu_expedited();
-		kfree(hctxs);
+		kfree_rcu_mightsleep(hctxs);
hctxs = new_hctxs;
}

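kfree_rcu_mightsleep() collapses the expedited grace period plus kfree() into one call: the old queue_hw_ctx array is freed only after concurrent RCU readers are done, no rcu_head needs to live inside the object, and the call itself may sleep (it falls back to synchronize_rcu() when it cannot allocate tracking memory), which is fine in this sleepable resize path. A hedged sketch of the pattern:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* hypothetical helper: retire an array that RCU readers may still
     * be traversing, from sleepable (non-atomic) context */
    static void retire_old_table(void *old)
    {
        /*
         * Readers that picked up 'old' under rcu_read_lock() finish
         * before the free runs; unlike kfree_rcu(ptr, rhf), no
         * rcu_head is embedded, hence the "mightsleep" fallback.
         */
        kfree_rcu_mightsleep(old);
    }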
block/blk-rq-qos.h: 9 additions & 16 deletions
@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos && !blk_rq_is_passthrough(rq))
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+	    q->rq_qos && !blk_rq_is_passthrough(rq))
__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_requeue(q->rq_qos, rq);
}

@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
bio_set_flag(bio, BIO_QOS_THROTTLED);
__rq_qos_throttle(q->rq_qos, bio);
}
@@ -172,25 +168,22 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
struct bio *bio)
{
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
bio_set_flag(bio, BIO_QOS_MERGED);
__rq_qos_merge(q->rq_qos, rq, bio);
}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_queue_depth_changed(q->rq_qos);
}

drivers/block/loop.c: 33 additions & 12 deletions
@@ -1225,16 +1225,28 @@ static int loop_clr_fd(struct loop_device *lo)
}

static int
-loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+loop_set_status(struct loop_device *lo, blk_mode_t mode,
+		struct block_device *bdev, const struct loop_info64 *info)
{
int err;
bool partscan = false;
bool size_changed = false;
unsigned int memflags;

+	/*
+	 * If we don't hold exclusive handle for the device, upgrade to it
+	 * here to avoid changing device under exclusive owner.
+	 */
+	if (!(mode & BLK_OPEN_EXCL)) {
+		err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
+		if (err)
+			goto out_reread_partitions;
+	}
+
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
-		return err;
+		goto out_abort_claiming;
+
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
@@ -1273,6 +1285,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
}
out_unlock:
mutex_unlock(&lo->lo_mutex);
+out_abort_claiming:
+	if (!(mode & BLK_OPEN_EXCL))
+		bd_abort_claiming(bdev, loop_set_status);
+out_reread_partitions:
if (partscan)
loop_reread_partitions(lo);

@@ -1352,25 +1368,29 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
}

static int
-loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
+loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
+		    struct block_device *bdev,
+		    const struct loop_info __user *arg)
{
struct loop_info info;
struct loop_info64 info64;

if (copy_from_user(&info, arg, sizeof (struct loop_info)))
return -EFAULT;
loop_info64_from_old(&info, &info64);
-	return loop_set_status(lo, &info64);
+	return loop_set_status(lo, mode, bdev, &info64);
}

static int
-loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
+loop_set_status64(struct loop_device *lo, blk_mode_t mode,
+		  struct block_device *bdev,
+		  const struct loop_info64 __user *arg)
{
struct loop_info64 info64;

if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
return -EFAULT;
-	return loop_set_status(lo, &info64);
+	return loop_set_status(lo, mode, bdev, &info64);
}

static int
@@ -1549,14 +1569,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
case LOOP_SET_STATUS:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_status_old(lo, argp);
+			err = loop_set_status_old(lo, mode, bdev, argp);
break;
case LOOP_GET_STATUS:
return loop_get_status_old(lo, argp);
case LOOP_SET_STATUS64:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_status64(lo, argp);
+			err = loop_set_status64(lo, mode, bdev, argp);
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
@@ -1650,16 +1670,17 @@ loop_info64_to_compat(const struct loop_info64 *info64,
}

static int
-loop_set_status_compat(struct loop_device *lo,
-		       const struct compat_loop_info __user *arg)
+loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
+		       struct block_device *bdev,
+		       const struct compat_loop_info __user *arg)
{
struct loop_info64 info64;
int ret;

ret = loop_info64_from_compat(arg, &info64);
if (ret < 0)
return ret;
-	return loop_set_status(lo, &info64);
+	return loop_set_status(lo, mode, bdev, &info64);
}

static int
@@ -1685,7 +1706,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,

switch(cmd) {
case LOOP_SET_STATUS:
-		err = loop_set_status_compat(lo,
+		err = loop_set_status_compat(lo, mode, bdev,
(const struct compat_loop_info __user *)arg);
break;
case LOOP_GET_STATUS:
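The claim dance added above is the standard way to get temporary exclusivity on a block device: bd_prepare_to_claim() blocks new exclusive opens (returning an error, typically -EBUSY, if another holder already exists) and bd_abort_claiming() drops the claim again, with a unique pointer, here the function itself, serving as the holder cookie. A condensed sketch of the pattern with a hypothetical helper and trimmed error handling:

    #include <linux/blkdev.h>

    static int reconfigure_bdev(struct block_device *bdev, blk_mode_t mode)
    {
        bool claimed = false;
        int err = 0;

        /* upgrade to an exclusive claim unless the caller already
         * opened the device with BLK_OPEN_EXCL */
        if (!(mode & BLK_OPEN_EXCL)) {
            err = bd_prepare_to_claim(bdev, reconfigure_bdev, NULL);
            if (err)
                return err;
            claimed = true;
        }

        /* ... mutate device state; no new exclusive owner can appear ... */

        if (claimed)
            bd_abort_claiming(bdev, reconfigure_bdev);
        return err;
    }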
drivers/block/ublk_drv.c: 32 additions & 3 deletions
@@ -237,6 +237,7 @@ struct ublk_device {
bool canceling;
pid_t ublksrv_tgid;
struct delayed_work exit_work;
+	struct work_struct partition_scan_work;

struct ublk_queue *queues[];
};
@@ -254,6 +255,20 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);

+static void ublk_partition_scan_work(struct work_struct *work)
+{
+	struct ublk_device *ub =
+		container_of(work, struct ublk_device, partition_scan_work);
+
+	if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+					     &ub->ub_disk->state)))
+		return;
+
+	mutex_lock(&ub->ub_disk->open_mutex);
+	bdev_disk_changed(ub->ub_disk, false);
+	mutex_unlock(&ub->ub_disk->open_mutex);
+}
+
static inline struct ublksrv_io_desc *
ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
{
@@ -2026,6 +2041,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
mutex_lock(&ub->mutex);
ublk_stop_dev_unlocked(ub);
mutex_unlock(&ub->mutex);
+	flush_work(&ub->partition_scan_work);
ublk_cancel_dev(ub);
}

@@ -2954,9 +2970,17 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,

ublk_apply_params(ub);

-	/* don't probe partitions if any daemon task is un-trusted */
-	if (ub->unprivileged_daemons)
-		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+	/*
+	 * Suppress partition scan to avoid potential IO hang.
+	 *
+	 * If ublk server error occurs during partition scan, the IO may
+	 * wait while holding ub->mutex, which can deadlock with other
+	 * operations that need the mutex. Defer partition scan to async
+	 * work.
+	 * For unprivileged daemons, keep GD_SUPPRESS_PART_SCAN set
+	 * permanently.
+	 */
+	set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);

ublk_get_device(ub);
ub->dev_info.state = UBLK_S_DEV_LIVE;
Expand All @@ -2973,6 +2997,10 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,

set_bit(UB_STATE_USED, &ub->state);

+	/* Schedule async partition scan for trusted daemons */
+	if (!ub->unprivileged_daemons)
+		schedule_work(&ub->partition_scan_work);
+
out_put_cdev:
if (ret) {
ublk_detach_disk(ub);
@@ -3138,6 +3166,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
mutex_init(&ub->cancel_mutex);
+	INIT_WORK(&ub->partition_scan_work, ublk_partition_scan_work);

ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
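The ublk change follows the usual deferred-work lifecycle: the scan work is initialized at device creation, queued once the disk is live, and flushed at stop time so it can never run against a dying disk, while GD_SUPPRESS_PART_SCAN stays set until the work clears it (or forever, for unprivileged daemons). A stripped-down model of that lifecycle (hypothetical device type; the workqueue and gendisk calls are the real APIs):

    #include <linux/blkdev.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct mydev {
        struct gendisk *disk;
        struct work_struct scan_work;
    };

    static void mydev_scan_work(struct work_struct *work)
    {
        struct mydev *d = container_of(work, struct mydev, scan_work);

        /* rescan partitions outside the locks held at start time,
         * so a stalled scan cannot deadlock device operations */
        if (!test_and_clear_bit(GD_SUPPRESS_PART_SCAN, &d->disk->state))
            return;
        mutex_lock(&d->disk->open_mutex);
        bdev_disk_changed(d->disk, false);
        mutex_unlock(&d->disk->open_mutex);
    }

    static void mydev_create(struct mydev *d)
    {
        INIT_WORK(&d->scan_work, mydev_scan_work);
    }

    static void mydev_start(struct mydev *d)
    {
        set_bit(GD_SUPPRESS_PART_SCAN, &d->disk->state);
        /* ... add_disk() and mark the device live ... */
        schedule_work(&d->scan_work);
    }

    static void mydev_stop(struct mydev *d)
    {
        /* make sure the deferred scan finished before teardown */
        flush_work(&d->scan_work);
    }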