Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
59 commits
Select commit Hold shift + click to select a range
cabc52d
Merge branch 'io_uring-7.0' into for-next
axboe Mar 17, 2026
479f382
Merge branch 'for-7.1/block' into for-next
axboe Mar 17, 2026
7745de3
Merge branch 'for-7.1/io_uring' into for-next
axboe Mar 17, 2026
b7a7317
Merge branch 'for-7.1/io_uring' into for-next
axboe Mar 17, 2026
bfa0914
Merge branch 'for-7.1/block' into for-next
axboe Mar 18, 2026
71c3ee5
Merge branch 'for-7.1/block' into for-next
axboe Mar 18, 2026
5c72e4d
Merge branch 'for-7.1/block' into for-next
axboe Mar 18, 2026
27c7fa5
Merge branch 'for-7.1/block' into for-next
axboe Mar 19, 2026
c4ae841
Merge branch 'io_uring-7.0' into for-next
axboe Mar 19, 2026
ff16884
io_uring/kbuf: add support for kernel-managed buffer rings
joannekoong Mar 6, 2026
bf4a6eb
io_uring/kbuf: support kernel-managed buffer rings in buffer selection
joannekoong Mar 6, 2026
b6ffe53
io_uring/kbuf: add buffer ring pinning/unpinning
joannekoong Mar 6, 2026
9aa9e55
io_uring/kbuf: return buffer id in buffer selection
joannekoong Mar 6, 2026
de48bc0
io_uring/kbuf: add recycling for kernel managed buffer rings
joannekoong Mar 6, 2026
08b57b6
io_uring/kbuf: add io_uring_is_kmbuf_ring()
joannekoong Mar 6, 2026
4706d1f
io_uring/kbuf: export io_ring_buffer_select()
joannekoong Mar 6, 2026
3515a2a
io_uring/cmd: set selected buffer index in __io_uring_cmd_done()
joannekoong Mar 6, 2026
a8f6028
Merge branch 'for-7.1/io_uring' into for-next
axboe Mar 20, 2026
c9263e3
Merge branch 'for-7.1/block' into for-next
axboe Mar 21, 2026
b36478a
bcache: fix cached_dev.sb_bio use-after-free and crash
zoumingzhe Mar 22, 2026
53c3830
Merge branch 'block-7.0' into for-next
axboe Mar 22, 2026
6383b4d
Merge branch 'for-7.1/block' into for-next
axboe Mar 22, 2026
4107f06
Merge branch 'for-7.1/block' into for-next
axboe Mar 23, 2026
e7cbe11
Merge branch 'for-7.1/block' into for-next
axboe Mar 23, 2026
fda90d4
io_uring/zcrx: return back two step unregistration
isilence Mar 23, 2026
234fe7b
io_uring/zcrx: fully clean area on error in io_import_umem()
isilence Mar 23, 2026
8c0cab0
io_uring/zcrx: always dma map in advance
isilence Mar 23, 2026
80a4144
io_uring/zcrx: extract netdev+area init into a helper
isilence Mar 23, 2026
c117280
io_uring/zcrx: implement device-less mode for zcrx
isilence Mar 23, 2026
3bb8e06
io_uring/zcrx: use better name for RQ region
isilence Mar 23, 2026
161399f
io_uring/zcrx: add a struct for refill queue
isilence Mar 23, 2026
a5da6e3
io_uring/zcrx: use guards for locking
isilence Mar 23, 2026
ac02a64
io_uring/zcrx: move count check into zcrx_get_free_niov
isilence Mar 23, 2026
072237b
io_uring/zcrx: warn on alloc with non-empty pp cache
isilence Mar 23, 2026
f3e6e4b
io_uring/zcrx: netmem array as refiling format
isilence Mar 23, 2026
2bd8e50
io_uring/zcrx: consolidate dma syncing
isilence Mar 23, 2026
d2df9b6
io_uring/zcrx: warn on a repeated area append
isilence Mar 23, 2026
edec451
io_uring/zcrx: cache fallback availability in zcrx ctx
isilence Mar 23, 2026
4910552
io_uring/zcrx: check ctrl op payload struct sizes
isilence Mar 23, 2026
623a6d4
io_uring/zcrx: rename zcrx [un]register functions
isilence Mar 23, 2026
c9b52ec
Merge branch 'for-7.1/io_uring' into for-next
axboe Mar 23, 2026
993c9b5
Merge branch 'for-7.1/block' into for-next
axboe Mar 25, 2026
d2b8f51
Merge branch 'for-7.1/block' into for-next
axboe Mar 25, 2026
d61665c
Merge branch 'io_uring-7.0' into for-next
axboe Mar 26, 2026
18f2cf2
Merge branch 'for-7.1/block' into for-next
axboe Mar 26, 2026
58362fa
Merge branch 'io_uring-7.0' into for-next
axboe Mar 27, 2026
bd500ea
Merge branch 'for-7.1/block' into for-next
axboe Mar 27, 2026
cdd71b7
Merge branch 'io_uring-7.0' into for-next
axboe Mar 29, 2026
437429a
Merge branch 'for-7.1/block' into for-next
axboe Mar 31, 2026
9d0a7bd
io_uring: protect remaining lockless ctx->rings accesses with RCU
axboe Mar 31, 2026
fc446a7
Merge branch 'io_uring-7.0' into for-next
axboe Mar 31, 2026
37912f1
io_uring/rsrc: use io_cache_free() to free node
JackieLiu1 Mar 31, 2026
7fa8af9
Merge branch 'for-7.1/io_uring' into for-next
axboe Mar 31, 2026
4368fc6
Merge branch 'for-7.1/block' into for-next
axboe Mar 31, 2026
876a3cd
io_uring/bpf_filters: retain COW'ed settings on parse failures
axboe Mar 31, 2026
a8eb8a0
Merge branch 'io_uring-7.0' into for-next
axboe Mar 31, 2026
208cd44
Merge branch 'for-7.1/block' into for-next
axboe Mar 31, 2026
132ba7a
Merge branch 'for-7.1/block' into for-next
axboe Mar 31, 2026
1c3bc85
Dummy commit
kawasaki Apr 2, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions Documentation/ABI/stable/sysfs-block
Original file line number Diff line number Diff line change
Expand Up @@ -886,6 +886,21 @@ Description:
zone commands, they will be treated as regular block devices and
zoned will report "none".

What: /sys/block/<disk>/queue/zoned_qd1_writes
Date: January 2026
Contact: Damien Le Moal <[email protected]>
Description:
[RW] zoned_qd1_writes indicates if write operations to a zoned
block device are being handled using a single issuer context (a
kernel thread) operating at a maximum queue depth of 1. This
attribute is visible only for zoned block devices. The default
value for zoned block devices that are not rotational devices
(e.g. ZNS SSDs or zoned UFS devices) is 0. For rotational zoned
block devices (e.g. SMR HDDs) the default value is 1. Since
this default may not be appropriate for some devices, e.g.
remotely connected devices over high latency networks, the user
can disable this feature by setting this attribute to 0.


What: /sys/block/<disk>/hidden
Date: March 2023
Expand Down
13 changes: 13 additions & 0 deletions Documentation/ABI/testing/sysfs-nvme
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
What: /sys/devices/virtual/nvme-fabrics/ctl/.../tls_configured_key
Date: November 2025
KernelVersion: 6.19
Contact: Linux NVMe mailing list <[email protected]>
Description:
The file is available when using a secure concatenation
		connection to an NVMe target. Reading the file will return
		the serial of the currently negotiated key.

Writing 0 to the file will trigger a PSK reauthentication
(REPLACETLSPSK) with the target. After a reauthentication
the value returned by tls_configured_key will be the new
serial.
10 changes: 9 additions & 1 deletion Documentation/admin-guide/blockdev/zoned_loop.rst
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ The options available for the add command can be listed by reading the
/dev/zloop-control device::

$ cat /dev/zloop-control
add id=%d,capacity_mb=%u,zone_size_mb=%u,zone_capacity_mb=%u,conv_zones=%u,base_dir=%s,nr_queues=%u,queue_depth=%u,buffered_io
add id=%d,capacity_mb=%u,zone_size_mb=%u,zone_capacity_mb=%u,conv_zones=%u,max_open_zones=%u,base_dir=%s,nr_queues=%u,queue_depth=%u,buffered_io,zone_append=%u,ordered_zone_append,discard_write_cache
remove id=%d

In more details, the options that can be used with the "add" command are as
Expand All @@ -80,6 +80,9 @@ zone_capacity_mb Device zone capacity (must always be equal to or lower
conv_zones	Total number of conventional zones starting from
sector 0
Default: 8
max_open_zones Maximum number of open sequential write required zones
(0 for no limit).
Default: 0
base_dir Path to the base directory where to create the directory
containing the zone files of the device.
Default=/var/local/zloop.
Expand All @@ -104,6 +107,11 @@ ordered_zone_append Enable zloop mitigation of zone append reordering.
(extents), as when enabled, this can significantly reduce
the number of data extents needed for a file data
mapping.
discard_write_cache Discard all data that was not explicitly persisted using a
flush operation when the device is removed by truncating
each zone file to the size recorded during the last flush
operation. This simulates power fail events where
uncommitted data is lost.
=================== =========================================================

3) Deleting a Zoned Device
Expand Down
2 changes: 1 addition & 1 deletion block/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o

obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o \
bio-integrity-auto.o
bio-integrity-auto.o bio-integrity-fs.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
Expand Down
80 changes: 11 additions & 69 deletions block/bio-integrity-auto.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
container_of(work, struct bio_integrity_data, work);
struct bio *bio = bid->bio;

blk_integrity_verify_iter(bio, &bid->saved_bio_iter);
bio->bi_status = bio_integrity_verify(bio, &bid->saved_bio_iter);
bio_integrity_finish(bid);
bio_endio(bio);
}
Expand All @@ -50,11 +50,6 @@ static bool bip_should_check(struct bio_integrity_payload *bip)
return bip->bip_flags & BIP_CHECK_FLAGS;
}

static bool bi_offload_capable(struct blk_integrity *bi)
{
return bi->metadata_size == bi->pi_tuple_size;
}

/**
* __bio_integrity_endio - Integrity I/O completion function
* @bio: Protected bio
Expand Down Expand Up @@ -84,83 +79,30 @@ bool __bio_integrity_endio(struct bio *bio)
/**
* bio_integrity_prep - Prepare bio for integrity I/O
* @bio: bio to prepare
* @action: preparation action needed (BI_ACT_*)
*
* Checks if the bio already has an integrity payload attached. If it does, the
* payload has been generated by another kernel subsystem, and we just pass it
* through.
* Otherwise allocates integrity payload and for writes the integrity metadata
* will be generated. For reads, the completion handler will verify the
* metadata.
* Allocate the integrity payload. For writes, generate the integrity metadata
* and for reads, setup the completion handler to verify the metadata.
*
* This is used for bios that do not have user integrity payloads attached.
*/
bool bio_integrity_prep(struct bio *bio)
void bio_integrity_prep(struct bio *bio, unsigned int action)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct bio_integrity_data *bid;
bool set_flags = true;
gfp_t gfp = GFP_NOIO;

if (!bi)
return true;

if (!bio_sectors(bio))
return true;

/* Already protected? */
if (bio_integrity(bio))
return true;

switch (bio_op(bio)) {
case REQ_OP_READ:
if (bi->flags & BLK_INTEGRITY_NOVERIFY) {
if (bi_offload_capable(bi))
return true;
set_flags = false;
}
break;
case REQ_OP_WRITE:
/*
* Zero the memory allocated to not leak uninitialized kernel
* memory to disk for non-integrity metadata where nothing else
* initializes the memory.
*/
if (bi->flags & BLK_INTEGRITY_NOGENERATE) {
if (bi_offload_capable(bi))
return true;
set_flags = false;
gfp |= __GFP_ZERO;
} else if (bi->metadata_size > bi->pi_tuple_size)
gfp |= __GFP_ZERO;
break;
default:
return true;
}

if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
return true;

bid = mempool_alloc(&bid_pool, GFP_NOIO);
bio_integrity_init(bio, &bid->bip, &bid->bvec, 1);
bid->bio = bio;
bid->bip.bip_flags |= BIP_BLOCK_INTEGRITY;
bio_integrity_alloc_buf(bio, gfp & __GFP_ZERO);

bip_set_seed(&bid->bip, bio->bi_iter.bi_sector);

if (set_flags) {
if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
bid->bip.bip_flags |= BIP_IP_CHECKSUM;
if (bi->csum_type)
bid->bip.bip_flags |= BIP_CHECK_GUARD;
if (bi->flags & BLK_INTEGRITY_REF_TAG)
bid->bip.bip_flags |= BIP_CHECK_REFTAG;
}
bio_integrity_alloc_buf(bio, action & BI_ACT_ZERO);
if (action & BI_ACT_CHECK)
bio_integrity_setup_default(bio);

/* Auto-generate integrity metadata if this is a write */
if (bio_data_dir(bio) == WRITE && bip_should_check(&bid->bip))
blk_integrity_generate(bio);
bio_integrity_generate(bio);
else
bid->saved_bio_iter = bio->bi_iter;
return true;
}
EXPORT_SYMBOL(bio_integrity_prep);

Expand Down
81 changes: 81 additions & 0 deletions block/bio-integrity-fs.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2025 Christoph Hellwig.
*/
#include <linux/blk-integrity.h>
#include <linux/bio-integrity.h>
#include "blk.h"

/*
 * Per-bio integrity state for filesystem-initiated integrity I/O.
 *
 * One of these is mempool-allocated per bio in fs_bio_integrity_alloc() and
 * released by fs_bio_integrity_free().  The embedded payload and single bvec
 * are handed to bio_integrity_init(), so the payload lifetime is tied to this
 * buffer, not the bio.
 */
struct fs_bio_integrity_buf {
	struct bio_integrity_payload bip;
	struct bio_vec bvec;	/* single vector backing the metadata buffer */
};

static struct kmem_cache *fs_bio_integrity_cache;
static mempool_t fs_bio_integrity_pool;

/*
 * Attach an integrity payload and metadata buffer to @bio if the underlying
 * device requires integrity handling for this operation.
 *
 * Returns the BI_ACT_* action mask for the bio, or 0 when no integrity
 * processing is needed (in which case nothing was allocated).
 */
unsigned int fs_bio_integrity_alloc(struct bio *bio)
{
	unsigned int act = bio_integrity_action(bio);
	struct fs_bio_integrity_buf *buf;

	if (!act)
		return 0;

	/* Mempool-backed, so this cannot fail for GFP_NOIO. */
	buf = mempool_alloc(&fs_bio_integrity_pool, GFP_NOIO);
	bio_integrity_init(bio, &buf->bip, &buf->bvec, 1);
	bio_integrity_alloc_buf(bio, act & BI_ACT_ZERO);

	if (act & BI_ACT_CHECK)
		bio_integrity_setup_default(bio);

	return act;
}

void fs_bio_integrity_free(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);

bio_integrity_free_buf(bip);
mempool_free(container_of(bip, struct fs_bio_integrity_buf, bip),
&fs_bio_integrity_pool);

bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
}

/*
 * Allocate an integrity payload for @bio (if the device needs one) and
 * generate the protection information for it.  Intended for filesystem
 * writes; a zero action from fs_bio_integrity_alloc() means the device
 * needs no integrity handling and generation is skipped entirely.
 */
void fs_bio_integrity_generate(struct bio *bio)
{
	if (fs_bio_integrity_alloc(bio))
		bio_integrity_generate(bio);
}
EXPORT_SYMBOL_GPL(fs_bio_integrity_generate);

/*
 * Verify the integrity metadata of a completed read.
 *
 * For use by the submitter after the driver is done with the bio; the driver
 * has advanced bip_iter, so it is rebuilt here from the @sector and @size
 * (in bytes) the submitter remembered at submission time.
 *
 * Returns 0 on success or a negative errno on verification failure.
 */
int fs_bio_integrity_verify(struct bio *bio, sector_t sector, unsigned int size)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	blk_status_t status;

	/* Reset the iterator to cover the full original data range. */
	memset(&bip->bip_iter, 0, sizeof(bip->bip_iter));
	bip->bip_iter.bi_sector = sector;
	bip->bip_iter.bi_size = bio_integrity_bytes(bi, size >> SECTOR_SHIFT);

	status = bio_integrity_verify(bio, &bip->bip_iter);
	return blk_status_to_errno(status);
}

/*
 * Set up the slab cache and mempool backing fs_bio_integrity_alloc().
 *
 * Allocation failure here is fatal by policy: SLAB_PANIC covers the cache,
 * and we panic explicitly if the mempool cannot be initialized, since the
 * integrity path cannot operate without it.
 */
static int __init fs_bio_integrity_init(void)
{
	fs_bio_integrity_cache = kmem_cache_create("fs_bio_integrity",
			sizeof(struct fs_bio_integrity_buf), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	if (mempool_init_slab_pool(&fs_bio_integrity_pool, BIO_POOL_SIZE,
			fs_bio_integrity_cache))
		panic("fs_bio_integrity: can't create pool\n");
	return 0;
}
fs_initcall(fs_bio_integrity_init);
64 changes: 64 additions & 0 deletions block/bio-integrity.c
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
*/

#include <linux/blk-integrity.h>
#include <linux/t10-pi.h>
#include "blk.h"

struct bio_integrity_alloc {
Expand All @@ -16,6 +17,53 @@ struct bio_integrity_alloc {

static mempool_t integrity_buf_pool;

/*
 * True when the metadata consists solely of the PI tuple (no extra
 * per-sector metadata), i.e. the device/controller can handle the
 * protection information without a kernel-allocated buffer.
 */
static bool bi_offload_capable(struct blk_integrity *bi)
{
	return bi->metadata_size == bi->pi_tuple_size;
}

/*
 * __bio_integrity_action - determine integrity handling needed for a bio
 * @bio:	bio to evaluate (device is known to have integrity metadata)
 *
 * Returns a mask of BI_ACT_* flags describing what integrity preparation the
 * bio requires:
 *
 *   BI_ACT_BUFFER - allocate a metadata buffer
 *   BI_ACT_CHECK  - generate (writes) or verify (reads) the metadata
 *   BI_ACT_ZERO   - zero the buffer so uninitialized kernel memory is not
 *                   written to disk
 *
 * Returns 0 when no integrity handling is needed, including for ops other
 * than read/write and for bios carrying an inline-crypto context (which is
 * incompatible and triggers a one-shot warning).
 */
unsigned int __bio_integrity_action(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	/* Inline encryption and integrity metadata are mutually exclusive. */
	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		if (bi->flags & BLK_INTEGRITY_NOVERIFY) {
			/* No buffer needed if the metadata is pure PI. */
			if (bi_offload_capable(bi))
				return 0;
			return BI_ACT_BUFFER;
		}
		return BI_ACT_BUFFER | BI_ACT_CHECK;
	case REQ_OP_WRITE:
		/*
		 * Flush masquerading as write?
		 */
		if (!bio_sectors(bio))
			return 0;

		/*
		 * Zero the memory allocated to not leak uninitialized kernel
		 * memory to disk for non-integrity metadata where nothing else
		 * initializes the memory.
		 */
		if (bi->flags & BLK_INTEGRITY_NOGENERATE) {
			if (bi_offload_capable(bi))
				return 0;
			return BI_ACT_BUFFER | BI_ACT_ZERO;
		}

		if (bi->metadata_size > bi->pi_tuple_size)
			return BI_ACT_BUFFER | BI_ACT_CHECK | BI_ACT_ZERO;
		return BI_ACT_BUFFER | BI_ACT_CHECK;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(__bio_integrity_action);

void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
Expand Down Expand Up @@ -53,6 +101,22 @@ void bio_integrity_free_buf(struct bio_integrity_payload *bip)
kfree(bvec_virt(bv));
}

/*
 * Set the default seed and check flags on @bio's integrity payload based on
 * the device's integrity profile: guard checksum (plus its flavor) and
 * reference-tag checking.
 */
void bio_integrity_setup_default(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	unsigned short flags = 0;

	/* Seed the reference tag from the starting sector. */
	bip_set_seed(bip, bio->bi_iter.bi_sector);

	if (bi->csum_type) {
		flags |= BIP_CHECK_GUARD;
		if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
			flags |= BIP_IP_CHECKSUM;
	}
	if (bi->flags & BLK_INTEGRITY_REF_TAG)
		flags |= BIP_CHECK_REFTAG;

	bip->bip_flags |= flags;
}

/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
Expand Down
Loading
Loading