Merge tag 'for-5.9/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:

 - writecache fix to allow dax_direct_access() to partitioned pmem
   devices.

 - multipath fix to avoid any Path Group initialization if
   'pg_init_in_progress' isn't set.

 - crypt fix to use DECLARE_CRYPTO_WAIT() for onstack wait structures.

 - integrity fix to properly check integrity after device creation when
   in bitmap mode.

 - thinp and cache target __create_persistent_data_objects() fixes to
   reset the metadata's dm_block_manager pointer from PTR_ERR to NULL
   before returning from error path.

 - persistent-data block manager fix to guard against dm_block_manager
   NULL pointer dereference in dm_bm_is_read_only() and update various
   opencoded bm->read_only checks to use dm_bm_is_read_only() instead.

* tag 'for-5.9/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm thin metadata: Fix use-after-free in dm_bm_set_read_only
  dm thin metadata: Avoid returning cmd->bm wild pointer on error
  dm cache metadata: Avoid returning cmd->bm wild pointer on error
  dm integrity: fix error reporting in bitmap mode after creation
  dm crypt: Initialize crypto wait structures
  dm mpath: fix racey management of PG initialization
  dm writecache: handle DAX to partitions on persistent memory correctly
drivers/md/dm-cache-metadata.c
@@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
 					  CACHE_MAX_CONCURRENT_LOCKS);
 	if (IS_ERR(cmd->bm)) {
 		DMERR("could not create block manager");
-		return PTR_ERR(cmd->bm);
+		r = PTR_ERR(cmd->bm);
+		cmd->bm = NULL;
+		return r;
 	}
 
 	r = __open_or_format_metadata(cmd, may_format_device);
-	if (r)
+	if (r) {
 		dm_block_manager_destroy(cmd->bm);
+		cmd->bm = NULL;
+	}
 
 	return r;
 }
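Why resetting cmd->bm matters: IS_ERR()-style APIs encode a negative errno into the pointer value itself, so after a failed create the field holds a non-NULL wild value that any later "is it allocated?" check happily dereferences or hands to dm_block_manager_destroy(). A minimal user-space sketch of the hazard and the fix follows; the ERR_PTR()/IS_ERR() helpers are re-implemented here just for illustration (the kernel's live in <linux/err.h>) and struct metadata is a hypothetical stand-in.

/* Minimal sketch, not kernel code: ERR_PTR()/IS_ERR() re-implemented. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long e)        { return (void *)e; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)  { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }

struct metadata { void *bm; };          /* hypothetical stand-in */

static int create(struct metadata *md, int apply_fix)
{
	md->bm = ERR_PTR(-ENOMEM);      /* pretend the allocation failed */
	if (IS_ERR(md->bm)) {
		int r = (int)PTR_ERR(md->bm);
		if (apply_fix)
			md->bm = NULL;  /* the fix: don't leak a wild pointer */
		return r;
	}
	return 0;
}

int main(void)
{
	struct metadata md;

	create(&md, 0);
	printf("buggy: bm looks %s\n", md.bm ? "alive (wild pointer!)" : "absent");
	create(&md, 1);
	printf("fixed: bm looks %s\n", md.bm ? "alive (wild pointer!)" : "absent");
	return 0;
}

The thin-pool hunk further down applies the identical pattern to pmd->bm, and the dm-block-manager hunks at the end make the NULL value safe to query.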
drivers/md/dm-crypt.c
@@ -739,7 +739,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
 	u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
 	struct skcipher_request *req;
 	struct scatterlist src, dst;
-	struct crypto_wait wait;
+	DECLARE_CRYPTO_WAIT(wait);
 	int err;
 
 	req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
@@ -936,7 +936,7 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
 	u8 *es, *ks, *data, *data2, *data_offset;
 	struct skcipher_request *req;
 	struct scatterlist *sg, *sg2, src, dst;
-	struct crypto_wait wait;
+	DECLARE_CRYPTO_WAIT(wait);
 	int i, r;
 
 	req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
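For context on these two crypt hunks: crypto_wait_req() has to sleep on the completion embedded in the wait object whenever the transform runs asynchronously (the driver returns -EINPROGRESS), so a bare on-stack "struct crypto_wait wait;" leaves that completion holding stack garbage. Below is a hedged kernel-style sketch of the intended pattern; the struct and macro shapes are paraphrased from include/linux/crypto.h of this era (treat exact details as approximate), and example_sync_encrypt() is a hypothetical function name.

/* Paraphrased shape (see include/linux/crypto.h, v5.9 era) -- approximate:
 *
 *	struct crypto_wait {
 *		struct completion completion;
 *		int err;
 *	};
 *	#define DECLARE_CRYPTO_WAIT(_wait) \
 *		struct crypto_wait _wait = { \
 *			COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
 */
static int example_sync_encrypt(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);	/* completion initialized for on-stack use */

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/*
	 * With the old "struct crypto_wait wait;" an -EINPROGRESS return
	 * made crypto_wait_req() block on an uninitialized completion.
	 */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}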
drivers/md/dm-integrity.c
@@ -2487,6 +2487,7 @@ next_chunk:
 	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
 	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
 		if (ic->mode == 'B') {
+			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
 			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
 			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
 		}
@@ -2564,6 +2565,17 @@ next_chunk:
 		goto err;
 	}
 
+	if (ic->mode == 'B') {
+		sector_t start, end;
+		start = (range.logical_sector >>
+			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+		end = ((range.logical_sector + range.n_sectors) >>
+		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
+	}
+
 advance_and_next:
 	cond_resched();
 
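The start/end computation above collapses the recalculated sector range onto whole bitmap-bit granules before clearing, using a combined log2 shift (sectors per block plus blocks per bitmap bit). A small runnable arithmetic check, in user space with made-up log2 values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up values: 8 sectors per block, 16 blocks per bitmap bit. */
	unsigned log2_sectors_per_block = 3;
	unsigned log2_blocks_per_bitmap_bit = 4;
	unsigned shift = log2_sectors_per_block + log2_blocks_per_bitmap_bit;

	uint64_t logical_sector = 1000, n_sectors = 300;

	/* Round down to bitmap-bit boundaries, exactly as in the patch. */
	uint64_t start = (logical_sector >> shift) << shift;
	uint64_t end = ((logical_sector + n_sectors) >> shift) << shift;

	/* One bitmap bit spans 128 sectors here, so start rounds down to
	 * 896 and end rounds down to 1280 with these numbers. */
	printf("shift=%u start=%llu end=%llu cleared=%llu sectors\n", shift,
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)(end - start));
	return 0;
}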
drivers/md/dm-mpath.c
@@ -1287,17 +1287,25 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
 static void flush_multipath_work(struct multipath *m)
 {
 	if (m->hw_handler_name) {
-		set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
-		smp_mb__after_atomic();
+		unsigned long flags;
+
+		if (!atomic_read(&m->pg_init_in_progress))
+			goto skip;
+
+		spin_lock_irqsave(&m->lock, flags);
+		if (atomic_read(&m->pg_init_in_progress) &&
+		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
+			spin_unlock_irqrestore(&m->lock, flags);
 
-		if (atomic_read(&m->pg_init_in_progress))
 			flush_workqueue(kmpath_handlerd);
-		multipath_wait_for_pg_init_completion(m);
+			multipath_wait_for_pg_init_completion(m);
 
-		clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
-		smp_mb__after_atomic();
+			spin_lock_irqsave(&m->lock, flags);
+			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+		}
+		spin_unlock_irqrestore(&m->lock, flags);
 	}
-
+skip:
 	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		flush_work(&m->process_queued_bios);
 	flush_work(&m->trigger_event);
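The mpath hunk replaces an unconditional set_bit()/smp_mb() with a locked check-then-act: only a caller that both observes pg_init_in_progress and wins the test_and_set_bit() race goes on to flush the workqueue and wait. A user-space analogue of that "only the winner proceeds" shape, modeling test_and_set_bit() with a C11 atomic exchange; the spinlock half of the fix is elided and all names here are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pg_init_disabled;
static atomic_int  pg_init_in_progress;

static bool try_disable_pg_init(void)
{
	/* Mirrors "atomic_read(&m->pg_init_in_progress) &&
	 * !test_and_set_bit(MPATHF_PG_INIT_DISABLED, ...)": bail out early
	 * when nothing is in flight, and let only the caller that actually
	 * flips the flag proceed to flush and wait. */
	if (atomic_load(&pg_init_in_progress) == 0)
		return false;		/* nothing to wait for */
	return !atomic_exchange(&pg_init_disabled, true);
}

int main(void)
{
	atomic_store(&pg_init_in_progress, 1);
	printf("first caller flushes: %d\n", try_disable_pg_init());
	printf("second caller skips:  %d\n", try_disable_pg_init());
	return 0;
}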
drivers/md/dm-thin-metadata.c
@@ -739,12 +739,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
 					  THIN_MAX_CONCURRENT_LOCKS);
 	if (IS_ERR(pmd->bm)) {
 		DMERR("could not create block manager");
-		return PTR_ERR(pmd->bm);
+		r = PTR_ERR(pmd->bm);
+		pmd->bm = NULL;
+		return r;
 	}
 
 	r = __open_or_format_metadata(pmd, format_device);
-	if (r)
+	if (r) {
 		dm_block_manager_destroy(pmd->bm);
+		pmd->bm = NULL;
+	}
 
 	return r;
 }
@@ -954,7 +958,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
 	}
 
 	pmd_write_lock_in_core(pmd);
-	if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
+	if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
 		r = __commit_transaction(pmd);
 		if (r < 0)
 			DMWARN("%s: __commit_transaction() failed, error = %d",
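The one-line reorder above leans on && short-circuit evaluation: pmd->fail_io is now tested before dm_bm_is_read_only() ever touches pmd->bm, which may be NULL once the error paths above have run. A tiny user-space illustration of the ordering effect (names invented):

#include <stdbool.h>
#include <stdio.h>

static int probes;	/* counts how often the risky check actually runs */

static bool risky_check(void *handle)
{
	probes++;			/* would dereference handle in real code */
	return handle == NULL;		/* stand-in result */
}

int main(void)
{
	void *bm = NULL;		/* torn down on an earlier error path */
	bool fail_io = true;

	/* Old order: risky_check(bm) ran first, even when fail_io was set. */
	if (!risky_check(bm) && !fail_io)
		puts("commit (old order)");

	/* New order: fail_io short-circuits, risky_check() never runs. */
	if (!fail_io && !risky_check(bm))
		puts("commit (new order)");

	printf("risky probes: %d (only the old order probed)\n", probes);
	return 0;
}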
drivers/md/dm-writecache.c
@@ -231,6 +231,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	pfn_t pfn;
 	int id;
 	struct page **pages;
+	sector_t offset;
 
 	wc->memory_vmapped = false;
 
@@ -245,9 +246,16 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 		goto err1;
 	}
 
+	offset = get_start_sect(wc->ssd_dev->bdev);
+	if (offset & (PAGE_SIZE / 512 - 1)) {
+		r = -EINVAL;
+		goto err1;
+	}
+	offset >>= PAGE_SHIFT - 9;
+
 	id = dax_read_lock();
 
-	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
+	da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
 	if (da < 0) {
 		wc->memory_map = NULL;
 		r = da;
@@ -269,7 +277,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	i = 0;
 	do {
 		long daa;
-		daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
+		daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
 				NULL, &pfn);
 		if (daa <= 0) {
 			r = daa ? daa : -EINVAL;
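Worked example of the partition-offset math these writecache hunks add: get_start_sect() yields the partition start in 512-byte sectors, while dax_direct_access() counts in PAGE_SIZE pages, hence the alignment check and the sector-to-page conversion. Runnable in user space; the starting sector below is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

int main(void)
{
	uint64_t offset = 2048;	/* partition begins at sector 2048 = 1 MiB */

	/* Partition start must be page-aligned or DAX pages won't line up. */
	if (offset & (PAGE_SIZE / 512 - 1)) {
		fprintf(stderr, "partition not page-aligned\n");
		return 1;
	}
	offset >>= PAGE_SHIFT - 9;	/* sectors -> pages: 2048 >> 3 = 256 */

	printf("pass page offset %llu to dax_direct_access()\n",
	       (unsigned long long)offset);
	return 0;
}

Before this fix the driver always passed page offset 0, so DAX access to a partitioned pmem device mapped the start of the whole disk rather than the partition.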
drivers/md/persistent-data/dm-block-manager.c
@@ -493,7 +493,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
 	void *p;
 	int r;
 
-	if (bm->read_only)
+	if (dm_bm_is_read_only(bm))
 		return -EPERM;
 
 	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
@@ -562,7 +562,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
 	struct buffer_aux *aux;
 	void *p;
 
-	if (bm->read_only)
+	if (dm_bm_is_read_only(bm))
 		return -EPERM;
 
 	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
@@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock);
 
 int dm_bm_flush(struct dm_block_manager *bm)
 {
-	if (bm->read_only)
+	if (dm_bm_is_read_only(bm))
 		return -EPERM;
 
 	return dm_bufio_write_dirty_buffers(bm->bufio);
@@ -616,18 +616,20 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
 
 bool dm_bm_is_read_only(struct dm_block_manager *bm)
 {
-	return bm->read_only;
+	return (bm ? bm->read_only : true);
 }
 EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
 
 void dm_bm_set_read_only(struct dm_block_manager *bm)
 {
-	bm->read_only = true;
+	if (bm)
+		bm->read_only = true;
 }
 EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
 
 void dm_bm_set_read_write(struct dm_block_manager *bm)
 {
-	bm->read_only = false;
+	if (bm)
+		bm->read_only = false;
 }
 EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
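These final hunks adopt a fail-safe accessor pattern: once the thin and cache fixes above can legitimately leave ->bm as NULL, every query of the handle answers with the most restrictive value (read-only) instead of dereferencing, and setters become no-ops. A user-space sketch of the same pattern, with illustrative types rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct block_manager { bool read_only; };

static bool bm_is_read_only(struct block_manager *bm)
{
	return bm ? bm->read_only : true;	/* no manager => treat as RO */
}

static void bm_set_read_only(struct block_manager *bm, bool ro)
{
	if (bm)
		bm->read_only = ro;		/* silently ignore NULL */
}

int main(void)
{
	struct block_manager *bm = NULL;	/* e.g. creation failed */

	/* Every write path gated on bm_is_read_only() now refuses with an
	 * -EPERM-style error instead of hitting a NULL dereference. */
	printf("read-only? %s\n", bm_is_read_only(bm) ? "yes" : "no");
	bm_set_read_only(bm, true);		/* harmless no-op */
	return 0;
}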